]> git.proxmox.com Git - proxmox-backup.git/blame - src/bin/proxmox-backup-client.rs
src/client/http_client.rs: new struct HttpClientOptions
[proxmox-backup.git] / src / bin / proxmox-backup-client.rs
CommitLineData
ff5d3707 1use failure::*;
70235f72
CE
2use nix::unistd::{fork, ForkResult, pipe};
3use std::os::unix::io::RawFd;
27c9affb 4use chrono::{Local, DateTime, Utc, TimeZone};
e9c9409a 5use std::path::{Path, PathBuf};
2eeaacb9 6use std::collections::{HashSet, HashMap};
70235f72 7use std::ffi::OsStr;
bb19af73 8use std::io::{Write, Seek, SeekFrom};
2761d6a4
DM
9use std::os::unix::fs::OpenOptionsExt;
10
552c2259 11use proxmox::{sortable, identity};
feaa1ad3 12use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
a47a02ae 13use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
3d482025 14use proxmox::api::schema::*;
7eea56ca 15use proxmox::api::cli::*;
5830c205 16use proxmox::api::api;
ff5d3707 17
fe0e04c6 18use proxmox_backup::tools;
bbf9e7e9 19use proxmox_backup::api2::types::*;
151c6ce2 20use proxmox_backup::client::*;
247cdbce 21use proxmox_backup::backup::*;
7926a3a1 22use proxmox_backup::pxar::{ self, catalog::* };
86eda3eb 23
fe0e04c6
DM
24//use proxmox_backup::backup::image_index::*;
25//use proxmox_backup::config::datastore;
8968258b 26//use proxmox_backup::pxar::encoder::*;
728797d0 27//use proxmox_backup::backup::datastore::*;
23bb8780 28
f5f13ebc 29use serde_json::{json, Value};
1c0472e8 30//use hyper::Body;
2761d6a4 31use std::sync::{Arc, Mutex};
255f378a 32//use regex::Regex;
d0a03d40 33use xdg::BaseDirectories;
ae0be2dd 34
5a2df000 35use futures::*;
c4ff3dce 36use tokio::sync::mpsc;
ae0be2dd 37
9ea4bce4 38proxmox::const_regex! {
255f378a 39 BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
ae0be2dd 40}
33d64b81 41
255f378a
DM
42const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
43 .format(&BACKUP_REPO_URL)
44 .max_length(256)
45 .schema();
d0a03d40 46
a47a02ae
DM
47const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
48 "Backup source specification ([<label>:<path>]).")
49 .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
50 .schema();
51
52const KEYFILE_SCHEMA: Schema = StringSchema::new(
53 "Path to encryption key. All data will be encrypted using this key.")
54 .schema();
55
56const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
57 "Chunk size in KB. Must be a power of 2.")
58 .minimum(64)
59 .maximum(4096)
60 .default(4096)
61 .schema();
62
2665cef7
DM
/// Fallback repository taken from the `PBS_REPOSITORY` environment
/// variable; `None` when the variable is unset or not valid unicode.
fn get_default_repository() -> Option<String> {
    match std::env::var("PBS_REPOSITORY") {
        Ok(repo) => Some(repo),
        Err(_) => None,
    }
}
66
67fn extract_repository_from_value(
68 param: &Value,
69) -> Result<BackupRepository, Error> {
70
71 let repo_url = param["repository"]
72 .as_str()
73 .map(String::from)
74 .or_else(get_default_repository)
75 .ok_or_else(|| format_err!("unable to get (default) repository"))?;
76
77 let repo: BackupRepository = repo_url.parse()?;
78
79 Ok(repo)
80}
81
82fn extract_repository_from_map(
83 param: &HashMap<String, String>,
84) -> Option<BackupRepository> {
85
86 param.get("repository")
87 .map(String::from)
88 .or_else(get_default_repository)
89 .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
90}
91
d0a03d40
DM
92fn record_repository(repo: &BackupRepository) {
93
94 let base = match BaseDirectories::with_prefix("proxmox-backup") {
95 Ok(v) => v,
96 _ => return,
97 };
98
99 // usually $HOME/.cache/proxmox-backup/repo-list
100 let path = match base.place_cache_file("repo-list") {
101 Ok(v) => v,
102 _ => return,
103 };
104
11377a47 105 let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
d0a03d40
DM
106
107 let repo = repo.to_string();
108
109 data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };
110
111 let mut map = serde_json::map::Map::new();
112
113 loop {
114 let mut max_used = 0;
115 let mut max_repo = None;
116 for (repo, count) in data.as_object().unwrap() {
117 if map.contains_key(repo) { continue; }
118 if let Some(count) = count.as_i64() {
119 if count > max_used {
120 max_used = count;
121 max_repo = Some(repo);
122 }
123 }
124 }
125 if let Some(repo) = max_repo {
126 map.insert(repo.to_owned(), json!(max_used));
127 } else {
128 break;
129 }
130 if map.len() > 10 { // store max. 10 repos
131 break;
132 }
133 }
134
135 let new_data = json!(map);
136
feaa1ad3 137 let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
d0a03d40
DM
138}
139
49811347 140fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
d0a03d40
DM
141
142 let mut result = vec![];
143
144 let base = match BaseDirectories::with_prefix("proxmox-backup") {
145 Ok(v) => v,
146 _ => return result,
147 };
148
149 // usually $HOME/.cache/proxmox-backup/repo-list
150 let path = match base.place_cache_file("repo-list") {
151 Ok(v) => v,
152 _ => return result,
153 };
154
11377a47 155 let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
d0a03d40
DM
156
157 if let Some(map) = data.as_object() {
49811347 158 for (repo, _count) in map {
d0a03d40
DM
159 result.push(repo.to_owned());
160 }
161 }
162
163 result
164}
165
d59dbeca
DM
166fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {
167
168 let options = HttpClientOptions::new()
169 .interactive(true)
170 .ticket_cache(true);
171
172 HttpClient::new(server, userid, options)
173}
174
d105176f
DM
175async fn view_task_result(
176 client: HttpClient,
177 result: Value,
178 output_format: &str,
179) -> Result<(), Error> {
180 let data = &result["data"];
181 if output_format == "text" {
182 if let Some(upid) = data.as_str() {
183 display_task_log(client, upid, true).await?;
184 }
185 } else {
186 format_and_print_result(&data, &output_format);
187 }
188
189 Ok(())
190}
191
42af4b8f
DM
192async fn api_datastore_list_snapshots(
193 client: &HttpClient,
194 store: &str,
195 group: Option<BackupGroup>,
196) -> Result<Vec<SnapshotListItem>, Error> {
197
198 let path = format!("api2/json/admin/datastore/{}/snapshots", store);
199
200 let mut args = json!({});
201 if let Some(group) = group {
202 args["backup-type"] = group.backup_type().into();
203 args["backup-id"] = group.backup_id().into();
204 }
205
206 let mut result = client.get(&path, Some(args)).await?;
207
208 let list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?;
209
210 Ok(list)
211}
212
27c9affb
DM
213async fn api_datastore_latest_snapshot(
214 client: &HttpClient,
215 store: &str,
216 group: BackupGroup,
217) -> Result<(String, String, DateTime<Utc>), Error> {
218
219 let mut list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
220
221 if list.is_empty() {
222 bail!("backup group {:?} does not contain any snapshots.", group.group_path());
223 }
224
225 list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
226
227 let backup_time = Utc.timestamp(list[0].backup_time, 0);
228
229 Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
230}
231
232
e9722f8b 233async fn backup_directory<P: AsRef<Path>>(
cf9271e2 234 client: &BackupWriter,
17d6979a 235 dir_path: P,
247cdbce 236 archive_name: &str,
36898ffc 237 chunk_size: Option<usize>,
2eeaacb9 238 device_set: Option<HashSet<u64>>,
219ef0e6 239 verbose: bool,
5b72c9b4 240 skip_lost_and_found: bool,
f98ac774 241 crypt_config: Option<Arc<CryptConfig>>,
f1d99e3f 242 catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
6fc053ed 243 entries_max: usize,
2c3891d1 244) -> Result<BackupStats, Error> {
33d64b81 245
6fc053ed
CE
246 let pxar_stream = PxarBackupStream::open(
247 dir_path.as_ref(),
248 device_set,
249 verbose,
250 skip_lost_and_found,
251 catalog,
252 entries_max,
253 )?;
e9722f8b 254 let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
ff3d3100 255
e9722f8b 256 let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks
5e7a09be 257
c4ff3dce 258 let stream = rx
e9722f8b 259 .map_err(Error::from);
17d6979a 260
c4ff3dce 261 // spawn chunker inside a separate task so that it can run parallel
e9722f8b 262 tokio::spawn(async move {
db0cb9ce
WB
263 while let Some(v) = chunk_stream.next().await {
264 let _ = tx.send(v).await;
265 }
e9722f8b 266 });
17d6979a 267
e9722f8b
WB
268 let stats = client
269 .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
270 .await?;
bcd879cf 271
2c3891d1 272 Ok(stats)
bcd879cf
DM
273}
274
e9722f8b 275async fn backup_image<P: AsRef<Path>>(
cf9271e2 276 client: &BackupWriter,
6af905c1
DM
277 image_path: P,
278 archive_name: &str,
279 image_size: u64,
36898ffc 280 chunk_size: Option<usize>,
1c0472e8 281 _verbose: bool,
f98ac774 282 crypt_config: Option<Arc<CryptConfig>>,
2c3891d1 283) -> Result<BackupStats, Error> {
6af905c1 284
6af905c1
DM
285 let path = image_path.as_ref().to_owned();
286
e9722f8b 287 let file = tokio::fs::File::open(path).await?;
6af905c1 288
db0cb9ce 289 let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
6af905c1
DM
290 .map_err(Error::from);
291
36898ffc 292 let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
6af905c1 293
e9722f8b
WB
294 let stats = client
295 .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
296 .await?;
6af905c1 297
2c3891d1 298 Ok(stats)
6af905c1
DM
299}
300
52c171e4
DM
/// Strip the server-side archive extension (".didx", ".fidx" or
/// ".blob") from a file name; unknown extensions are returned
/// unchanged (should not happen).
///
/// Note: the function name keeps its historic typo ("expenstion")
/// because callers throughout the file depend on it.
fn strip_server_file_expenstion(name: &str) -> String {

    // table replaces the previous hard-coded `len() - 5`, which silently
    // relied on all three extensions being exactly five bytes long
    const SERVER_EXTENSIONS: &[&str] = &[".didx", ".fidx", ".blob"];

    for ext in SERVER_EXTENSIONS {
        if name.ends_with(ext) {
            return name[..name.len() - ext.len()].to_owned();
        }
    }

    name.to_owned() // should not happen
}
309
a47a02ae
DM
310#[api(
311 input: {
312 properties: {
313 repository: {
314 schema: REPO_URL_SCHEMA,
315 optional: true,
316 },
317 "output-format": {
318 schema: OUTPUT_FORMAT,
319 optional: true,
320 },
321 }
322 }
323)]
324/// List backup groups.
325async fn list_backup_groups(param: Value) -> Result<Value, Error> {
812c6f87 326
2665cef7 327 let repo = extract_repository_from_value(&param)?;
812c6f87 328
d59dbeca 329 let client = connect(repo.host(), repo.user())?;
812c6f87 330
d0a03d40 331 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
812c6f87 332
8a8a4703 333 let mut result = client.get(&path, None).await?;
812c6f87 334
d0a03d40
DM
335 record_repository(&repo);
336
812c6f87 337 // fixme: implement and use output formatter instead ..
80822b95
DM
338 let list = result["data"].as_array_mut().unwrap();
339
340 list.sort_unstable_by(|a, b| {
341 let a_id = a["backup-id"].as_str().unwrap();
342 let a_backup_type = a["backup-type"].as_str().unwrap();
343 let b_id = b["backup-id"].as_str().unwrap();
344 let b_backup_type = b["backup-type"].as_str().unwrap();
345
346 let type_order = a_backup_type.cmp(b_backup_type);
347 if type_order == std::cmp::Ordering::Equal {
348 a_id.cmp(b_id)
349 } else {
350 type_order
351 }
352 });
812c6f87 353
34a816cc
DM
354 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
355
356 let mut result = vec![];
357
812c6f87
DM
358 for item in list {
359
ad20d198
DM
360 let id = item["backup-id"].as_str().unwrap();
361 let btype = item["backup-type"].as_str().unwrap();
362 let epoch = item["last-backup"].as_i64().unwrap();
fa5d6977 363 let last_backup = Utc.timestamp(epoch, 0);
ad20d198 364 let backup_count = item["backup-count"].as_u64().unwrap();
812c6f87 365
1e9a94e5 366 let group = BackupGroup::new(btype, id);
812c6f87
DM
367
368 let path = group.group_path().to_str().unwrap().to_owned();
ad20d198 369
52c171e4
DM
370 let files = item["files"].as_array().unwrap().iter()
371 .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();
ad20d198 372
34a816cc 373 if output_format == "text" {
fa5d6977
DM
374 println!(
375 "{:20} | {} | {:5} | {}",
376 path,
377 BackupDir::backup_time_to_string(last_backup),
378 backup_count,
379 tools::join(&files, ' '),
380 );
34a816cc
DM
381 } else {
382 result.push(json!({
383 "backup-type": btype,
384 "backup-id": id,
385 "last-backup": epoch,
386 "backup-count": backup_count,
387 "files": files,
388 }));
389 }
812c6f87
DM
390 }
391
9aa3f682 392 if output_format != "text" { format_and_print_result(&result.into(), &output_format); }
34a816cc 393
812c6f87
DM
394 Ok(Value::Null)
395}
396
a47a02ae
DM
397#[api(
398 input: {
399 properties: {
400 repository: {
401 schema: REPO_URL_SCHEMA,
402 optional: true,
403 },
404 group: {
405 type: String,
406 description: "Backup group.",
407 optional: true,
408 },
409 "output-format": {
410 schema: OUTPUT_FORMAT,
411 optional: true,
412 },
413 }
414 }
415)]
416/// List backup snapshots.
417async fn list_snapshots(param: Value) -> Result<Value, Error> {
184f17af 418
2665cef7 419 let repo = extract_repository_from_value(&param)?;
184f17af 420
34a816cc
DM
421 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
422
d59dbeca 423 let client = connect(repo.host(), repo.user())?;
184f17af 424
42af4b8f
DM
425 let group = if let Some(path) = param["group"].as_str() {
426 Some(BackupGroup::parse(path)?)
427 } else {
428 None
429 };
184f17af 430
42af4b8f 431 let mut list = api_datastore_list_snapshots(&client, repo.store(), group).await?;
15c847f1 432
42af4b8f 433 list.sort_unstable_by(|a, b| a.backup_time.cmp(&b.backup_time));
184f17af 434
d0a03d40
DM
435 record_repository(&repo);
436
af9d4afc 437 if output_format != "text" {
42af4b8f 438 format_and_print_result(&serde_json::to_value(list)?, &output_format);
af9d4afc
DM
439 return Ok(Value::Null);
440 }
184f17af
DM
441
442 for item in list {
443
af9d4afc 444 let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
184f17af
DM
445
446 let path = snapshot.relative_path().to_str().unwrap().to_owned();
447
af9d4afc
DM
448 let files = item.files.iter()
449 .map(|v| strip_server_file_expenstion(&v))
450 .collect();
184f17af 451
af9d4afc
DM
452 let size_str = if let Some(size) = item.size {
453 size.to_string()
34a816cc 454 } else {
af9d4afc
DM
455 String::from("-")
456 };
457 println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
184f17af
DM
458 }
459
460 Ok(Value::Null)
461}
462
a47a02ae
DM
463#[api(
464 input: {
465 properties: {
466 repository: {
467 schema: REPO_URL_SCHEMA,
468 optional: true,
469 },
470 snapshot: {
471 type: String,
472 description: "Snapshot path.",
473 },
474 }
475 }
476)]
477/// Forget (remove) backup snapshots.
478async fn forget_snapshots(param: Value) -> Result<Value, Error> {
6f62c924 479
2665cef7 480 let repo = extract_repository_from_value(&param)?;
6f62c924
DM
481
482 let path = tools::required_string_param(&param, "snapshot")?;
483 let snapshot = BackupDir::parse(path)?;
484
d59dbeca 485 let mut client = connect(repo.host(), repo.user())?;
6f62c924 486
9e391bb7 487 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
6f62c924 488
8a8a4703
DM
489 let result = client.delete(&path, Some(json!({
490 "backup-type": snapshot.group().backup_type(),
491 "backup-id": snapshot.group().backup_id(),
492 "backup-time": snapshot.backup_time().timestamp(),
493 }))).await?;
6f62c924 494
d0a03d40
DM
495 record_repository(&repo);
496
6f62c924
DM
497 Ok(result)
498}
499
a47a02ae
DM
500#[api(
501 input: {
502 properties: {
503 repository: {
504 schema: REPO_URL_SCHEMA,
505 optional: true,
506 },
507 }
508 }
509)]
510/// Try to login. If successful, store ticket.
511async fn api_login(param: Value) -> Result<Value, Error> {
e240d8be
DM
512
513 let repo = extract_repository_from_value(&param)?;
514
d59dbeca 515 let client = connect(repo.host(), repo.user())?;
8a8a4703 516 client.login().await?;
e240d8be
DM
517
518 record_repository(&repo);
519
520 Ok(Value::Null)
521}
522
a47a02ae
DM
523#[api(
524 input: {
525 properties: {
526 repository: {
527 schema: REPO_URL_SCHEMA,
528 optional: true,
529 },
530 }
531 }
532)]
533/// Logout (delete stored ticket).
534fn api_logout(param: Value) -> Result<Value, Error> {
e240d8be
DM
535
536 let repo = extract_repository_from_value(&param)?;
537
538 delete_ticket_info(repo.host(), repo.user())?;
539
540 Ok(Value::Null)
541}
542
a47a02ae
DM
543#[api(
544 input: {
545 properties: {
546 repository: {
547 schema: REPO_URL_SCHEMA,
548 optional: true,
549 },
550 snapshot: {
551 type: String,
552 description: "Snapshot path.",
553 },
554 }
555 }
556)]
557/// Dump catalog.
558async fn dump_catalog(param: Value) -> Result<Value, Error> {
9049a8cf
DM
559
560 let repo = extract_repository_from_value(&param)?;
561
562 let path = tools::required_string_param(&param, "snapshot")?;
563 let snapshot = BackupDir::parse(path)?;
564
11377a47 565 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
9049a8cf
DM
566
567 let crypt_config = match keyfile {
568 None => None,
569 Some(path) => {
6d20a29d 570 let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
9025312a 571 Some(Arc::new(CryptConfig::new(key)?))
9049a8cf
DM
572 }
573 };
574
d59dbeca 575 let client = connect(repo.host(), repo.user())?;
9049a8cf 576
8a8a4703
DM
577 let client = BackupReader::start(
578 client,
579 crypt_config.clone(),
580 repo.store(),
581 &snapshot.group().backup_type(),
582 &snapshot.group().backup_id(),
583 snapshot.backup_time(),
584 true,
585 ).await?;
9049a8cf 586
8a8a4703 587 let manifest = client.download_manifest().await?;
d2267b11 588
8a8a4703 589 let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;
bf6e3217 590
8a8a4703 591 let most_used = index.find_most_used_chunks(8);
bf6e3217 592
8a8a4703 593 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
bf6e3217 594
8a8a4703 595 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
9049a8cf 596
8a8a4703
DM
597 let mut catalogfile = std::fs::OpenOptions::new()
598 .write(true)
599 .read(true)
600 .custom_flags(libc::O_TMPFILE)
601 .open("/tmp")?;
d2267b11 602
8a8a4703
DM
603 std::io::copy(&mut reader, &mut catalogfile)
604 .map_err(|err| format_err!("unable to download catalog - {}", err))?;
a84ef4c2 605
8a8a4703 606 catalogfile.seek(SeekFrom::Start(0))?;
9049a8cf 607
8a8a4703 608 let mut catalog_reader = CatalogReader::new(catalogfile);
9049a8cf 609
8a8a4703 610 catalog_reader.dump()?;
e9722f8b 611
8a8a4703 612 record_repository(&repo);
9049a8cf
DM
613
614 Ok(Value::Null)
615}
616
a47a02ae
DM
617#[api(
618 input: {
619 properties: {
620 repository: {
621 schema: REPO_URL_SCHEMA,
622 optional: true,
623 },
624 snapshot: {
625 type: String,
626 description: "Snapshot path.",
627 },
628 "output-format": {
629 schema: OUTPUT_FORMAT,
630 optional: true,
631 },
632 }
633 }
634)]
635/// List snapshot files.
636async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
52c171e4
DM
637
638 let repo = extract_repository_from_value(&param)?;
639
640 let path = tools::required_string_param(&param, "snapshot")?;
641 let snapshot = BackupDir::parse(path)?;
642
643 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
644
d59dbeca 645 let client = connect(repo.host(), repo.user())?;
52c171e4
DM
646
647 let path = format!("api2/json/admin/datastore/{}/files", repo.store());
648
8a8a4703
DM
649 let mut result = client.get(&path, Some(json!({
650 "backup-type": snapshot.group().backup_type(),
651 "backup-id": snapshot.group().backup_id(),
652 "backup-time": snapshot.backup_time().timestamp(),
653 }))).await?;
52c171e4
DM
654
655 record_repository(&repo);
656
8c70e3eb 657 let list: Value = result["data"].take();
52c171e4
DM
658
659 if output_format == "text" {
8c70e3eb
DM
660 for item in list.as_array().unwrap().iter() {
661 println!(
662 "{} {}",
663 strip_server_file_expenstion(item["filename"].as_str().unwrap()),
664 item["size"].as_u64().unwrap_or(0),
665 );
52c171e4
DM
666 }
667 } else {
8c70e3eb 668 format_and_print_result(&list, &output_format);
52c171e4
DM
669 }
670
671 Ok(Value::Null)
672}
673
a47a02ae 674#[api(
94913f35 675 input: {
a47a02ae
DM
676 properties: {
677 repository: {
678 schema: REPO_URL_SCHEMA,
679 optional: true,
680 },
94913f35
DM
681 "output-format": {
682 schema: OUTPUT_FORMAT,
683 optional: true,
684 },
685 },
686 },
a47a02ae
DM
687)]
688/// Start garbage collection for a specific repository.
689async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
8cc0d6af 690
2665cef7 691 let repo = extract_repository_from_value(&param)?;
e5f7def4 692 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
8cc0d6af 693
d59dbeca 694 let mut client = connect(repo.host(), repo.user())?;
8cc0d6af 695
d0a03d40 696 let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
8cc0d6af 697
8a8a4703 698 let result = client.post(&path, None).await?;
8cc0d6af 699
8a8a4703 700 record_repository(&repo);
d0a03d40 701
8a8a4703 702 view_task_result(client, result, &output_format).await?;
e5f7def4 703
e5f7def4 704 Ok(Value::Null)
8cc0d6af 705}
33d64b81 706
ae0be2dd
DM
707fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
708
255f378a 709 if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
ae0be2dd
DM
710 return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
711 }
712 bail!("unable to parse directory specification '{}'", value);
713}
714
bf6e3217
DM
715fn spawn_catalog_upload(
716 client: Arc<BackupWriter>,
717 crypt_config: Option<Arc<CryptConfig>>,
718) -> Result<
719 (
f1d99e3f 720 Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
bf6e3217
DM
721 tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
722 ), Error>
723{
f1d99e3f
DM
724 let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes
725 let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
bf6e3217
DM
726 let catalog_chunk_size = 512*1024;
727 let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));
728
f1d99e3f 729 let catalog = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));
bf6e3217
DM
730
731 let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();
732
733 tokio::spawn(async move {
734 let catalog_upload_result = client
735 .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
736 .await;
737
738 if let Err(ref err) = catalog_upload_result {
739 eprintln!("catalog upload error - {}", err);
740 client.cancel();
741 }
742
743 let _ = catalog_result_tx.send(catalog_upload_result);
744 });
745
746 Ok((catalog, catalog_result_rx))
747}
748
a47a02ae
DM
749#[api(
750 input: {
751 properties: {
752 backupspec: {
753 type: Array,
754 description: "List of backup source specifications ([<label.ext>:<path>] ...)",
755 items: {
756 schema: BACKUP_SOURCE_SCHEMA,
757 }
758 },
759 repository: {
760 schema: REPO_URL_SCHEMA,
761 optional: true,
762 },
763 "include-dev": {
764 description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
765 optional: true,
766 items: {
767 type: String,
768 description: "Path to file.",
769 }
770 },
771 keyfile: {
772 schema: KEYFILE_SCHEMA,
773 optional: true,
774 },
775 "skip-lost-and-found": {
776 type: Boolean,
777 description: "Skip lost+found directory.",
778 optional: true,
779 },
780 "backup-type": {
781 schema: BACKUP_TYPE_SCHEMA,
782 optional: true,
783 },
784 "backup-id": {
785 schema: BACKUP_ID_SCHEMA,
786 optional: true,
787 },
788 "backup-time": {
789 schema: BACKUP_TIME_SCHEMA,
790 optional: true,
791 },
792 "chunk-size": {
793 schema: CHUNK_SIZE_SCHEMA,
794 optional: true,
795 },
6fc053ed
CE
796 "entries-max": {
797 type: Integer,
798 description: "Max number of entries to hold in memory.",
799 optional: true,
800 default: pxar::ENCODER_MAX_ENTRIES as isize,
801 },
a47a02ae
DM
802 }
803 }
804)]
805/// Create (host) backup.
806async fn create_backup(
6049b71f
DM
807 param: Value,
808 _info: &ApiMethod,
dd5495d6 809 _rpcenv: &mut dyn RpcEnvironment,
6049b71f 810) -> Result<Value, Error> {
ff5d3707 811
2665cef7 812 let repo = extract_repository_from_value(&param)?;
ae0be2dd
DM
813
814 let backupspec_list = tools::required_array_param(&param, "backupspec")?;
a914a774 815
eed6db39
DM
816 let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
817
5b72c9b4
DM
818 let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);
819
219ef0e6
DM
820 let verbose = param["verbose"].as_bool().unwrap_or(false);
821
ca5d0b61
DM
822 let backup_time_opt = param["backup-time"].as_i64();
823
36898ffc 824 let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);
2d9d143a 825
247cdbce
DM
826 if let Some(size) = chunk_size_opt {
827 verify_chunk_size(size)?;
2d9d143a
DM
828 }
829
11377a47 830 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
6d0983db 831
f69adc81 832 let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());
fba30411 833
bbf9e7e9 834 let backup_type = param["backup-type"].as_str().unwrap_or("host");
ca5d0b61 835
2eeaacb9
DM
836 let include_dev = param["include-dev"].as_array();
837
6fc053ed
CE
838 let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
839
2eeaacb9
DM
840 let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
841
842 if let Some(include_dev) = include_dev {
843 if all_file_systems {
844 bail!("option 'all-file-systems' conflicts with option 'include-dev'");
845 }
846
847 let mut set = HashSet::new();
848 for path in include_dev {
849 let path = path.as_str().unwrap();
850 let stat = nix::sys::stat::stat(path)
851 .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
852 set.insert(stat.st_dev);
853 }
854 devices = Some(set);
855 }
856
ae0be2dd 857 let mut upload_list = vec![];
a914a774 858
79679c2d 859 enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };
6af905c1 860
bf6e3217
DM
861 let mut upload_catalog = false;
862
ae0be2dd
DM
863 for backupspec in backupspec_list {
864 let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;
bcd879cf 865
eb1804c5
DM
866 use std::os::unix::fs::FileTypeExt;
867
3fa71727
CE
868 let metadata = std::fs::metadata(filename)
869 .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
eb1804c5 870 let file_type = metadata.file_type();
23bb8780 871
4af0ee05 872 let extension = target.rsplit('.').next()
11377a47 873 .ok_or_else(|| format_err!("missing target file extenion '{}'", target))?;
bcd879cf 874
ec8a9bb9
DM
875 match extension {
876 "pxar" => {
877 if !file_type.is_dir() {
878 bail!("got unexpected file type (expected directory)");
879 }
4af0ee05 880 upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
bf6e3217 881 upload_catalog = true;
ec8a9bb9
DM
882 }
883 "img" => {
eb1804c5 884
ec8a9bb9
DM
885 if !(file_type.is_file() || file_type.is_block_device()) {
886 bail!("got unexpected file type (expected file or block device)");
887 }
eb1804c5 888
e18a6c9e 889 let size = image_size(&PathBuf::from(filename))?;
23bb8780 890
ec8a9bb9 891 if size == 0 { bail!("got zero-sized file '{}'", filename); }
ae0be2dd 892
4af0ee05 893 upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
ec8a9bb9
DM
894 }
895 "conf" => {
896 if !file_type.is_file() {
897 bail!("got unexpected file type (expected regular file)");
898 }
4af0ee05 899 upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
ec8a9bb9 900 }
79679c2d
DM
901 "log" => {
902 if !file_type.is_file() {
903 bail!("got unexpected file type (expected regular file)");
904 }
4af0ee05 905 upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
79679c2d 906 }
ec8a9bb9
DM
907 _ => {
908 bail!("got unknown archive extension '{}'", extension);
909 }
ae0be2dd
DM
910 }
911 }
912
11377a47 913 let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
ae0be2dd 914
d59dbeca 915 let client = connect(repo.host(), repo.user())?;
d0a03d40
DM
916 record_repository(&repo);
917
ca5d0b61
DM
918 println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
919
f69adc81 920 println!("Client name: {}", proxmox::tools::nodename());
ca5d0b61
DM
921
922 let start_time = Local::now();
923
7a6cfbd9 924 println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
51144821 925
bb823140
DM
926 let (crypt_config, rsa_encrypted_key) = match keyfile {
927 None => (None, None),
6d0983db 928 Some(path) => {
6d20a29d 929 let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
bb823140
DM
930
931 let crypt_config = CryptConfig::new(key)?;
932
933 let path = master_pubkey_path()?;
934 if path.exists() {
e18a6c9e 935 let pem_data = file_get_contents(&path)?;
bb823140
DM
936 let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
937 let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
938 (Some(Arc::new(crypt_config)), Some(enc_key))
939 } else {
940 (Some(Arc::new(crypt_config)), None)
941 }
6d0983db
DM
942 }
943 };
f98ac774 944
8a8a4703
DM
945 let client = BackupWriter::start(
946 client,
947 repo.store(),
948 backup_type,
949 &backup_id,
950 backup_time,
951 verbose,
952 ).await?;
953
954 let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
955 let mut manifest = BackupManifest::new(snapshot);
956
957 let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;
958
959 for (backup_type, filename, target, size) in upload_list {
960 match backup_type {
961 BackupType::CONFIG => {
962 println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
963 let stats = client
964 .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
965 .await?;
1e8da0a7 966 manifest.add_file(target, stats.size, stats.csum)?;
8a8a4703
DM
967 }
968 BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
969 println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
970 let stats = client
971 .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
972 .await?;
1e8da0a7 973 manifest.add_file(target, stats.size, stats.csum)?;
8a8a4703
DM
974 }
975 BackupType::PXAR => {
976 println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
977 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
978 let stats = backup_directory(
979 &client,
980 &filename,
981 &target,
982 chunk_size_opt,
983 devices.clone(),
984 verbose,
985 skip_lost_and_found,
986 crypt_config.clone(),
987 catalog.clone(),
6fc053ed 988 entries_max as usize,
8a8a4703 989 ).await?;
1e8da0a7 990 manifest.add_file(target, stats.size, stats.csum)?;
8a8a4703
DM
991 catalog.lock().unwrap().end_directory()?;
992 }
993 BackupType::IMAGE => {
994 println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
995 let stats = backup_image(
996 &client,
997 &filename,
998 &target,
999 size,
1000 chunk_size_opt,
1001 verbose,
1002 crypt_config.clone(),
1003 ).await?;
1e8da0a7 1004 manifest.add_file(target, stats.size, stats.csum)?;
6af905c1
DM
1005 }
1006 }
8a8a4703 1007 }
4818c8b6 1008
8a8a4703
DM
1009 // finalize and upload catalog
1010 if upload_catalog {
1011 let mutex = Arc::try_unwrap(catalog)
1012 .map_err(|_| format_err!("unable to get catalog (still used)"))?;
1013 let mut catalog = mutex.into_inner().unwrap();
bf6e3217 1014
8a8a4703 1015 catalog.finish()?;
2761d6a4 1016
8a8a4703 1017 drop(catalog); // close upload stream
2761d6a4 1018
8a8a4703 1019 let stats = catalog_result_rx.await??;
9d135fe6 1020
1e8da0a7 1021 manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum)?;
8a8a4703 1022 }
2761d6a4 1023
8a8a4703
DM
1024 if let Some(rsa_encrypted_key) = rsa_encrypted_key {
1025 let target = "rsa-encrypted.key";
1026 println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
1027 let stats = client
1028 .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
1029 .await?;
1e8da0a7 1030 manifest.add_file(format!("{}.blob", target), stats.size, stats.csum)?;
8a8a4703
DM
1031
1032 // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
1033 /*
1034 let mut buffer2 = vec![0u8; rsa.size() as usize];
1035 let pem_data = file_get_contents("master-private.pem")?;
1036 let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
1037 let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
1038 println!("TEST {} {:?}", len, buffer2);
1039 */
1040 }
9f46c7de 1041
8a8a4703
DM
1042 // create manifest (index.json)
1043 let manifest = manifest.into_json();
2c3891d1 1044
8a8a4703
DM
1045 println!("Upload index.json to '{:?}'", repo);
1046 let manifest = serde_json::to_string_pretty(&manifest)?.into();
1047 client
1048 .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
1049 .await?;
2c3891d1 1050
8a8a4703 1051 client.finish().await?;
c4ff3dce 1052
8a8a4703
DM
1053 let end_time = Local::now();
1054 let elapsed = end_time.signed_duration_since(start_time);
1055 println!("Duration: {}", elapsed);
3ec3ec3f 1056
8a8a4703 1057 println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
3d5c11e5 1058
8a8a4703 1059 Ok(Value::Null)
f98ea63d
DM
1060}
1061
d0a03d40 1062fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
f98ea63d
DM
1063
1064 let mut result = vec![];
1065
1066 let data: Vec<&str> = arg.splitn(2, ':').collect();
1067
bff11030 1068 if data.len() != 2 {
8968258b
DM
1069 result.push(String::from("root.pxar:/"));
1070 result.push(String::from("etc.pxar:/etc"));
bff11030
DM
1071 return result;
1072 }
f98ea63d 1073
496a6784 1074 let files = tools::complete_file_name(data[1], param);
f98ea63d
DM
1075
1076 for file in files {
1077 result.push(format!("{}:{}", data[0], file));
1078 }
1079
1080 result
ff5d3707 1081}
1082
88892ea8
DM
1083fn dump_image<W: Write>(
1084 client: Arc<BackupReader>,
1085 crypt_config: Option<Arc<CryptConfig>>,
1086 index: FixedIndexReader,
1087 mut writer: W,
fd04ca7a 1088 verbose: bool,
88892ea8
DM
1089) -> Result<(), Error> {
1090
1091 let most_used = index.find_most_used_chunks(8);
1092
1093 let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1094
1095 // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
1096 // and thus slows down reading. Instead, directly use RemoteChunkReader
fd04ca7a
DM
1097 let mut per = 0;
1098 let mut bytes = 0;
1099 let start_time = std::time::Instant::now();
1100
88892ea8
DM
1101 for pos in 0..index.index_count() {
1102 let digest = index.index_digest(pos).unwrap();
1103 let raw_data = chunk_reader.read_chunk(&digest)?;
1104 writer.write_all(&raw_data)?;
fd04ca7a
DM
1105 bytes += raw_data.len();
1106 if verbose {
1107 let next_per = ((pos+1)*100)/index.index_count();
1108 if per != next_per {
1109 eprintln!("progress {}% (read {} bytes, duration {} sec)",
1110 next_per, bytes, start_time.elapsed().as_secs());
1111 per = next_per;
1112 }
1113 }
88892ea8
DM
1114 }
1115
fd04ca7a
DM
1116 let end_time = std::time::Instant::now();
1117 let elapsed = end_time.duration_since(start_time);
1118 eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
1119 bytes,
1120 elapsed.as_secs_f64(),
1121 bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
1122 );
1123
1124
88892ea8
DM
1125 Ok(())
1126}
1127
a47a02ae
DM
1128#[api(
1129 input: {
1130 properties: {
1131 repository: {
1132 schema: REPO_URL_SCHEMA,
1133 optional: true,
1134 },
1135 snapshot: {
1136 type: String,
1137 description: "Group/Snapshot path.",
1138 },
1139 "archive-name": {
1140 description: "Backup archive name.",
1141 type: String,
1142 },
1143 target: {
1144 type: String,
90c815bf 1145 description: r###"Target directory path. Use '-' to write to standard output.
8a8a4703 1146
5eee6d89 1147We do not extraxt '.pxar' archives when writing to standard output.
8a8a4703 1148
a47a02ae
DM
1149"###
1150 },
1151 "allow-existing-dirs": {
1152 type: Boolean,
1153 description: "Do not fail if directories already exists.",
1154 optional: true,
1155 },
1156 keyfile: {
1157 schema: KEYFILE_SCHEMA,
1158 optional: true,
1159 },
1160 }
1161 }
1162)]
1163/// Restore backup repository.
1164async fn restore(param: Value) -> Result<Value, Error> {
2665cef7 1165 let repo = extract_repository_from_value(&param)?;
9f912493 1166
86eda3eb
DM
1167 let verbose = param["verbose"].as_bool().unwrap_or(false);
1168
46d5aa0a
DM
1169 let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);
1170
d5c34d98
DM
1171 let archive_name = tools::required_string_param(&param, "archive-name")?;
1172
d59dbeca 1173 let client = connect(repo.host(), repo.user())?;
d0a03d40 1174
d0a03d40 1175 record_repository(&repo);
d5c34d98 1176
9f912493 1177 let path = tools::required_string_param(&param, "snapshot")?;
9f912493 1178
86eda3eb 1179 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
d5c34d98 1180 let group = BackupGroup::parse(path)?;
27c9affb 1181 api_datastore_latest_snapshot(&client, repo.store(), group).await?
d5c34d98
DM
1182 } else {
1183 let snapshot = BackupDir::parse(path)?;
86eda3eb
DM
1184 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
1185 };
9f912493 1186
d5c34d98 1187 let target = tools::required_string_param(&param, "target")?;
bf125261 1188 let target = if target == "-" { None } else { Some(target) };
2ae7d196 1189
11377a47 1190 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
2ae7d196 1191
86eda3eb
DM
1192 let crypt_config = match keyfile {
1193 None => None,
1194 Some(path) => {
6d20a29d 1195 let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
86eda3eb
DM
1196 Some(Arc::new(CryptConfig::new(key)?))
1197 }
1198 };
d5c34d98 1199
afb4cd28
DM
1200 let server_archive_name = if archive_name.ends_with(".pxar") {
1201 format!("{}.didx", archive_name)
1202 } else if archive_name.ends_with(".img") {
1203 format!("{}.fidx", archive_name)
1204 } else {
f8100e96 1205 format!("{}.blob", archive_name)
afb4cd28 1206 };
9f912493 1207
296c50ba
DM
1208 let client = BackupReader::start(
1209 client,
1210 crypt_config.clone(),
1211 repo.store(),
1212 &backup_type,
1213 &backup_id,
1214 backup_time,
1215 true,
1216 ).await?;
86eda3eb 1217
f06b820a 1218 let manifest = client.download_manifest().await?;
02fcf372 1219
ad6e5a6f 1220 if server_archive_name == MANIFEST_BLOB_NAME {
f06b820a 1221 let backup_index_data = manifest.into_json().to_string();
02fcf372 1222 if let Some(target) = target {
feaa1ad3 1223 replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
02fcf372
DM
1224 } else {
1225 let stdout = std::io::stdout();
1226 let mut writer = stdout.lock();
296c50ba 1227 writer.write_all(backup_index_data.as_bytes())
02fcf372
DM
1228 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
1229 }
1230
1231 } else if server_archive_name.ends_with(".blob") {
d2267b11 1232
bb19af73 1233 let mut reader = client.download_blob(&manifest, &server_archive_name).await?;
f8100e96 1234
bf125261 1235 if let Some(target) = target {
0d986280
DM
1236 let mut writer = std::fs::OpenOptions::new()
1237 .write(true)
1238 .create(true)
1239 .create_new(true)
1240 .open(target)
1241 .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
1242 std::io::copy(&mut reader, &mut writer)?;
bf125261
DM
1243 } else {
1244 let stdout = std::io::stdout();
1245 let mut writer = stdout.lock();
0d986280 1246 std::io::copy(&mut reader, &mut writer)
bf125261
DM
1247 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
1248 }
f8100e96
DM
1249
1250 } else if server_archive_name.ends_with(".didx") {
86eda3eb 1251
c3d84a22 1252 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
df65bd3d 1253
f4bf7dfc
DM
1254 let most_used = index.find_most_used_chunks(8);
1255
1256 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1257
afb4cd28 1258 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
86eda3eb 1259
bf125261 1260 if let Some(target) = target {
86eda3eb 1261
47651f95 1262 let feature_flags = pxar::flags::DEFAULT;
f701d033
DM
1263 let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
1264 decoder.set_callback(move |path| {
bf125261 1265 if verbose {
fd04ca7a 1266 eprintln!("{:?}", path);
bf125261
DM
1267 }
1268 Ok(())
1269 });
6a879109
CE
1270 decoder.set_allow_existing_dirs(allow_existing_dirs);
1271
fa7e957c 1272 decoder.restore(Path::new(target), &Vec::new())?;
bf125261 1273 } else {
88892ea8
DM
1274 let mut writer = std::fs::OpenOptions::new()
1275 .write(true)
1276 .open("/dev/stdout")
1277 .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;
afb4cd28 1278
bf125261
DM
1279 std::io::copy(&mut reader, &mut writer)
1280 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
1281 }
afb4cd28 1282 } else if server_archive_name.ends_with(".fidx") {
afb4cd28 1283
72050500 1284 let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
df65bd3d 1285
88892ea8
DM
1286 let mut writer = if let Some(target) = target {
1287 std::fs::OpenOptions::new()
bf125261
DM
1288 .write(true)
1289 .create(true)
1290 .create_new(true)
1291 .open(target)
88892ea8 1292 .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
bf125261 1293 } else {
88892ea8
DM
1294 std::fs::OpenOptions::new()
1295 .write(true)
1296 .open("/dev/stdout")
1297 .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
1298 };
afb4cd28 1299
fd04ca7a 1300 dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
88892ea8
DM
1301
1302 } else {
f8100e96 1303 bail!("unknown archive file extension (expected .pxar of .img)");
3031e44c 1304 }
fef44d4f
DM
1305
1306 Ok(Value::Null)
45db6f89
DM
1307}
1308
a47a02ae
DM
1309#[api(
1310 input: {
1311 properties: {
1312 repository: {
1313 schema: REPO_URL_SCHEMA,
1314 optional: true,
1315 },
1316 snapshot: {
1317 type: String,
1318 description: "Group/Snapshot path.",
1319 },
1320 logfile: {
1321 type: String,
1322 description: "The path to the log file you want to upload.",
1323 },
1324 keyfile: {
1325 schema: KEYFILE_SCHEMA,
1326 optional: true,
1327 },
1328 }
1329 }
1330)]
1331/// Upload backup log file.
1332async fn upload_log(param: Value) -> Result<Value, Error> {
ec34f7eb
DM
1333
1334 let logfile = tools::required_string_param(&param, "logfile")?;
1335 let repo = extract_repository_from_value(&param)?;
1336
1337 let snapshot = tools::required_string_param(&param, "snapshot")?;
1338 let snapshot = BackupDir::parse(snapshot)?;
1339
d59dbeca 1340 let mut client = connect(repo.host(), repo.user())?;
ec34f7eb 1341
11377a47 1342 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
ec34f7eb
DM
1343
1344 let crypt_config = match keyfile {
1345 None => None,
1346 Some(path) => {
6d20a29d 1347 let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
ec34f7eb 1348 let crypt_config = CryptConfig::new(key)?;
9025312a 1349 Some(Arc::new(crypt_config))
ec34f7eb
DM
1350 }
1351 };
1352
e18a6c9e 1353 let data = file_get_contents(logfile)?;
ec34f7eb 1354
7123ff7d 1355 let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;
ec34f7eb
DM
1356
1357 let raw_data = blob.into_inner();
1358
1359 let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());
1360
1361 let args = json!({
1362 "backup-type": snapshot.group().backup_type(),
1363 "backup-id": snapshot.group().backup_id(),
1364 "backup-time": snapshot.backup_time().timestamp(),
1365 });
1366
1367 let body = hyper::Body::from(raw_data);
1368
8a8a4703 1369 client.upload("application/octet-stream", body, &path, Some(args)).await
ec34f7eb
DM
1370}
1371
a47a02ae
DM
1372#[api(
1373 input: {
1374 properties: {
1375 repository: {
1376 schema: REPO_URL_SCHEMA,
1377 optional: true,
1378 },
1379 group: {
1380 type: String,
1381 description: "Backup group.",
1382 },
1383 "output-format": {
1384 schema: OUTPUT_FORMAT,
1385 optional: true,
1386 },
1387 "dry-run": {
1388 type: Boolean,
1389 description: "Just show what prune would do, but do not delete anything.",
1390 optional: true,
1391 },
1392 }
1393 }
1394)]
1395/// Prune a backup repository.
1396async fn prune(mut param: Value) -> Result<Value, Error> {
83b7db02 1397
2665cef7 1398 let repo = extract_repository_from_value(&param)?;
83b7db02 1399
d59dbeca 1400 let mut client = connect(repo.host(), repo.user())?;
83b7db02 1401
d0a03d40 1402 let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
83b7db02 1403
9fdc3ef4
DM
1404 let group = tools::required_string_param(&param, "group")?;
1405 let group = BackupGroup::parse(group)?;
163e9bbe 1406 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
9fdc3ef4 1407
ea7a7ef2
DM
1408 param.as_object_mut().unwrap().remove("repository");
1409 param.as_object_mut().unwrap().remove("group");
163e9bbe 1410 param.as_object_mut().unwrap().remove("output-format");
ea7a7ef2
DM
1411
1412 param["backup-type"] = group.backup_type().into();
1413 param["backup-id"] = group.backup_id().into();
83b7db02 1414
87c42375 1415 let result = client.post(&path, Some(param)).await?;
74fa81b8 1416
87c42375 1417 record_repository(&repo);
3b03abfe 1418
87c42375 1419 view_task_result(client, result, &output_format).await?;
d0a03d40 1420
43a406fd 1421 Ok(Value::Null)
83b7db02
DM
1422}
1423
a47a02ae
DM
1424#[api(
1425 input: {
1426 properties: {
1427 repository: {
1428 schema: REPO_URL_SCHEMA,
1429 optional: true,
1430 },
1431 "output-format": {
1432 schema: OUTPUT_FORMAT,
1433 optional: true,
1434 },
1435 }
1436 }
1437)]
1438/// Get repository status.
1439async fn status(param: Value) -> Result<Value, Error> {
34a816cc
DM
1440
1441 let repo = extract_repository_from_value(&param)?;
1442
1443 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
1444
d59dbeca 1445 let client = connect(repo.host(), repo.user())?;
34a816cc
DM
1446
1447 let path = format!("api2/json/admin/datastore/{}/status", repo.store());
1448
1dc117bb 1449 let mut result = client.get(&path, None).await?;
34a816cc
DM
1450
1451 record_repository(&repo);
1452
1453 if output_format == "text" {
1dc117bb
DM
1454 let result: StorageStatus = serde_json::from_value(result["data"].take())?;
1455
1456 let roundup = result.total/200;
34a816cc
DM
1457
1458 println!(
1459 "total: {} used: {} ({} %) available: {}",
1dc117bb
DM
1460 result.total,
1461 result.used,
1462 ((result.used+roundup)*100)/result.total,
1463 result.avail,
34a816cc
DM
1464 );
1465 } else {
1dc117bb 1466 format_and_print_result(&result["data"], &output_format);
34a816cc
DM
1467 }
1468
1469 Ok(Value::Null)
1470}
1471
5a2df000 1472// like get, but simply ignore errors and return Null instead
e9722f8b 1473async fn try_get(repo: &BackupRepository, url: &str) -> Value {
024f11bb 1474
d59dbeca
DM
1475
1476 let options = HttpClientOptions::new()
1477 .verify_cert(false) // fixme: set verify to true, but howto handle fingerprint ??
1478 .interactive(false)
1479 .ticket_cache(true);
1480
1481 let client = match HttpClient::new(repo.host(), repo.user(), options) {
45cdce06
DM
1482 Ok(v) => v,
1483 _ => return Value::Null,
1484 };
b2388518 1485
e9722f8b 1486 let mut resp = match client.get(url, None).await {
b2388518
DM
1487 Ok(v) => v,
1488 _ => return Value::Null,
1489 };
1490
1491 if let Some(map) = resp.as_object_mut() {
1492 if let Some(data) = map.remove("data") {
1493 return data;
1494 }
1495 }
1496 Value::Null
1497}
1498
/// Shell-completion entry point listing backup groups ("type/id") on the server.
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    // Completion callbacks are synchronous - drive the async helper to completion.
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}
1502
1503async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
024f11bb 1504
b2388518
DM
1505 let mut result = vec![];
1506
2665cef7 1507 let repo = match extract_repository_from_map(param) {
b2388518 1508 Some(v) => v,
024f11bb
DM
1509 _ => return result,
1510 };
1511
b2388518
DM
1512 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
1513
e9722f8b 1514 let data = try_get(&repo, &path).await;
b2388518
DM
1515
1516 if let Some(list) = data.as_array() {
024f11bb 1517 for item in list {
98f0b972
DM
1518 if let (Some(backup_id), Some(backup_type)) =
1519 (item["backup-id"].as_str(), item["backup-type"].as_str())
1520 {
1521 result.push(format!("{}/{}", backup_type, backup_id));
024f11bb
DM
1522 }
1523 }
1524 }
1525
1526 result
1527}
1528
/// Shell-completion entry point for arguments that may be a group or a snapshot.
fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    // Completion callbacks are synchronous - drive the async helper to completion.
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}
1532
1533async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
b2388518 1534
b2388518 1535 if arg.matches('/').count() < 2 {
e9722f8b 1536 let groups = complete_backup_group_do(param).await;
543a260f 1537 let mut result = vec![];
b2388518
DM
1538 for group in groups {
1539 result.push(group.to_string());
1540 result.push(format!("{}/", group));
1541 }
1542 return result;
1543 }
1544
e9722f8b 1545 complete_backup_snapshot_do(param).await
543a260f 1546}
b2388518 1547
/// Shell-completion entry point listing full snapshot paths on the server.
fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    // Completion callbacks are synchronous - drive the async helper to completion.
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}
1551
1552async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
543a260f
DM
1553
1554 let mut result = vec![];
1555
1556 let repo = match extract_repository_from_map(param) {
1557 Some(v) => v,
1558 _ => return result,
1559 };
1560
1561 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
b2388518 1562
e9722f8b 1563 let data = try_get(&repo, &path).await;
b2388518
DM
1564
1565 if let Some(list) = data.as_array() {
1566 for item in list {
1567 if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
1568 (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
1569 {
1570 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
1571 result.push(snapshot.relative_path().to_str().unwrap().to_owned());
1572 }
1573 }
1574 }
1575
1576 result
1577}
1578
/// Shell-completion entry point listing server-side file names of a snapshot.
fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    // Completion callbacks are synchronous - drive the async helper to completion.
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}
1582
1583async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
08dc340a
DM
1584
1585 let mut result = vec![];
1586
2665cef7 1587 let repo = match extract_repository_from_map(param) {
08dc340a
DM
1588 Some(v) => v,
1589 _ => return result,
1590 };
1591
1592 let snapshot = match param.get("snapshot") {
1593 Some(path) => {
1594 match BackupDir::parse(path) {
1595 Ok(v) => v,
1596 _ => return result,
1597 }
1598 }
1599 _ => return result,
1600 };
1601
1602 let query = tools::json_object_to_query(json!({
1603 "backup-type": snapshot.group().backup_type(),
1604 "backup-id": snapshot.group().backup_id(),
1605 "backup-time": snapshot.backup_time().timestamp(),
1606 })).unwrap();
1607
1608 let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
1609
e9722f8b 1610 let data = try_get(&repo, &path).await;
08dc340a
DM
1611
1612 if let Some(list) = data.as_array() {
1613 for item in list {
c4f025eb 1614 if let Some(filename) = item["filename"].as_str() {
08dc340a
DM
1615 result.push(filename.to_owned());
1616 }
1617 }
1618 }
1619
45db6f89
DM
1620 result
1621}
1622
1623fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
52c171e4 1624 complete_server_file_name(arg, param)
e9722f8b
WB
1625 .iter()
1626 .map(|v| strip_server_file_expenstion(&v))
1627 .collect()
08dc340a
DM
1628}
1629
0ec9e1b0
DM
1630fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
1631 complete_server_file_name(arg, param)
1632 .iter()
1633 .filter_map(|v| {
1634 let name = strip_server_file_expenstion(&v);
1635 if name.ends_with(".pxar") {
1636 Some(name)
1637 } else {
1638 None
1639 }
1640 })
1641 .collect()
1642}
1643
49811347
DM
/// Offer chunk-size candidates: powers of two from 64 up to 4096.
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = Vec::new();

    let mut size = 64;
    while size <= 4096 {
        result.push(size.to_string());
        size *= 2;
    }

    result
}
1657
826f309b 1658fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
ff5d3707 1659
f2401311
DM
1660 // fixme: implement other input methods
1661
1662 use std::env::VarError::*;
1663 match std::env::var("PBS_ENCRYPTION_PASSWORD") {
826f309b 1664 Ok(p) => return Ok(p.as_bytes().to_vec()),
f2401311
DM
1665 Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
1666 Err(NotPresent) => {
1667 // Try another method
1668 }
1669 }
1670
1671 // If we're on a TTY, query the user for a password
1672 if crate::tools::tty::stdin_isatty() {
826f309b 1673 return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
f2401311
DM
1674 }
1675
1676 bail!("no password input mechanism available");
1677}
1678
ac716234
DM
1679fn key_create(
1680 param: Value,
1681 _info: &ApiMethod,
1682 _rpcenv: &mut dyn RpcEnvironment,
1683) -> Result<Value, Error> {
1684
9b06db45
DM
1685 let path = tools::required_string_param(&param, "path")?;
1686 let path = PathBuf::from(path);
ac716234 1687
181f097a 1688 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
ac716234
DM
1689
1690 let key = proxmox::sys::linux::random_data(32)?;
1691
181f097a
DM
1692 if kdf == "scrypt" {
1693 // always read passphrase from tty
1694 if !crate::tools::tty::stdin_isatty() {
1695 bail!("unable to read passphrase - no tty");
1696 }
ac716234 1697
cbe01dc5 1698 let password = crate::tools::tty::read_and_verify_password("Encryption Key Password: ")?;
181f097a 1699
ab44acff 1700 let key_config = encrypt_key_with_passphrase(&key, &password)?;
37c5a175 1701
ab44acff 1702 store_key_config(&path, false, key_config)?;
181f097a
DM
1703
1704 Ok(Value::Null)
1705 } else if kdf == "none" {
1706 let created = Local.timestamp(Local::now().timestamp(), 0);
1707
1708 store_key_config(&path, false, KeyConfig {
1709 kdf: None,
1710 created,
ab44acff 1711 modified: created,
181f097a
DM
1712 data: key,
1713 })?;
1714
1715 Ok(Value::Null)
1716 } else {
1717 unreachable!();
1718 }
ac716234
DM
1719}
1720
9f46c7de
DM
1721fn master_pubkey_path() -> Result<PathBuf, Error> {
1722 let base = BaseDirectories::with_prefix("proxmox-backup")?;
1723
1724 // usually $HOME/.config/proxmox-backup/master-public.pem
1725 let path = base.place_config_file("master-public.pem")?;
1726
1727 Ok(path)
1728}
1729
3ea8bfc9
DM
1730fn key_import_master_pubkey(
1731 param: Value,
1732 _info: &ApiMethod,
1733 _rpcenv: &mut dyn RpcEnvironment,
1734) -> Result<Value, Error> {
1735
1736 let path = tools::required_string_param(&param, "path")?;
1737 let path = PathBuf::from(path);
1738
e18a6c9e 1739 let pem_data = file_get_contents(&path)?;
3ea8bfc9
DM
1740
1741 if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
1742 bail!("Unable to decode PEM data - {}", err);
1743 }
1744
9f46c7de 1745 let target_path = master_pubkey_path()?;
3ea8bfc9 1746
feaa1ad3 1747 replace_file(&target_path, &pem_data, CreateOptions::new())?;
3ea8bfc9
DM
1748
1749 println!("Imported public master key to {:?}", target_path);
1750
1751 Ok(Value::Null)
1752}
1753
37c5a175
DM
1754fn key_create_master_key(
1755 _param: Value,
1756 _info: &ApiMethod,
1757 _rpcenv: &mut dyn RpcEnvironment,
1758) -> Result<Value, Error> {
1759
1760 // we need a TTY to query the new password
1761 if !crate::tools::tty::stdin_isatty() {
1762 bail!("unable to create master key - no tty");
1763 }
1764
1765 let rsa = openssl::rsa::Rsa::generate(4096)?;
1766 let pkey = openssl::pkey::PKey::from_rsa(rsa)?;
1767
37c5a175 1768
cbe01dc5 1769 let password = String::from_utf8(crate::tools::tty::read_and_verify_password("Master Key Password: ")?)?;
37c5a175
DM
1770
1771 let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
1772 let filename_pub = "master-public.pem";
1773 println!("Writing public master key to {}", filename_pub);
feaa1ad3 1774 replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;
37c5a175
DM
1775
1776 let cipher = openssl::symm::Cipher::aes_256_cbc();
cbe01dc5 1777 let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;
37c5a175
DM
1778
1779 let filename_priv = "master-private.pem";
1780 println!("Writing private master key to {}", filename_priv);
feaa1ad3 1781 replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;
37c5a175
DM
1782
1783 Ok(Value::Null)
1784}
ac716234
DM
1785
/// Change the passphrase protecting an on-disk encryption key.
///
/// Loads and decrypts the key at `path` (current passphrase or
/// PBS_ENCRYPTION_PASSWORD), then re-stores it with the requested kdf:
/// "scrypt" re-encrypts under a newly prompted passphrase, "none" stores
/// it unencrypted. The key's original creation timestamp is preserved.
fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let password = crate::tools::tty::read_and_verify_password("New Password: ")?;

        let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key.to_vec(),
        })?;

        Ok(Value::Null)
    } else {
        // kdf values are restricted by the CLI schema to "scrypt" | "none".
        unreachable!();
    }
}
1829
/// Build the CLI command map for the key-management subcommands:
/// create, create-master-key, import-master-pubkey, change-passphrase.
fn key_mgmt_cli() -> CliCommandMap {

    // Shared schema for the optional "kdf" parameter of create/change-passphrase.
    const KDF_SCHEMA: Schema =
        StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
            .format(&ApiStringFormat::Enum(&["scrypt", "none"]))
            .default("scrypt")
            .schema();

    // #[sortable] + sorted!() keep the const property lists in sorted order,
    // as required by ObjectSchema.
    #[sortable]
    const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create),
        &ObjectSchema::new(
            "Create a new encryption key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    #[sortable]
    const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_change_passphrase),
        &ObjectSchema::new(
            "Change the passphrase required to decrypt the key.",
            &sorted!([
                ("path", false, &StringSchema::new("File system path.").schema()),
                ("kdf", true, &KDF_SCHEMA),
            ]),
        )
    );

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_create_master_key),
        &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
    );

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);

    #[sortable]
    const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&key_import_master_pubkey),
        &ObjectSchema::new(
            "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
            &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
        )
    );

    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
}
1896
70235f72
CE
/// Mount a pxar archive via FUSE.
///
/// With --verbose the process stays in the foreground. Otherwise it
/// daemonizes: the fork happens BEFORE the async runtime is created
/// (per the original comment, forking after runtime creation causes
/// trouble), and a pipe blocks the parent until the child has finished
/// mount setup.
fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in foreground with debug output enabled as None is
        // passed for the RawFd.
        return proxmox_backup::tools::runtime::main(mount_do(param, None));
    }

    // Process should be deamonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            // Parent: close the write end, wait for the child's ready signal.
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            // Child: close the read end, detach from the session, run the mount.
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}
1927
/// Async worker behind the `mount` command.
///
/// Downloads the dynamic index of a `.pxar` archive and serves it through a
/// FUSE session at `target`.
///
/// * `param` - CLI/API parameters (`snapshot`, `archive-name`, `target`,
///   optional `repository` and `keyfile`).
/// * `pipe` - write end of the daemonization pipe. `Some(fd)` means we are the
///   forked child: after mounting we redirect stdio to /dev/null and write one
///   byte to the pipe so the parent (blocked in `mount`) may exit. `None`
///   means foreground mode (`--verbose`).
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    // A single '/' means a group path ("type/id"): resolve to the latest
    // snapshot; otherwise parse the full "type/id/time" snapshot path.
    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    // Optional client-side encryption key (prompts for its password).
    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    // Only pxar archives can be mounted; they are stored as dynamic indexes.
    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    // Manifest is needed to verify the downloaded index checksum.
    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let decoder = pxar::Decoder::new(reader)?;
        let options = OsStr::new("ro,default_permissions");
        // verbose (third arg) only in foreground mode, i.e. when there is no pipe.
        let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
            .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        // Mount the session but do not call fuse daemonize as this would cause
        // issues with the runtime after the fork (we forked ourselves already).
        let deamonize = false;
        session.mount(&Path::new(target), deamonize)?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of daemon by redirecting file descriptors
            // 0/1/2 to /dev/null; close the scratch fd unless it already
            // landed on a std fd.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it
            // can terminate (it blocks reading this pipe).
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        let multithreaded = true;
        session.run_loop(multithreaded)?;
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}
2015
78d54360
WB
2016#[api(
2017 input: {
2018 properties: {
2019 "snapshot": {
2020 type: String,
2021 description: "Group/Snapshot path.",
2022 },
2023 "archive-name": {
2024 type: String,
2025 description: "Backup archive name.",
2026 },
2027 "repository": {
2028 optional: true,
2029 schema: REPO_URL_SCHEMA,
2030 },
2031 "keyfile": {
2032 optional: true,
2033 type: String,
2034 description: "Path to encryption key.",
2035 },
2036 },
2037 },
2038)]
2039/// Shell to interactively inspect and restore snapshots.
2040async fn catalog_shell(param: Value) -> Result<(), Error> {
3cf73c4e 2041 let repo = extract_repository_from_value(&param)?;
d59dbeca 2042 let client = connect(repo.host(), repo.user())?;
3cf73c4e
CE
2043 let path = tools::required_string_param(&param, "snapshot")?;
2044 let archive_name = tools::required_string_param(&param, "archive-name")?;
2045
2046 let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
2047 let group = BackupGroup::parse(path)?;
27c9affb 2048 api_datastore_latest_snapshot(&client, repo.store(), group).await?
3cf73c4e
CE
2049 } else {
2050 let snapshot = BackupDir::parse(path)?;
2051 (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
2052 };
2053
2054 let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
2055 let crypt_config = match keyfile {
2056 None => None,
2057 Some(path) => {
6d20a29d 2058 let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
3cf73c4e
CE
2059 Some(Arc::new(CryptConfig::new(key)?))
2060 }
2061 };
2062
2063 let server_archive_name = if archive_name.ends_with(".pxar") {
2064 format!("{}.didx", archive_name)
2065 } else {
2066 bail!("Can only mount pxar archives.");
2067 };
2068
2069 let client = BackupReader::start(
2070 client,
2071 crypt_config.clone(),
2072 repo.store(),
2073 &backup_type,
2074 &backup_id,
2075 backup_time,
2076 true,
2077 ).await?;
2078
2079 let tmpfile = std::fs::OpenOptions::new()
2080 .write(true)
2081 .read(true)
2082 .custom_flags(libc::O_TMPFILE)
2083 .open("/tmp")?;
2084
2085 let manifest = client.download_manifest().await?;
2086
2087 let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
2088 let most_used = index.find_most_used_chunks(8);
2089 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
2090 let reader = BufferedDynamicReader::new(index, chunk_reader);
f701d033
DM
2091 let mut decoder = pxar::Decoder::new(reader)?;
2092 decoder.set_callback(|path| {
2093 println!("{:?}", path);
2094 Ok(())
2095 });
3cf73c4e
CE
2096
2097 let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
2098 let index = DynamicIndexReader::new(tmpfile)
2099 .map_err(|err| format_err!("unable to read catalog index - {}", err))?;
2100
2101 // Note: do not use values stored in index (not trusted) - instead, computed them again
2102 let (csum, size) = index.compute_csum();
2103 manifest.verify_file(CATALOG_NAME, &csum, size)?;
2104
2105 let most_used = index.find_most_used_chunks(8);
2106 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
2107 let mut reader = BufferedDynamicReader::new(index, chunk_reader);
2108 let mut catalogfile = std::fs::OpenOptions::new()
2109 .write(true)
2110 .read(true)
2111 .custom_flags(libc::O_TMPFILE)
2112 .open("/tmp")?;
2113
2114 std::io::copy(&mut reader, &mut catalogfile)
2115 .map_err(|err| format_err!("unable to download catalog - {}", err))?;
2116
2117 catalogfile.seek(SeekFrom::Start(0))?;
2118 let catalog_reader = CatalogReader::new(catalogfile);
2119 let state = Shell::new(
2120 catalog_reader,
2121 &server_archive_name,
2122 decoder,
2123 )?;
2124
2125 println!("Starting interactive shell");
2126 state.shell()?;
2127
2128 record_repository(&repo);
2129
78d54360 2130 Ok(())
3cf73c4e
CE
2131}
2132
1c6ad6ef 2133fn catalog_mgmt_cli() -> CliCommandMap {
78d54360 2134 let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
1c6ad6ef
DM
2135 .arg_param(&["snapshot", "archive-name"])
2136 .completion_cb("repository", complete_repository)
0ec9e1b0 2137 .completion_cb("archive-name", complete_pxar_archive_name)
1c6ad6ef
DM
2138 .completion_cb("snapshot", complete_group_or_snapshot);
2139
1c6ad6ef
DM
2140 let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
2141 .arg_param(&["snapshot"])
2142 .completion_cb("repository", complete_repository)
2143 .completion_cb("snapshot", complete_backup_snapshot);
2144
2145 CliCommandMap::new()
48ef3c33
DM
2146 .insert("dump", catalog_dump_cmd_def)
2147 .insert("shell", catalog_shell_cmd_def)
1c6ad6ef
DM
2148}
2149
5830c205
DM
2150#[api(
2151 input: {
2152 properties: {
2153 repository: {
2154 schema: REPO_URL_SCHEMA,
2155 optional: true,
2156 },
2157 limit: {
2158 description: "The maximal number of tasks to list.",
2159 type: Integer,
2160 optional: true,
2161 minimum: 1,
2162 maximum: 1000,
2163 default: 50,
2164 },
2165 "output-format": {
2166 schema: OUTPUT_FORMAT,
2167 optional: true,
2168 },
2169 }
2170 }
2171)]
2172/// List running server tasks for this repo user
d6c4a119 2173async fn task_list(param: Value) -> Result<Value, Error> {
5830c205 2174
d6c4a119
DM
2175 let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
2176 let repo = extract_repository_from_value(&param)?;
d59dbeca 2177 let client = connect(repo.host(), repo.user())?;
5830c205 2178
d6c4a119 2179 let limit = param["limit"].as_u64().unwrap_or(50) as usize;
5830c205 2180
d6c4a119
DM
2181 let args = json!({
2182 "running": true,
2183 "start": 0,
2184 "limit": limit,
2185 "userfilter": repo.user(),
2186 "store": repo.store(),
2187 });
2188 let result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
5830c205 2189
d6c4a119 2190 let data = &result["data"];
5830c205 2191
d6c4a119
DM
2192 if output_format == "text" {
2193 for item in data.as_array().unwrap() {
2194 println!(
2195 "{} {}",
2196 item["upid"].as_str().unwrap(),
2197 item["status"].as_str().unwrap_or("running"),
2198 );
5830c205 2199 }
d6c4a119
DM
2200 } else {
2201 format_and_print_result(data, &output_format);
2202 }
5830c205
DM
2203
2204 Ok(Value::Null)
2205}
2206
2207#[api(
2208 input: {
2209 properties: {
2210 repository: {
2211 schema: REPO_URL_SCHEMA,
2212 optional: true,
2213 },
2214 upid: {
2215 schema: UPID_SCHEMA,
2216 },
2217 }
2218 }
2219)]
2220/// Display the task log.
d6c4a119 2221async fn task_log(param: Value) -> Result<Value, Error> {
5830c205 2222
d6c4a119
DM
2223 let repo = extract_repository_from_value(&param)?;
2224 let upid = tools::required_string_param(&param, "upid")?;
5830c205 2225
d59dbeca 2226 let client = connect(repo.host(), repo.user())?;
5830c205 2227
d6c4a119 2228 display_task_log(client, upid, true).await?;
5830c205
DM
2229
2230 Ok(Value::Null)
2231}
2232
3f1020b7
DM
2233#[api(
2234 input: {
2235 properties: {
2236 repository: {
2237 schema: REPO_URL_SCHEMA,
2238 optional: true,
2239 },
2240 upid: {
2241 schema: UPID_SCHEMA,
2242 },
2243 }
2244 }
2245)]
2246/// Try to stop a specific task.
d6c4a119 2247async fn task_stop(param: Value) -> Result<Value, Error> {
3f1020b7 2248
d6c4a119
DM
2249 let repo = extract_repository_from_value(&param)?;
2250 let upid_str = tools::required_string_param(&param, "upid")?;
3f1020b7 2251
d59dbeca 2252 let mut client = connect(repo.host(), repo.user())?;
3f1020b7 2253
d6c4a119
DM
2254 let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
2255 let _ = client.delete(&path, None).await?;
3f1020b7
DM
2256
2257 Ok(Value::Null)
2258}
2259
5830c205
DM
2260fn task_mgmt_cli() -> CliCommandMap {
2261
2262 let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
2263 .completion_cb("repository", complete_repository);
2264
2265 let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
2266 .arg_param(&["upid"]);
2267
3f1020b7
DM
2268 let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
2269 .arg_param(&["upid"]);
2270
5830c205
DM
2271 CliCommandMap::new()
2272 .insert("log", task_log_cmd_def)
2273 .insert("list", task_list_cmd_def)
3f1020b7 2274 .insert("stop", task_stop_cmd_def)
5830c205 2275}
1c6ad6ef 2276
/// CLI entry point: assemble all subcommand definitions with their shell
/// completion callbacks and hand control to the command-line parser inside
/// the async runtime.
fn main() {

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
        .arg_param(&["snapshot", "logfile"])
        .completion_cb("snapshot", complete_backup_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    // Hand-written schema for `mount` (not via #[api]); #[sortable] sorts the
    // property tuples inside sorted!() at compile time, as required by
    // ObjectSchema's binary-searched property list.
    #[sortable]
    const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
        &ApiHandler::Sync(&mount),
        &ObjectSchema::new(
            "Mount pxar archive.",
            &sorted!([
                ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
                ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
                ("target", false, &StringSchema::new("Target directory path.").schema()),
                ("repository", true, &REPO_URL_SCHEMA),
                ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
                ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
            ]),
        )
    );

    let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("target", tools::complete_file_name);


    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("upload-log", upload_log_cmd_def)
        .insert("forget", forget_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshots", snapshots_cmd_def)
        .insert("files", files_cmd_def)
        .insert("status", status_cmd_def)
        .insert("key", key_mgmt_cli())
        .insert("mount", mount_cmd_def)
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli());

    // Run the CLI dispatcher inside the project's tokio-based runtime wrapper.
    proxmox_backup::tools::runtime::main(run_cli_command(cmd_def));
}