]> git.proxmox.com Git - proxmox-backup.git/blame - src/bin/proxmox-backup-client.rs
add an AsyncReadChunk trait
[proxmox-backup.git] / src / bin / proxmox-backup-client.rs
CommitLineData
2eeaacb9 1use std::collections::{HashSet, HashMap};
70235f72 2use std::ffi::OsStr;
c443f58b 3use std::io::{self, Write, Seek, SeekFrom};
2761d6a4 4use std::os::unix::fs::OpenOptionsExt;
c443f58b
WB
5use std::os::unix::io::RawFd;
6use std::path::{Path, PathBuf};
7use std::pin::Pin;
8use std::sync::{Arc, Mutex};
9use std::task::{Context, Poll};
10
11use anyhow::{bail, format_err, Error};
12use chrono::{Local, DateTime, Utc, TimeZone};
13use futures::future::FutureExt;
14use futures::select;
15use futures::stream::{StreamExt, TryStreamExt};
16use nix::unistd::{fork, ForkResult, pipe};
17use serde_json::{json, Value};
18use tokio::signal::unix::{signal, SignalKind};
19use tokio::sync::mpsc;
20use xdg::BaseDirectories;
2761d6a4 21
c443f58b 22use pathpatterns::{MatchEntry, MatchType, PatternFlag};
552c2259 23use proxmox::{sortable, identity};
feaa1ad3 24use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
501f4fa2 25use proxmox::sys::linux::tty;
a47a02ae 26use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
3d482025 27use proxmox::api::schema::*;
7eea56ca 28use proxmox::api::cli::*;
5830c205 29use proxmox::api::api;
ff5d3707 30
fe0e04c6 31use proxmox_backup::tools;
bbf9e7e9 32use proxmox_backup::api2::types::*;
151c6ce2 33use proxmox_backup::client::*;
c443f58b 34use proxmox_backup::pxar::catalog::*;
4d16badf
WB
35use proxmox_backup::backup::{
36 archive_type,
37 encrypt_key_with_passphrase,
38 load_and_decrypt_key,
39 store_key_config,
40 verify_chunk_size,
41 ArchiveType,
42 BackupDir,
43 BackupGroup,
44 BackupManifest,
45 BufferedDynamicReader,
46 CatalogReader,
47 CatalogWriter,
48 CATALOG_NAME,
49 ChunkStream,
50 CryptConfig,
51 DataBlob,
52 DynamicIndexReader,
53 FixedChunkStream,
54 FixedIndexReader,
55 IndexFile,
56 KeyConfig,
57 MANIFEST_BLOB_NAME,
58 ReadChunk,
59 Shell,
60};
ae0be2dd 61
// Environment variables honored for non-interactive use:
// expected server certificate fingerprint and login password.
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";


// Schema for the repository URL CLI/API parameter.
const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .max_length(256)
    .schema();

// Schema for the optional encryption key file path.
const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")
    .schema();

// Chunk size parameter in KiB; bounded to 64..=4096, default 4096.
const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
    .minimum(64)
    .maximum(4096)
    .default(4096)
    .schema();
81
2665cef7
DM
/// Read the fallback repository from the `PBS_REPOSITORY` environment
/// variable, if set (and valid Unicode).
fn get_default_repository() -> Option<String> {
    match std::env::var("PBS_REPOSITORY") {
        Ok(repo) => Some(repo),
        Err(_) => None,
    }
}
85
86fn extract_repository_from_value(
87 param: &Value,
88) -> Result<BackupRepository, Error> {
89
90 let repo_url = param["repository"]
91 .as_str()
92 .map(String::from)
93 .or_else(get_default_repository)
94 .ok_or_else(|| format_err!("unable to get (default) repository"))?;
95
96 let repo: BackupRepository = repo_url.parse()?;
97
98 Ok(repo)
99}
100
101fn extract_repository_from_map(
102 param: &HashMap<String, String>,
103) -> Option<BackupRepository> {
104
105 param.get("repository")
106 .map(String::from)
107 .or_else(get_default_repository)
108 .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
109}
110
d0a03d40
DM
/// Remember a used repository in `$XDG_CACHE_HOME/proxmox-backup/repo-list`.
///
/// The cache file holds a JSON object mapping repository strings to use
/// counts; only the most-used entries are kept (see the loop below).
/// Everything here is best-effort bookkeeping for shell completion -
/// all errors are silently ignored.
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return, // no usable XDG environment - nothing to record
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    // a missing or unreadable file simply starts a fresh list
    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    // bump the use count for this repository
    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    // selection-sort style loop: repeatedly copy the not-yet-copied entry
    // with the highest count into `map`, stopping once the cap is reached
    // or no countable entries remain
    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    // best effort - ignore write errors
    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}
158
49811347 159fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
d0a03d40
DM
160
161 let mut result = vec![];
162
163 let base = match BaseDirectories::with_prefix("proxmox-backup") {
164 Ok(v) => v,
165 _ => return result,
166 };
167
168 // usually $HOME/.cache/proxmox-backup/repo-list
169 let path = match base.place_cache_file("repo-list") {
170 Ok(v) => v,
171 _ => return result,
172 };
173
11377a47 174 let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
d0a03d40
DM
175
176 if let Some(map) = data.as_object() {
49811347 177 for (repo, _count) in map {
d0a03d40
DM
178 result.push(repo.to_owned());
179 }
180 }
181
182 result
183}
184
d59dbeca
DM
185fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {
186
a05c0c6f
DM
187 let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
188
d1c65727
DM
189 use std::env::VarError::*;
190 let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
191 Ok(p) => Some(p),
192 Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", ENV_VAR_PBS_PASSWORD)),
193 Err(NotPresent) => None,
194 };
195
d59dbeca 196 let options = HttpClientOptions::new()
5030b7ce 197 .prefix(Some("proxmox-backup".to_string()))
d1c65727 198 .password(password)
d59dbeca 199 .interactive(true)
a05c0c6f 200 .fingerprint(fingerprint)
5a74756c 201 .fingerprint_cache(true)
d59dbeca
DM
202 .ticket_cache(true);
203
204 HttpClient::new(server, userid, options)
205}
206
d105176f
DM
207async fn view_task_result(
208 client: HttpClient,
209 result: Value,
210 output_format: &str,
211) -> Result<(), Error> {
212 let data = &result["data"];
213 if output_format == "text" {
214 if let Some(upid) = data.as_str() {
215 display_task_log(client, upid, true).await?;
216 }
217 } else {
218 format_and_print_result(&data, &output_format);
219 }
220
221 Ok(())
222}
223
42af4b8f
DM
224async fn api_datastore_list_snapshots(
225 client: &HttpClient,
226 store: &str,
227 group: Option<BackupGroup>,
f24fc116 228) -> Result<Value, Error> {
42af4b8f
DM
229
230 let path = format!("api2/json/admin/datastore/{}/snapshots", store);
231
232 let mut args = json!({});
233 if let Some(group) = group {
234 args["backup-type"] = group.backup_type().into();
235 args["backup-id"] = group.backup_id().into();
236 }
237
238 let mut result = client.get(&path, Some(args)).await?;
239
f24fc116 240 Ok(result["data"].take())
42af4b8f
DM
241}
242
27c9affb
DM
243async fn api_datastore_latest_snapshot(
244 client: &HttpClient,
245 store: &str,
246 group: BackupGroup,
247) -> Result<(String, String, DateTime<Utc>), Error> {
248
f24fc116
DM
249 let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
250 let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
27c9affb
DM
251
252 if list.is_empty() {
253 bail!("backup group {:?} does not contain any snapshots.", group.group_path());
254 }
255
256 list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
257
258 let backup_time = Utc.timestamp(list[0].backup_time, 0);
259
260 Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
261}
262
263
e9722f8b 264async fn backup_directory<P: AsRef<Path>>(
cf9271e2 265 client: &BackupWriter,
17d6979a 266 dir_path: P,
247cdbce 267 archive_name: &str,
36898ffc 268 chunk_size: Option<usize>,
2eeaacb9 269 device_set: Option<HashSet<u64>>,
219ef0e6 270 verbose: bool,
5b72c9b4 271 skip_lost_and_found: bool,
f98ac774 272 crypt_config: Option<Arc<CryptConfig>>,
f1d99e3f 273 catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
c443f58b 274 exclude_pattern: Vec<MatchEntry>,
6fc053ed 275 entries_max: usize,
2c3891d1 276) -> Result<BackupStats, Error> {
33d64b81 277
6fc053ed
CE
278 let pxar_stream = PxarBackupStream::open(
279 dir_path.as_ref(),
280 device_set,
281 verbose,
282 skip_lost_and_found,
283 catalog,
189996cf 284 exclude_pattern,
6fc053ed
CE
285 entries_max,
286 )?;
e9722f8b 287 let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
ff3d3100 288
e9722f8b 289 let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks
5e7a09be 290
c4ff3dce 291 let stream = rx
e9722f8b 292 .map_err(Error::from);
17d6979a 293
c4ff3dce 294 // spawn chunker inside a separate task so that it can run parallel
e9722f8b 295 tokio::spawn(async move {
db0cb9ce
WB
296 while let Some(v) = chunk_stream.next().await {
297 let _ = tx.send(v).await;
298 }
e9722f8b 299 });
17d6979a 300
e9722f8b
WB
301 let stats = client
302 .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
303 .await?;
bcd879cf 304
2c3891d1 305 Ok(stats)
bcd879cf
DM
306}
307
e9722f8b 308async fn backup_image<P: AsRef<Path>>(
cf9271e2 309 client: &BackupWriter,
6af905c1
DM
310 image_path: P,
311 archive_name: &str,
312 image_size: u64,
36898ffc 313 chunk_size: Option<usize>,
1c0472e8 314 _verbose: bool,
f98ac774 315 crypt_config: Option<Arc<CryptConfig>>,
2c3891d1 316) -> Result<BackupStats, Error> {
6af905c1 317
6af905c1
DM
318 let path = image_path.as_ref().to_owned();
319
e9722f8b 320 let file = tokio::fs::File::open(path).await?;
6af905c1 321
db0cb9ce 322 let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
6af905c1
DM
323 .map_err(Error::from);
324
36898ffc 325 let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
6af905c1 326
e9722f8b
WB
327 let stats = client
328 .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
329 .await?;
6af905c1 330
2c3891d1 331 Ok(stats)
6af905c1
DM
332}
333
a47a02ae
DM
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           "output-format": {
               schema: OUTPUT_FORMAT,
               optional: true,
           },
       }
   }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    // table renderer: "<type>/<id>" group path
    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    // table renderer: relative path of the group's most recent snapshot
    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    // table renderer: archive file list of the last backup
    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
                .right_align(false)
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_GROUPS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
401
a47a02ae
DM
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           group: {
               type: String,
               description: "Backup group.",
               optional: true,
           },
           "output-format": {
               schema: OUTPUT_FORMAT,
               optional: true,
           },
       }
   }
)]
/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    // optional restriction to a single backup group
    let group = if let Some(path) = param["group"].as_str() {
        Some(BackupGroup::parse(path)?)
    } else {
        None
    };

    let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;

    record_repository(&repo);

    // table renderer: relative snapshot path ("<type>/<id>/<time>")
    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    // table renderer: archive file list of the snapshot
    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("size"))
        .column(ColumnConfig::new("files").renderer(render_files))
        ;

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOTS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
466
a47a02ae
DM
467#[api(
468 input: {
469 properties: {
470 repository: {
471 schema: REPO_URL_SCHEMA,
472 optional: true,
473 },
474 snapshot: {
475 type: String,
476 description: "Snapshot path.",
477 },
478 }
479 }
480)]
481/// Forget (remove) backup snapshots.
482async fn forget_snapshots(param: Value) -> Result<Value, Error> {
6f62c924 483
2665cef7 484 let repo = extract_repository_from_value(&param)?;
6f62c924
DM
485
486 let path = tools::required_string_param(&param, "snapshot")?;
487 let snapshot = BackupDir::parse(path)?;
488
d59dbeca 489 let mut client = connect(repo.host(), repo.user())?;
6f62c924 490
9e391bb7 491 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
6f62c924 492
8a8a4703
DM
493 let result = client.delete(&path, Some(json!({
494 "backup-type": snapshot.group().backup_type(),
495 "backup-id": snapshot.group().backup_id(),
496 "backup-time": snapshot.backup_time().timestamp(),
497 }))).await?;
6f62c924 498
d0a03d40
DM
499 record_repository(&repo);
500
6f62c924
DM
501 Ok(result)
502}
503
a47a02ae
DM
504#[api(
505 input: {
506 properties: {
507 repository: {
508 schema: REPO_URL_SCHEMA,
509 optional: true,
510 },
511 }
512 }
513)]
514/// Try to login. If successful, store ticket.
515async fn api_login(param: Value) -> Result<Value, Error> {
e240d8be
DM
516
517 let repo = extract_repository_from_value(&param)?;
518
d59dbeca 519 let client = connect(repo.host(), repo.user())?;
8a8a4703 520 client.login().await?;
e240d8be
DM
521
522 record_repository(&repo);
523
524 Ok(Value::Null)
525}
526
a47a02ae
DM
527#[api(
528 input: {
529 properties: {
530 repository: {
531 schema: REPO_URL_SCHEMA,
532 optional: true,
533 },
534 }
535 }
536)]
537/// Logout (delete stored ticket).
538fn api_logout(param: Value) -> Result<Value, Error> {
e240d8be
DM
539
540 let repo = extract_repository_from_value(&param)?;
541
5030b7ce 542 delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;
e240d8be
DM
543
544 Ok(Value::Null)
545}
546
a47a02ae
DM
#[api(
   input: {
       properties: {
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           snapshot: {
               type: String,
               description: "Snapshot path.",
           },
       }
   }
)]
/// Dump catalog.
async fn dump_catalog(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    // NOTE(review): "keyfile" is read here but not declared in the #[api]
    // schema above - confirm whether callers can actually pass it.
    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = connect(repo.host(), repo.user())?;

    // open a reader session for the snapshot
    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

    // pre-seed the chunk reader cache with the most referenced chunks
    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

    // O_TMPFILE: anonymous temporary file in /tmp, vanishes on close
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    // download the full catalog into the temporary file
    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    // rewind so the catalog reader starts from the beginning
    catalogfile.seek(SeekFrom::Start(0))?;

    let mut catalog_reader = CatalogReader::new(catalogfile);

    catalog_reader.dump()?;

    record_repository(&repo);

    Ok(Value::Null)
}
620
a47a02ae
DM
621#[api(
622 input: {
623 properties: {
624 repository: {
625 schema: REPO_URL_SCHEMA,
626 optional: true,
627 },
628 snapshot: {
629 type: String,
630 description: "Snapshot path.",
631 },
632 "output-format": {
633 schema: OUTPUT_FORMAT,
634 optional: true,
635 },
636 }
637 }
638)]
639/// List snapshot files.
640async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
52c171e4
DM
641
642 let repo = extract_repository_from_value(&param)?;
643
644 let path = tools::required_string_param(&param, "snapshot")?;
645 let snapshot = BackupDir::parse(path)?;
646
c2043614 647 let output_format = get_output_format(&param);
52c171e4 648
d59dbeca 649 let client = connect(repo.host(), repo.user())?;
52c171e4
DM
650
651 let path = format!("api2/json/admin/datastore/{}/files", repo.store());
652
8a8a4703
DM
653 let mut result = client.get(&path, Some(json!({
654 "backup-type": snapshot.group().backup_type(),
655 "backup-id": snapshot.group().backup_id(),
656 "backup-time": snapshot.backup_time().timestamp(),
657 }))).await?;
52c171e4
DM
658
659 record_repository(&repo);
660
ea5f547f 661 let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES;
52c171e4 662
ea5f547f
DM
663 let mut data: Value = result["data"].take();
664
c2043614 665 let options = default_table_format_options();
ea5f547f
DM
666
667 format_and_print_result_full(&mut data, info, &output_format, &options);
52c171e4
DM
668
669 Ok(Value::Null)
670}
671
a47a02ae 672#[api(
94913f35 673 input: {
a47a02ae
DM
674 properties: {
675 repository: {
676 schema: REPO_URL_SCHEMA,
677 optional: true,
678 },
94913f35
DM
679 "output-format": {
680 schema: OUTPUT_FORMAT,
681 optional: true,
682 },
683 },
684 },
a47a02ae
DM
685)]
686/// Start garbage collection for a specific repository.
687async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
8cc0d6af 688
2665cef7 689 let repo = extract_repository_from_value(&param)?;
c2043614
DM
690
691 let output_format = get_output_format(&param);
8cc0d6af 692
d59dbeca 693 let mut client = connect(repo.host(), repo.user())?;
8cc0d6af 694
d0a03d40 695 let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
8cc0d6af 696
8a8a4703 697 let result = client.post(&path, None).await?;
8cc0d6af 698
8a8a4703 699 record_repository(&repo);
d0a03d40 700
8a8a4703 701 view_task_result(client, result, &output_format).await?;
e5f7def4 702
e5f7def4 703 Ok(Value::Null)
8cc0d6af 704}
33d64b81 705
bf6e3217
DM
/// Set up the backup catalog writer plus a background upload task.
///
/// Returns the shared `CatalogWriter` (which feeds an in-memory channel)
/// and a oneshot receiver that yields the upload result once the catalog
/// stream is closed. If the upload fails, the backup writer is cancelled.
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<
        (
            Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    // writer end feeds the channel; reader end is chunked and uploaded below
    let catalog = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        // receiver may already be gone - ignore send errors
        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}
739
a47a02ae
DM
740#[api(
741 input: {
742 properties: {
743 backupspec: {
744 type: Array,
745 description: "List of backup source specifications ([<label.ext>:<path>] ...)",
746 items: {
747 schema: BACKUP_SOURCE_SCHEMA,
748 }
749 },
750 repository: {
751 schema: REPO_URL_SCHEMA,
752 optional: true,
753 },
754 "include-dev": {
755 description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
756 optional: true,
757 items: {
758 type: String,
759 description: "Path to file.",
760 }
761 },
762 keyfile: {
763 schema: KEYFILE_SCHEMA,
764 optional: true,
765 },
766 "skip-lost-and-found": {
767 type: Boolean,
768 description: "Skip lost+found directory.",
769 optional: true,
770 },
771 "backup-type": {
772 schema: BACKUP_TYPE_SCHEMA,
773 optional: true,
774 },
775 "backup-id": {
776 schema: BACKUP_ID_SCHEMA,
777 optional: true,
778 },
779 "backup-time": {
780 schema: BACKUP_TIME_SCHEMA,
781 optional: true,
782 },
783 "chunk-size": {
784 schema: CHUNK_SIZE_SCHEMA,
785 optional: true,
786 },
189996cf
CE
787 "exclude": {
788 type: Array,
789 description: "List of paths or patterns for matching files to exclude.",
790 optional: true,
791 items: {
792 type: String,
793 description: "Path or match pattern.",
794 }
795 },
6fc053ed
CE
796 "entries-max": {
797 type: Integer,
798 description: "Max number of entries to hold in memory.",
799 optional: true,
c443f58b 800 default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
6fc053ed 801 },
e02c3d46
DM
802 "verbose": {
803 type: Boolean,
804 description: "Verbose output.",
805 optional: true,
806 },
a47a02ae
DM
807 }
808 }
809)]
810/// Create (host) backup.
811async fn create_backup(
6049b71f
DM
812 param: Value,
813 _info: &ApiMethod,
dd5495d6 814 _rpcenv: &mut dyn RpcEnvironment,
6049b71f 815) -> Result<Value, Error> {
ff5d3707 816
2665cef7 817 let repo = extract_repository_from_value(&param)?;
ae0be2dd
DM
818
819 let backupspec_list = tools::required_array_param(&param, "backupspec")?;
a914a774 820
eed6db39
DM
821 let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
822
5b72c9b4
DM
823 let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);
824
219ef0e6
DM
825 let verbose = param["verbose"].as_bool().unwrap_or(false);
826
ca5d0b61
DM
827 let backup_time_opt = param["backup-time"].as_i64();
828
36898ffc 829 let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);
2d9d143a 830
247cdbce
DM
831 if let Some(size) = chunk_size_opt {
832 verify_chunk_size(size)?;
2d9d143a
DM
833 }
834
11377a47 835 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
6d0983db 836
f69adc81 837 let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());
fba30411 838
bbf9e7e9 839 let backup_type = param["backup-type"].as_str().unwrap_or("host");
ca5d0b61 840
2eeaacb9
DM
841 let include_dev = param["include-dev"].as_array();
842
c443f58b
WB
843 let entries_max = param["entries-max"].as_u64()
844 .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);
6fc053ed 845
189996cf 846 let empty = Vec::new();
c443f58b
WB
847 let exclude_args = param["exclude"].as_array().unwrap_or(&empty);
848
239e49f9 849 let mut pattern_list = Vec::with_capacity(exclude_args.len());
c443f58b
WB
850 for entry in exclude_args {
851 let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
239e49f9 852 pattern_list.push(
c443f58b
WB
853 MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
854 .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
855 );
189996cf
CE
856 }
857
2eeaacb9
DM
858 let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
859
860 if let Some(include_dev) = include_dev {
861 if all_file_systems {
862 bail!("option 'all-file-systems' conflicts with option 'include-dev'");
863 }
864
865 let mut set = HashSet::new();
866 for path in include_dev {
867 let path = path.as_str().unwrap();
868 let stat = nix::sys::stat::stat(path)
869 .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
870 set.insert(stat.st_dev);
871 }
872 devices = Some(set);
873 }
874
ae0be2dd 875 let mut upload_list = vec![];
a914a774 876
ae0be2dd 877 for backupspec in backupspec_list {
7cc3473a
DM
878 let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
879 let filename = &spec.config_string;
880 let target = &spec.archive_name;
bcd879cf 881
eb1804c5
DM
882 use std::os::unix::fs::FileTypeExt;
883
3fa71727
CE
884 let metadata = std::fs::metadata(filename)
885 .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
eb1804c5 886 let file_type = metadata.file_type();
23bb8780 887
7cc3473a
DM
888 match spec.spec_type {
889 BackupSpecificationType::PXAR => {
ec8a9bb9
DM
890 if !file_type.is_dir() {
891 bail!("got unexpected file type (expected directory)");
892 }
7cc3473a 893 upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
ec8a9bb9 894 }
7cc3473a 895 BackupSpecificationType::IMAGE => {
ec8a9bb9
DM
896 if !(file_type.is_file() || file_type.is_block_device()) {
897 bail!("got unexpected file type (expected file or block device)");
898 }
eb1804c5 899
e18a6c9e 900 let size = image_size(&PathBuf::from(filename))?;
23bb8780 901
ec8a9bb9 902 if size == 0 { bail!("got zero-sized file '{}'", filename); }
ae0be2dd 903
7cc3473a 904 upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
ec8a9bb9 905 }
7cc3473a 906 BackupSpecificationType::CONFIG => {
ec8a9bb9
DM
907 if !file_type.is_file() {
908 bail!("got unexpected file type (expected regular file)");
909 }
7cc3473a 910 upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
ec8a9bb9 911 }
7cc3473a 912 BackupSpecificationType::LOGFILE => {
79679c2d
DM
913 if !file_type.is_file() {
914 bail!("got unexpected file type (expected regular file)");
915 }
7cc3473a 916 upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
ec8a9bb9 917 }
ae0be2dd
DM
918 }
919 }
920
11377a47 921 let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
ae0be2dd 922
d59dbeca 923 let client = connect(repo.host(), repo.user())?;
d0a03d40
DM
924 record_repository(&repo);
925
ca5d0b61
DM
926 println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
927
f69adc81 928 println!("Client name: {}", proxmox::tools::nodename());
ca5d0b61
DM
929
930 let start_time = Local::now();
931
7a6cfbd9 932 println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
51144821 933
bb823140
DM
934 let (crypt_config, rsa_encrypted_key) = match keyfile {
935 None => (None, None),
6d0983db 936 Some(path) => {
6d20a29d 937 let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
bb823140
DM
938
939 let crypt_config = CryptConfig::new(key)?;
940
941 let path = master_pubkey_path()?;
942 if path.exists() {
e18a6c9e 943 let pem_data = file_get_contents(&path)?;
bb823140
DM
944 let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
945 let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
946 (Some(Arc::new(crypt_config)), Some(enc_key))
947 } else {
948 (Some(Arc::new(crypt_config)), None)
949 }
6d0983db
DM
950 }
951 };
f98ac774 952
8a8a4703
DM
953 let client = BackupWriter::start(
954 client,
955 repo.store(),
956 backup_type,
957 &backup_id,
958 backup_time,
959 verbose,
960 ).await?;
961
962 let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
963 let mut manifest = BackupManifest::new(snapshot);
964
5d85847f
DC
965 let mut catalog = None;
966 let mut catalog_result_tx = None;
8a8a4703
DM
967
968 for (backup_type, filename, target, size) in upload_list {
969 match backup_type {
7cc3473a 970 BackupSpecificationType::CONFIG => {
8a8a4703
DM
971 println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
972 let stats = client
973 .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
974 .await?;
1e8da0a7 975 manifest.add_file(target, stats.size, stats.csum)?;
8a8a4703 976 }
7cc3473a 977 BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
8a8a4703
DM
978 println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
979 let stats = client
980 .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
981 .await?;
1e8da0a7 982 manifest.add_file(target, stats.size, stats.csum)?;
8a8a4703 983 }
7cc3473a 984 BackupSpecificationType::PXAR => {
5d85847f
DC
985 // start catalog upload on first use
986 if catalog.is_none() {
987 let (cat, res) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;
988 catalog = Some(cat);
989 catalog_result_tx = Some(res);
990 }
991 let catalog = catalog.as_ref().unwrap();
992
8a8a4703
DM
993 println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
994 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
995 let stats = backup_directory(
996 &client,
997 &filename,
998 &target,
999 chunk_size_opt,
1000 devices.clone(),
1001 verbose,
1002 skip_lost_and_found,
1003 crypt_config.clone(),
1004 catalog.clone(),
239e49f9 1005 pattern_list.clone(),
6fc053ed 1006 entries_max as usize,
8a8a4703 1007 ).await?;
1e8da0a7 1008 manifest.add_file(target, stats.size, stats.csum)?;
8a8a4703
DM
1009 catalog.lock().unwrap().end_directory()?;
1010 }
7cc3473a 1011 BackupSpecificationType::IMAGE => {
8a8a4703
DM
1012 println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
1013 let stats = backup_image(
1014 &client,
1015 &filename,
1016 &target,
1017 size,
1018 chunk_size_opt,
1019 verbose,
1020 crypt_config.clone(),
1021 ).await?;
1e8da0a7 1022 manifest.add_file(target, stats.size, stats.csum)?;
6af905c1
DM
1023 }
1024 }
8a8a4703 1025 }
4818c8b6 1026
8a8a4703 1027 // finalize and upload catalog
5d85847f 1028 if let Some(catalog) = catalog {
8a8a4703
DM
1029 let mutex = Arc::try_unwrap(catalog)
1030 .map_err(|_| format_err!("unable to get catalog (still used)"))?;
1031 let mut catalog = mutex.into_inner().unwrap();
bf6e3217 1032
8a8a4703 1033 catalog.finish()?;
2761d6a4 1034
8a8a4703 1035 drop(catalog); // close upload stream
2761d6a4 1036
5d85847f
DC
1037 if let Some(catalog_result_rx) = catalog_result_tx {
1038 let stats = catalog_result_rx.await??;
1039 manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum)?;
1040 }
8a8a4703 1041 }
2761d6a4 1042
8a8a4703
DM
1043 if let Some(rsa_encrypted_key) = rsa_encrypted_key {
1044 let target = "rsa-encrypted.key";
1045 println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
1046 let stats = client
1047 .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
1048 .await?;
1e8da0a7 1049 manifest.add_file(format!("{}.blob", target), stats.size, stats.csum)?;
8a8a4703
DM
1050
1051 // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
1052 /*
1053 let mut buffer2 = vec![0u8; rsa.size() as usize];
1054 let pem_data = file_get_contents("master-private.pem")?;
1055 let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
1056 let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
1057 println!("TEST {} {:?}", len, buffer2);
1058 */
1059 }
9f46c7de 1060
8a8a4703
DM
1061 // create manifest (index.json)
1062 let manifest = manifest.into_json();
2c3891d1 1063
8a8a4703
DM
1064 println!("Upload index.json to '{:?}'", repo);
1065 let manifest = serde_json::to_string_pretty(&manifest)?.into();
1066 client
1067 .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
1068 .await?;
2c3891d1 1069
8a8a4703 1070 client.finish().await?;
c4ff3dce 1071
8a8a4703
DM
1072 let end_time = Local::now();
1073 let elapsed = end_time.signed_duration_since(start_time);
1074 println!("Duration: {}", elapsed);
3ec3ec3f 1075
8a8a4703 1076 println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
3d5c11e5 1077
8a8a4703 1078 Ok(Value::Null)
f98ea63d
DM
1079}
1080
d0a03d40 1081fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
f98ea63d
DM
1082
1083 let mut result = vec![];
1084
1085 let data: Vec<&str> = arg.splitn(2, ':').collect();
1086
bff11030 1087 if data.len() != 2 {
8968258b
DM
1088 result.push(String::from("root.pxar:/"));
1089 result.push(String::from("etc.pxar:/etc"));
bff11030
DM
1090 return result;
1091 }
f98ea63d 1092
496a6784 1093 let files = tools::complete_file_name(data[1], param);
f98ea63d
DM
1094
1095 for file in files {
1096 result.push(format!("{}:{}", data[0], file));
1097 }
1098
1099 result
ff5d3707 1100}
1101
88892ea8
DM
1102fn dump_image<W: Write>(
1103 client: Arc<BackupReader>,
1104 crypt_config: Option<Arc<CryptConfig>>,
1105 index: FixedIndexReader,
1106 mut writer: W,
fd04ca7a 1107 verbose: bool,
88892ea8
DM
1108) -> Result<(), Error> {
1109
1110 let most_used = index.find_most_used_chunks(8);
1111
1112 let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
1113
1114 // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
1115 // and thus slows down reading. Instead, directly use RemoteChunkReader
fd04ca7a
DM
1116 let mut per = 0;
1117 let mut bytes = 0;
1118 let start_time = std::time::Instant::now();
1119
88892ea8
DM
1120 for pos in 0..index.index_count() {
1121 let digest = index.index_digest(pos).unwrap();
1122 let raw_data = chunk_reader.read_chunk(&digest)?;
1123 writer.write_all(&raw_data)?;
fd04ca7a
DM
1124 bytes += raw_data.len();
1125 if verbose {
1126 let next_per = ((pos+1)*100)/index.index_count();
1127 if per != next_per {
1128 eprintln!("progress {}% (read {} bytes, duration {} sec)",
1129 next_per, bytes, start_time.elapsed().as_secs());
1130 per = next_per;
1131 }
1132 }
88892ea8
DM
1133 }
1134
fd04ca7a
DM
1135 let end_time = std::time::Instant::now();
1136 let elapsed = end_time.duration_since(start_time);
1137 eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
1138 bytes,
1139 elapsed.as_secs_f64(),
1140 bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
1141 );
1142
1143
88892ea8
DM
1144 Ok(())
1145}
1146
dc155e9b 1147fn parse_archive_type(name: &str) -> (String, ArchiveType) {
2d32fe2c
TL
1148 if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
1149 (name.into(), archive_type(name).unwrap())
1150 } else if name.ends_with(".pxar") {
dc155e9b
TL
1151 (format!("{}.didx", name), ArchiveType::DynamicIndex)
1152 } else if name.ends_with(".img") {
1153 (format!("{}.fidx", name), ArchiveType::FixedIndex)
1154 } else {
1155 (format!("{}.blob", name), ArchiveType::Blob)
1156 }
1157}
1158
a47a02ae
DM
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                description: "Backup archive name.",
                type: String,
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extraxt '.pxar' archives when writing to standard output.

"###
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exists.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Restore backup repository.
///
/// Downloads a single archive from a snapshot and writes it to `target`.
/// The handling depends on the archive type resolved by parse_archive_type():
/// the manifest blob is dumped as JSON text, plain blobs are copied verbatim,
/// dynamic indexes are extracted as pxar archives (or piped raw to stdout),
/// and fixed indexes are dumped as raw images via dump_image().
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    // A single '/' means a group path ("<type>/<id>"): resolve it to the
    // latest snapshot of that group; otherwise parse a full snapshot path.
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    // "-" selects standard output instead of a file/directory target
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    // optional client-side decryption key
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    if archive_name == MANIFEST_BLOB_NAME {
        // the manifest was already downloaded above - just serialize it
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            // create_new: refuse to overwrite an existing file
           let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        // seed the chunk reader cache with the most used chunks
        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {
            // extract the pxar archive into the target directory
            proxmox_backup::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                &[],
                proxmox_backup::pxar::Flags::DEFAULT,
                allow_existing_dirs,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            // no target: pipe the raw pxar stream to stdout (no extraction)
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
    }

    Ok(Value::Null)
}
1331
a47a02ae
DM
1332#[api(
1333 input: {
1334 properties: {
1335 repository: {
1336 schema: REPO_URL_SCHEMA,
1337 optional: true,
1338 },
1339 snapshot: {
1340 type: String,
1341 description: "Group/Snapshot path.",
1342 },
1343 logfile: {
1344 type: String,
1345 description: "The path to the log file you want to upload.",
1346 },
1347 keyfile: {
1348 schema: KEYFILE_SCHEMA,
1349 optional: true,
1350 },
1351 }
1352 }
1353)]
1354/// Upload backup log file.
1355async fn upload_log(param: Value) -> Result<Value, Error> {
ec34f7eb
DM
1356
1357 let logfile = tools::required_string_param(&param, "logfile")?;
1358 let repo = extract_repository_from_value(&param)?;
1359
1360 let snapshot = tools::required_string_param(&param, "snapshot")?;
1361 let snapshot = BackupDir::parse(snapshot)?;
1362
d59dbeca 1363 let mut client = connect(repo.host(), repo.user())?;
ec34f7eb 1364
11377a47 1365 let keyfile = param["keyfile"].as_str().map(PathBuf::from);
ec34f7eb
DM
1366
1367 let crypt_config = match keyfile {
1368 None => None,
1369 Some(path) => {
6d20a29d 1370 let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
ec34f7eb 1371 let crypt_config = CryptConfig::new(key)?;
9025312a 1372 Some(Arc::new(crypt_config))
ec34f7eb
DM
1373 }
1374 };
1375
e18a6c9e 1376 let data = file_get_contents(logfile)?;
ec34f7eb 1377
7123ff7d 1378 let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;
ec34f7eb
DM
1379
1380 let raw_data = blob.into_inner();
1381
1382 let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());
1383
1384 let args = json!({
1385 "backup-type": snapshot.group().backup_type(),
1386 "backup-id": snapshot.group().backup_id(),
1387 "backup-time": snapshot.backup_time().timestamp(),
1388 });
1389
1390 let body = hyper::Body::from(raw_data);
1391
8a8a4703 1392 client.upload("application/octet-stream", body, &path, Some(args)).await
ec34f7eb
DM
1393}
1394
032d3ad8
DM
// Manually constructed ApiMethod (instead of the #[api] macro) because the
// parameter list is assembled from the shared add_common_prune_prameters!
// macro, keeping the CLI options in sync with the server-side prune API.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&prune),
    &ObjectSchema::new(
        "Prune a backup repository.",
        &proxmox_backup::add_common_prune_prameters!([
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
             .schema()),
            ("group", false, &StringSchema::new("Backup group.").schema()),
        ], [
            ("output-format", true, &OUTPUT_FORMAT),
            (
                "quiet",
                true,
                &BooleanSchema::new("Minimal output - only show removals.")
                    .schema()
            ),
            ("repository", true, &REPO_URL_SCHEMA),
        ])
    )
);
1416
1417fn prune<'a>(
1418 param: Value,
1419 _info: &ApiMethod,
1420 _rpcenv: &'a mut dyn RpcEnvironment,
1421) -> proxmox::api::ApiFuture<'a> {
1422 async move {
1423 prune_async(param).await
1424 }.boxed()
1425}
83b7db02 1426
/// Run the prune operation on the server and pretty-print the result.
///
/// Client-only parameters are stripped from `param`, the `group` argument is
/// translated into the `backup-type`/`backup-id` pair the server API expects,
/// and the remaining object is POSTed as the prune request body.
async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(group)?;

    let output_format = get_output_format(&param);

    let quiet = param["quiet"].as_bool().unwrap_or(false);

    // remove the client-only options before forwarding `param` to the server
    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");
    param.as_object_mut().unwrap().remove("quiet");

    // the server API takes the group as two separate properties
    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    // renders one result record as its "<type>/<id>/<time>" snapshot path
    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    // renders the boolean "keep" flag as a human readable action
    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
        ;

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;

    let mut data = result["data"].take();

    // quiet mode: only show snapshots that would be / were removed
    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).map(|v| v.clone()).collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
1491
a47a02ae
DM
1492#[api(
1493 input: {
1494 properties: {
1495 repository: {
1496 schema: REPO_URL_SCHEMA,
1497 optional: true,
1498 },
1499 "output-format": {
1500 schema: OUTPUT_FORMAT,
1501 optional: true,
1502 },
1503 }
1504 }
1505)]
1506/// Get repository status.
1507async fn status(param: Value) -> Result<Value, Error> {
34a816cc
DM
1508
1509 let repo = extract_repository_from_value(&param)?;
1510
c2043614 1511 let output_format = get_output_format(&param);
34a816cc 1512
d59dbeca 1513 let client = connect(repo.host(), repo.user())?;
34a816cc
DM
1514
1515 let path = format!("api2/json/admin/datastore/{}/status", repo.store());
1516
1dc117bb 1517 let mut result = client.get(&path, None).await?;
390c5bdd 1518 let mut data = result["data"].take();
34a816cc
DM
1519
1520 record_repository(&repo);
1521
390c5bdd
DM
1522 let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
1523 let v = v.as_u64().unwrap();
1524 let total = record["total"].as_u64().unwrap();
1525 let roundup = total/200;
1526 let per = ((v+roundup)*100)/total;
e23f5863
DM
1527 let info = format!(" ({} %)", per);
1528 Ok(format!("{} {:>8}", v, info))
390c5bdd 1529 };
1dc117bb 1530
c2043614 1531 let options = default_table_format_options()
be2425ff 1532 .noheader(true)
e23f5863 1533 .column(ColumnConfig::new("total").renderer(render_total_percentage))
390c5bdd
DM
1534 .column(ColumnConfig::new("used").renderer(render_total_percentage))
1535 .column(ColumnConfig::new("avail").renderer(render_total_percentage));
34a816cc 1536
ea5f547f 1537 let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;
390c5bdd
DM
1538
1539 format_and_print_result_full(&mut data, schema, &output_format, &options);
34a816cc
DM
1540
1541 Ok(Value::Null)
1542}
1543
5a2df000 1544// like get, but simply ignore errors and return Null instead
e9722f8b 1545async fn try_get(repo: &BackupRepository, url: &str) -> Value {
024f11bb 1546
a05c0c6f 1547 let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
d1c65727 1548 let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();
a05c0c6f 1549
d59dbeca 1550 let options = HttpClientOptions::new()
5030b7ce 1551 .prefix(Some("proxmox-backup".to_string()))
d1c65727 1552 .password(password)
d59dbeca 1553 .interactive(false)
a05c0c6f 1554 .fingerprint(fingerprint)
5a74756c 1555 .fingerprint_cache(true)
d59dbeca
DM
1556 .ticket_cache(true);
1557
1558 let client = match HttpClient::new(repo.host(), repo.user(), options) {
45cdce06
DM
1559 Ok(v) => v,
1560 _ => return Value::Null,
1561 };
b2388518 1562
e9722f8b 1563 let mut resp = match client.get(url, None).await {
b2388518
DM
1564 Ok(v) => v,
1565 _ => return Value::Null,
1566 };
1567
1568 if let Some(map) = resp.as_object_mut() {
1569 if let Some(data) = map.remove("data") {
1570 return data;
1571 }
1572 }
1573 Value::Null
1574}
1575
b2388518 1576fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
3f06d6fb 1577 proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
e9722f8b
WB
1578}
1579
1580async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
024f11bb 1581
b2388518
DM
1582 let mut result = vec![];
1583
2665cef7 1584 let repo = match extract_repository_from_map(param) {
b2388518 1585 Some(v) => v,
024f11bb
DM
1586 _ => return result,
1587 };
1588
b2388518
DM
1589 let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
1590
e9722f8b 1591 let data = try_get(&repo, &path).await;
b2388518
DM
1592
1593 if let Some(list) = data.as_array() {
024f11bb 1594 for item in list {
98f0b972
DM
1595 if let (Some(backup_id), Some(backup_type)) =
1596 (item["backup-id"].as_str(), item["backup-type"].as_str())
1597 {
1598 result.push(format!("{}/{}", backup_type, backup_id));
024f11bb
DM
1599 }
1600 }
1601 }
1602
1603 result
1604}
1605
b2388518 1606fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
3f06d6fb 1607 proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
e9722f8b
WB
1608}
1609
1610async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
b2388518 1611
b2388518 1612 if arg.matches('/').count() < 2 {
e9722f8b 1613 let groups = complete_backup_group_do(param).await;
543a260f 1614 let mut result = vec![];
b2388518
DM
1615 for group in groups {
1616 result.push(group.to_string());
1617 result.push(format!("{}/", group));
1618 }
1619 return result;
1620 }
1621
e9722f8b 1622 complete_backup_snapshot_do(param).await
543a260f 1623}
b2388518 1624
3fb53e07 1625fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
3f06d6fb 1626 proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
e9722f8b
WB
1627}
1628
1629async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
543a260f
DM
1630
1631 let mut result = vec![];
1632
1633 let repo = match extract_repository_from_map(param) {
1634 Some(v) => v,
1635 _ => return result,
1636 };
1637
1638 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
b2388518 1639
e9722f8b 1640 let data = try_get(&repo, &path).await;
b2388518
DM
1641
1642 if let Some(list) = data.as_array() {
1643 for item in list {
1644 if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
1645 (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
1646 {
1647 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
1648 result.push(snapshot.relative_path().to_str().unwrap().to_owned());
1649 }
1650 }
1651 }
1652
1653 result
1654}
1655
45db6f89 1656fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
3f06d6fb 1657 proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
e9722f8b
WB
1658}
1659
1660async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
08dc340a
DM
1661
1662 let mut result = vec![];
1663
2665cef7 1664 let repo = match extract_repository_from_map(param) {
08dc340a
DM
1665 Some(v) => v,
1666 _ => return result,
1667 };
1668
1669 let snapshot = match param.get("snapshot") {
1670 Some(path) => {
1671 match BackupDir::parse(path) {
1672 Ok(v) => v,
1673 _ => return result,
1674 }
1675 }
1676 _ => return result,
1677 };
1678
1679 let query = tools::json_object_to_query(json!({
1680 "backup-type": snapshot.group().backup_type(),
1681 "backup-id": snapshot.group().backup_id(),
1682 "backup-time": snapshot.backup_time().timestamp(),
1683 })).unwrap();
1684
1685 let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
1686
e9722f8b 1687 let data = try_get(&repo, &path).await;
08dc340a
DM
1688
1689 if let Some(list) = data.as_array() {
1690 for item in list {
c4f025eb 1691 if let Some(filename) = item["filename"].as_str() {
08dc340a
DM
1692 result.push(filename.to_owned());
1693 }
1694 }
1695 }
1696
45db6f89
DM
1697 result
1698}
1699
1700fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
52c171e4 1701 complete_server_file_name(arg, param)
e9722f8b 1702 .iter()
4939255f 1703 .map(|v| tools::format::strip_server_file_expenstion(&v))
e9722f8b 1704 .collect()
08dc340a
DM
1705}
1706
0ec9e1b0
DM
1707fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
1708 complete_server_file_name(arg, param)
1709 .iter()
1710 .filter_map(|v| {
4939255f 1711 let name = tools::format::strip_server_file_expenstion(&v);
0ec9e1b0
DM
1712 if name.ends_with(".pxar") {
1713 Some(name)
1714 } else {
1715 None
1716 }
1717 })
1718 .collect()
1719}
1720
49811347
DM
/// Completion: suggested chunk sizes - powers of two from 64 up to 4096.
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut sizes = Vec::new();

    let mut size = 64;
    while size <= 4096 {
        sizes.push(size.to_string());
        size *= 2;
    }

    sizes
}
1734
826f309b 1735fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
ff5d3707 1736
f2401311
DM
1737 // fixme: implement other input methods
1738
1739 use std::env::VarError::*;
1740 match std::env::var("PBS_ENCRYPTION_PASSWORD") {
826f309b 1741 Ok(p) => return Ok(p.as_bytes().to_vec()),
f2401311
DM
1742 Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
1743 Err(NotPresent) => {
1744 // Try another method
1745 }
1746 }
1747
1748 // If we're on a TTY, query the user for a password
501f4fa2
DM
1749 if tty::stdin_isatty() {
1750 return Ok(tty::read_password("Encryption Key Password: ")?);
f2401311
DM
1751 }
1752
1753 bail!("no password input mechanism available");
1754}
1755
ac716234
DM
/// Create a new 32 byte random encryption key and store it at `path`.
///
/// With kdf "scrypt" (the default) the key is encrypted with a passphrase
/// read from the TTY; with kdf "none" it is stored unencrypted. Other kdf
/// values are rejected by KDF_SCHEMA before this handler runs, hence the
/// unreachable!() below.
fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // fresh random key material
    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = tty::read_and_verify_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        // false: do not overwrite an existing key file
        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        // store the key unencrypted, with second-precision timestamps
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}
1797
9f46c7de
DM
1798fn master_pubkey_path() -> Result<PathBuf, Error> {
1799 let base = BaseDirectories::with_prefix("proxmox-backup")?;
1800
1801 // usually $HOME/.config/proxmox-backup/master-public.pem
1802 let path = base.place_config_file("master-public.pem")?;
1803
1804 Ok(path)
1805}
1806
3ea8bfc9
DM
1807fn key_import_master_pubkey(
1808 param: Value,
1809 _info: &ApiMethod,
1810 _rpcenv: &mut dyn RpcEnvironment,
1811) -> Result<Value, Error> {
1812
1813 let path = tools::required_string_param(&param, "path")?;
1814 let path = PathBuf::from(path);
1815
e18a6c9e 1816 let pem_data = file_get_contents(&path)?;
3ea8bfc9
DM
1817
1818 if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
1819 bail!("Unable to decode PEM data - {}", err);
1820 }
1821
9f46c7de 1822 let target_path = master_pubkey_path()?;
3ea8bfc9 1823
feaa1ad3 1824 replace_file(&target_path, &pem_data, CreateOptions::new())?;
3ea8bfc9
DM
1825
1826 println!("Imported public master key to {:?}", target_path);
1827
1828 Ok(Value::Null)
1829}
1830
37c5a175
DM
1831fn key_create_master_key(
1832 _param: Value,
1833 _info: &ApiMethod,
1834 _rpcenv: &mut dyn RpcEnvironment,
1835) -> Result<Value, Error> {
1836
1837 // we need a TTY to query the new password
501f4fa2 1838 if !tty::stdin_isatty() {
37c5a175
DM
1839 bail!("unable to create master key - no tty");
1840 }
1841
1842 let rsa = openssl::rsa::Rsa::generate(4096)?;
1843 let pkey = openssl::pkey::PKey::from_rsa(rsa)?;
1844
37c5a175 1845
501f4fa2 1846 let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;
37c5a175
DM
1847
1848 let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
1849 let filename_pub = "master-public.pem";
1850 println!("Writing public master key to {}", filename_pub);
feaa1ad3 1851 replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;
37c5a175
DM
1852
1853 let cipher = openssl::symm::Cipher::aes_256_cbc();
cbe01dc5 1854 let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;
37c5a175
DM
1855
1856 let filename_priv = "master-private.pem";
1857 println!("Writing private master key to {}", filename_priv);
feaa1ad3 1858 replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;
37c5a175
DM
1859
1860 Ok(Value::Null)
1861}
ac716234
DM
1862
1863fn key_change_passphrase(
1864 param: Value,
1865 _info: &ApiMethod,
1866 _rpcenv: &mut dyn RpcEnvironment,
1867) -> Result<Value, Error> {
1868
9b06db45
DM
1869 let path = tools::required_string_param(&param, "path")?;
1870 let path = PathBuf::from(path);
ac716234 1871
181f097a
DM
1872 let kdf = param["kdf"].as_str().unwrap_or("scrypt");
1873
ac716234 1874 // we need a TTY to query the new password
501f4fa2 1875 if !tty::stdin_isatty() {
ac716234
DM
1876 bail!("unable to change passphrase - no tty");
1877 }
1878
6d20a29d 1879 let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
ac716234 1880
181f097a 1881 if kdf == "scrypt" {
ac716234 1882
501f4fa2 1883 let password = tty::read_and_verify_password("New Password: ")?;
ac716234 1884
cbe01dc5 1885 let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
ab44acff
DM
1886 new_key_config.created = created; // keep original value
1887
1888 store_key_config(&path, true, new_key_config)?;
ac716234 1889
181f097a
DM
1890 Ok(Value::Null)
1891 } else if kdf == "none" {
ab44acff 1892 let modified = Local.timestamp(Local::now().timestamp(), 0);
181f097a
DM
1893
1894 store_key_config(&path, true, KeyConfig {
1895 kdf: None,
ab44acff
DM
1896 created, // keep original value
1897 modified,
6d0983db 1898 data: key.to_vec(),
181f097a
DM
1899 })?;
1900
1901 Ok(Value::Null)
1902 } else {
1903 unreachable!();
1904 }
f2401311
DM
1905}
1906
1907fn key_mgmt_cli() -> CliCommandMap {
1908
255f378a 1909 const KDF_SCHEMA: Schema =
181f097a 1910 StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
bc0d0388
DM
1911 .format(&ApiStringFormat::Enum(&[
1912 EnumEntry::new("scrypt", "SCrypt"),
1913 EnumEntry::new("none", "Do not encrypt the key")]))
255f378a
DM
1914 .default("scrypt")
1915 .schema();
1916
552c2259 1917 #[sortable]
255f378a
DM
1918 const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
1919 &ApiHandler::Sync(&key_create),
1920 &ObjectSchema::new(
1921 "Create a new encryption key.",
552c2259 1922 &sorted!([
255f378a
DM
1923 ("path", false, &StringSchema::new("File system path.").schema()),
1924 ("kdf", true, &KDF_SCHEMA),
552c2259 1925 ]),
255f378a 1926 )
181f097a 1927 );
7074a0b3 1928
255f378a 1929 let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
49fddd98 1930 .arg_param(&["path"])
9b06db45 1931 .completion_cb("path", tools::complete_file_name);
f2401311 1932
552c2259 1933 #[sortable]
255f378a
DM
1934 const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
1935 &ApiHandler::Sync(&key_change_passphrase),
1936 &ObjectSchema::new(
1937 "Change the passphrase required to decrypt the key.",
552c2259 1938 &sorted!([
255f378a
DM
1939 ("path", false, &StringSchema::new("File system path.").schema()),
1940 ("kdf", true, &KDF_SCHEMA),
552c2259 1941 ]),
255f378a
DM
1942 )
1943 );
7074a0b3 1944
255f378a 1945 let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
49fddd98 1946 .arg_param(&["path"])
9b06db45 1947 .completion_cb("path", tools::complete_file_name);
ac716234 1948
255f378a
DM
1949 const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
1950 &ApiHandler::Sync(&key_create_master_key),
1951 &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
1952 );
7074a0b3 1953
255f378a
DM
1954 let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);
1955
552c2259 1956 #[sortable]
255f378a
DM
1957 const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
1958 &ApiHandler::Sync(&key_import_master_pubkey),
1959 &ObjectSchema::new(
1960 "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
552c2259 1961 &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
255f378a
DM
1962 )
1963 );
7074a0b3 1964
255f378a 1965 let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
49fddd98 1966 .arg_param(&["path"])
3ea8bfc9
DM
1967 .completion_cb("path", tools::complete_file_name);
1968
11377a47 1969 CliCommandMap::new()
48ef3c33
DM
1970 .insert("create", key_create_cmd_def)
1971 .insert("create-master-key", key_create_master_key_cmd_def)
1972 .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
1973 .insert("change-passphrase", key_change_passphrase_cmd_def)
f2401311
DM
1974}
1975
70235f72
CE
/// CLI entry point for `mount`: mount a pxar archive via FUSE.
///
/// With `verbose` the process stays in the foreground and runs the async
/// mount directly. Otherwise it daemonizes: fork first (before any tokio
/// runtime exists, to avoid runtime/fork interaction trouble), then the
/// parent blocks on a pipe until the child signals that setup is done.
fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in foreground with debug output enabled as None is
        // passed for the RawFd.
        return proxmox_backup::tools::runtime::main(mount_do(param, None));
    }

    // Process should be daemonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            // Parent keeps only the read end of the pipe.
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            // Child keeps only the write end; detach from the controlling
            // terminal, then run the actual mount on a fresh runtime.
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}
2006
c443f58b
WB
2007use proxmox_backup::client::RemoteChunkReader;
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
struct BufferedDynamicReadAt {
    // Mutex serializes the seek+read pairs issued by `poll_read_at`.
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}
2017
impl BufferedDynamicReadAt {
    /// Wrap a `BufferedDynamicReader` in a mutex so it can be shared across
    /// threads (see the struct-level note on why this is a workaround).
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}
2025
impl pxar::accessor::ReadAt for BufferedDynamicReadAt {
    /// Read at `offset` by seeking the locked synchronous reader.
    ///
    /// The context is unused: the blocking seek+read is run via
    /// `block_in_place`, so this always resolves to `Ready` immediately.
    fn poll_read_at(
        self: Pin<&Self>,
        _cx: &mut Context,
        buf: &mut [u8],
        offset: u64,
    ) -> Poll<io::Result<usize>> {
        use std::io::Read;
        tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            // NOTE(review): a single `read` may return fewer bytes than
            // `buf.len()`; presumably callers tolerate short reads per the
            // `ReadAt` contract — confirm.
            Poll::Ready(Ok(reader.read(buf)?))
        })
    }
}
2041
70235f72
CE
/// Perform the actual FUSE mount of a `.pxar` archive.
///
/// `pipe`: write end handed over by the daemonized parent in `mount()`;
/// when present we finish daemon setup (chdir /, redirect stdio to
/// /dev/null) and write one byte to unblock the waiting parent.
/// Runs until the FUSE session ends or SIGINT arrives.
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    // A single '/' means a group path -> resolve to the latest snapshot;
    // otherwise parse the full snapshot path.
    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    // Optional encryption key for encrypted archives.
    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    // Only pxar archives can be mounted; map to the server-side index name.
    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        // Adapt the synchronous buffered reader for async pxar access.
        let reader: proxmox_backup::pxar::fuse::Reader =
            Arc::new(BufferedDynamicReadAt::new(reader));
        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
        let options = OsStr::new("ro,default_permissions");

        let session = proxmox_backup::pxar::fuse::Session::mount(
            decoder,
            &options,
            false,
            Path::new(target),
        )
        .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of daemon by redirecting filedescriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it can
            // terminate.
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        // Serve the FUSE session until it finishes or SIGINT unmounts us.
        let mut interrupt = signal(SignalKind::interrupt())?;
        select! {
            res = session.fuse() => res?,
            _ = interrupt.recv().fuse() => {
                // exit on interrupted
            }
        }
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}
2138
78d54360
WB
#[api(
    input: {
        properties: {
            "snapshot": {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                type: String,
                description: "Backup archive name.",
            },
            "repository": {
                optional: true,
                schema: REPO_URL_SCHEMA,
            },
            "keyfile": {
                optional: true,
                type: String,
                description: "Path to encryption key.",
            },
        },
    },
)]
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;
    let path = tools::required_string_param(&param, "snapshot")?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;

    // A single '/' means a group path -> resolve to the latest snapshot;
    // otherwise parse the full snapshot path.
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    // Optional encryption key for encrypted archives.
    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    // The shell only works on pxar archives; map to the server-side index name.
    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    // Anonymous temp file (O_TMPFILE) used as download target for the catalog.
    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    let manifest = client.download_manifest().await?;

    // Build an async accessor over the archive contents for restore operations.
    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let archive_size = reader.archive_size();
    let reader: proxmox_backup::pxar::fuse::Reader =
        Arc::new(BufferedDynamicReadAt::new(reader));
    let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;

    // Download and verify the catalog index before trusting its contents.
    let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
    let index = DynamicIndexReader::new(tmpfile)
        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

    // Note: do not use values stored in index (not trusted) - instead, computed them again
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    // Stream the catalog into a second anonymous temp file for random access.
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    // Rewind so the catalog reader starts at the beginning.
    catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    ).await?;

    println!("Starting interactive shell");
    state.shell().await?;

    record_repository(&repo);

    Ok(())
}
2254
1c6ad6ef 2255fn catalog_mgmt_cli() -> CliCommandMap {
78d54360 2256 let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
1c6ad6ef
DM
2257 .arg_param(&["snapshot", "archive-name"])
2258 .completion_cb("repository", complete_repository)
0ec9e1b0 2259 .completion_cb("archive-name", complete_pxar_archive_name)
1c6ad6ef
DM
2260 .completion_cb("snapshot", complete_group_or_snapshot);
2261
1c6ad6ef
DM
2262 let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
2263 .arg_param(&["snapshot"])
2264 .completion_cb("repository", complete_repository)
2265 .completion_cb("snapshot", complete_backup_snapshot);
2266
2267 CliCommandMap::new()
48ef3c33
DM
2268 .insert("dump", catalog_dump_cmd_def)
2269 .insert("shell", catalog_shell_cmd_def)
1c6ad6ef
DM
2270}
2271
5830c205
DM
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            limit: {
                description: "The maximal number of tasks to list.",
                type: Integer,
                optional: true,
                minimum: 1,
                maximum: 1000,
                default: 50,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            all: {
                type: Boolean,
                description: "Also list stopped tasks.",
                optional: true,
            },
        }
    }
)]
/// List running server tasks for this repo user
async fn task_list(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;

    // Default limit mirrors the schema default above.
    let limit = param["limit"].as_u64().unwrap_or(50) as usize;
    // `all` inverts the server-side "running only" filter.
    let running = !param["all"].as_bool().unwrap_or(false);

    let args = json!({
        "running": running,
        "start": 0,
        "limit": limit,
        "userfilter": repo.user(),
        "store": repo.store(),
    });

    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
    let mut data = result["data"].take();

    let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;

    // Render timestamps and task status in human-readable form.
    let options = default_table_format_options()
        .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("upid"))
        .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}
2333
2334#[api(
2335 input: {
2336 properties: {
2337 repository: {
2338 schema: REPO_URL_SCHEMA,
2339 optional: true,
2340 },
2341 upid: {
2342 schema: UPID_SCHEMA,
2343 },
2344 }
2345 }
2346)]
2347/// Display the task log.
d6c4a119 2348async fn task_log(param: Value) -> Result<Value, Error> {
5830c205 2349
d6c4a119
DM
2350 let repo = extract_repository_from_value(&param)?;
2351 let upid = tools::required_string_param(&param, "upid")?;
5830c205 2352
d59dbeca 2353 let client = connect(repo.host(), repo.user())?;
5830c205 2354
d6c4a119 2355 display_task_log(client, upid, true).await?;
5830c205
DM
2356
2357 Ok(Value::Null)
2358}
2359
3f1020b7
DM
2360#[api(
2361 input: {
2362 properties: {
2363 repository: {
2364 schema: REPO_URL_SCHEMA,
2365 optional: true,
2366 },
2367 upid: {
2368 schema: UPID_SCHEMA,
2369 },
2370 }
2371 }
2372)]
2373/// Try to stop a specific task.
d6c4a119 2374async fn task_stop(param: Value) -> Result<Value, Error> {
3f1020b7 2375
d6c4a119
DM
2376 let repo = extract_repository_from_value(&param)?;
2377 let upid_str = tools::required_string_param(&param, "upid")?;
3f1020b7 2378
d59dbeca 2379 let mut client = connect(repo.host(), repo.user())?;
3f1020b7 2380
d6c4a119
DM
2381 let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
2382 let _ = client.delete(&path, None).await?;
3f1020b7
DM
2383
2384 Ok(Value::Null)
2385}
2386
5830c205
DM
2387fn task_mgmt_cli() -> CliCommandMap {
2388
2389 let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
2390 .completion_cb("repository", complete_repository);
2391
2392 let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
2393 .arg_param(&["upid"]);
2394
3f1020b7
DM
2395 let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
2396 .arg_param(&["upid"]);
2397
5830c205
DM
2398 CliCommandMap::new()
2399 .insert("log", task_log_cmd_def)
2400 .insert("list", task_list_cmd_def)
3f1020b7 2401 .insert("stop", task_stop_cmd_def)
5830c205 2402}
1c6ad6ef 2403
f2401311 2404fn main() {
33d64b81 2405
255f378a 2406 let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
49fddd98 2407 .arg_param(&["backupspec"])
d0a03d40 2408 .completion_cb("repository", complete_repository)
49811347 2409 .completion_cb("backupspec", complete_backup_source)
6d0983db 2410 .completion_cb("keyfile", tools::complete_file_name)
49811347 2411 .completion_cb("chunk-size", complete_chunk_size);
f8838fe9 2412
255f378a 2413 let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
49fddd98 2414 .arg_param(&["snapshot", "logfile"])
543a260f 2415 .completion_cb("snapshot", complete_backup_snapshot)
ec34f7eb
DM
2416 .completion_cb("logfile", tools::complete_file_name)
2417 .completion_cb("keyfile", tools::complete_file_name)
2418 .completion_cb("repository", complete_repository);
2419
255f378a 2420 let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
d0a03d40 2421 .completion_cb("repository", complete_repository);
41c039e1 2422
255f378a 2423 let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
49fddd98 2424 .arg_param(&["group"])
024f11bb 2425 .completion_cb("group", complete_backup_group)
d0a03d40 2426 .completion_cb("repository", complete_repository);
184f17af 2427
255f378a 2428 let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
49fddd98 2429 .arg_param(&["snapshot"])
b2388518 2430 .completion_cb("repository", complete_repository)
543a260f 2431 .completion_cb("snapshot", complete_backup_snapshot);
6f62c924 2432
255f378a 2433 let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
d0a03d40 2434 .completion_cb("repository", complete_repository);
8cc0d6af 2435
255f378a 2436 let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
49fddd98 2437 .arg_param(&["snapshot", "archive-name", "target"])
b2388518 2438 .completion_cb("repository", complete_repository)
08dc340a
DM
2439 .completion_cb("snapshot", complete_group_or_snapshot)
2440 .completion_cb("archive-name", complete_archive_name)
2441 .completion_cb("target", tools::complete_file_name);
9f912493 2442
255f378a 2443 let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
49fddd98 2444 .arg_param(&["snapshot"])
52c171e4 2445 .completion_cb("repository", complete_repository)
543a260f 2446 .completion_cb("snapshot", complete_backup_snapshot);
52c171e4 2447
255f378a 2448 let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
49fddd98 2449 .arg_param(&["group"])
9fdc3ef4 2450 .completion_cb("group", complete_backup_group)
d0a03d40 2451 .completion_cb("repository", complete_repository);
9f912493 2452
255f378a 2453 let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
34a816cc
DM
2454 .completion_cb("repository", complete_repository);
2455
255f378a 2456 let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
e240d8be
DM
2457 .completion_cb("repository", complete_repository);
2458
255f378a 2459 let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
e240d8be 2460 .completion_cb("repository", complete_repository);
32efac1c 2461
552c2259 2462 #[sortable]
255f378a
DM
2463 const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
2464 &ApiHandler::Sync(&mount),
2465 &ObjectSchema::new(
2466 "Mount pxar archive.",
552c2259 2467 &sorted!([
255f378a
DM
2468 ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
2469 ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
2470 ("target", false, &StringSchema::new("Target directory path.").schema()),
2471 ("repository", true, &REPO_URL_SCHEMA),
2472 ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
2473 ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
552c2259 2474 ]),
255f378a
DM
2475 )
2476 );
7074a0b3 2477
255f378a 2478 let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
49fddd98 2479 .arg_param(&["snapshot", "archive-name", "target"])
70235f72
CE
2480 .completion_cb("repository", complete_repository)
2481 .completion_cb("snapshot", complete_group_or_snapshot)
0ec9e1b0 2482 .completion_cb("archive-name", complete_pxar_archive_name)
70235f72 2483 .completion_cb("target", tools::complete_file_name);
e240d8be 2484
3cf73c4e 2485
41c039e1 2486 let cmd_def = CliCommandMap::new()
48ef3c33
DM
2487 .insert("backup", backup_cmd_def)
2488 .insert("upload-log", upload_log_cmd_def)
2489 .insert("forget", forget_cmd_def)
2490 .insert("garbage-collect", garbage_collect_cmd_def)
2491 .insert("list", list_cmd_def)
2492 .insert("login", login_cmd_def)
2493 .insert("logout", logout_cmd_def)
2494 .insert("prune", prune_cmd_def)
2495 .insert("restore", restore_cmd_def)
2496 .insert("snapshots", snapshots_cmd_def)
2497 .insert("files", files_cmd_def)
2498 .insert("status", status_cmd_def)
2499 .insert("key", key_mgmt_cli())
2500 .insert("mount", mount_cmd_def)
5830c205
DM
2501 .insert("catalog", catalog_mgmt_cli())
2502 .insert("task", task_mgmt_cli());
48ef3c33 2503
7b22acd0
DM
2504 let rpcenv = CliEnvironment::new();
2505 run_cli_command(cmd_def, rpcenv, Some(|future| {
d08bc483
DM
2506 proxmox_backup::tools::runtime::main(future)
2507 }));
ff5d3707 2508}