// proxmox-backup.git: proxmox-backup-client/src/main.rs
use std::collections::HashSet;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use futures::stream::{StreamExt, TryStreamExt};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox_async::blocking::TokioWriterAdapter;
use proxmox_human_byte::HumanByte;
use proxmox_io::StdChannelWriter;
use proxmox_router::{cli::*, ApiMethod, RpcEnvironment};
use proxmox_schema::api;
use proxmox_sys::fs::{file_get_json, image_size, replace_file, CreateOptions};
use proxmox_time::{epoch_i64, strftime_local};
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use pbs_api_types::{
    Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, CryptMode,
    Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, RateLimitConfig, SnapshotListItem,
    StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
    BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::pxar::ErrorHandler as PxarErrorHandler;
use pbs_client::tools::{
    complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
    complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
    complete_img_archive_name, complete_namespace, complete_pxar_archive_name, complete_repository,
    connect, connect_rate_limited, extract_repository_from_value,
    key_source::{
        crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
        KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
    },
    CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
};
use pbs_client::{
    delete_ticket_info, parse_backup_specification, view_task_result, BackupReader,
    BackupRepository, BackupSpecificationType, BackupStats, BackupWriter, ChunkStream,
    FixedChunkStream, HttpClient, PxarBackupStream, RemoteChunkReader, UploadOptions,
    BACKUP_SOURCE_SCHEMA,
};
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
use pbs_datastore::chunk_store::verify_chunk_size;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{
    archive_type, ArchiveType, BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME,
};
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_datastore::CATALOG_NAME;
use pbs_key_config::{decrypt_key, rsa_encrypt_key_config, KeyConfig};
use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::json;

mod benchmark;
pub use benchmark::*;
mod mount;
pub use mount::*;
mod task;
pub use task::*;
mod catalog;
pub use catalog::*;
mod snapshot;
pub use snapshot::*;
pub mod key;
pub mod namespace;

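/// Remember a used repository in the per-user cache file (usually
/// $HOME/.cache/proxmox-backup/repo-list) so it can be offered by the repository
/// shell completion, keeping only the most frequently used entries.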
fn record_repository(repo: &BackupRepository) {
    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json! { data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) {
                continue;
            }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 {
            // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(
        path,
        new_data.to_string().as_bytes(),
        CreateOptions::new(),
        false,
    );
}

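/// List the snapshots of a datastore via the API, optionally restricted to a
/// namespace and/or a single backup group.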
async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    ns: &BackupNamespace,
    group: Option<&BackupGroup>,
) -> Result<Value, Error> {
    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = match group {
        Some(group) => serde_json::to_value(group)?,
        None => json!({}),
    };
    if !ns.is_root() {
        args["ns"] = serde_json::to_value(ns)?;
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}

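/// Return the most recent snapshot of a backup group, or fail if the group does
/// not contain any snapshots.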
pub async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    ns: &BackupNamespace,
    group: BackupGroup,
) -> Result<BackupDir, Error> {
    let list = api_datastore_list_snapshots(client, store, ns, Some(&group)).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {} does not contain any snapshots.", group);
    }

    list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time));

    Ok((group, list[0].backup.time).into())
}

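/// Parse a group-or-snapshot path: a full snapshot path is returned as-is, a
/// group path is resolved to the group's latest snapshot.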
pub async fn dir_or_last_from_group(
    client: &HttpClient,
    repo: &BackupRepository,
    ns: &BackupNamespace,
    path: &str,
) -> Result<BackupDir, Error> {
    match path.parse::<BackupPart>()? {
        BackupPart::Dir(dir) => Ok(dir),
        BackupPart::Group(group) => {
            api_datastore_latest_snapshot(client, repo.store(), ns, group).await
        }
    }
}

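/// Stream a directory as a pxar archive and upload it as a dynamically chunked
/// archive; the chunker runs in a separate task that feeds the upload stream
/// through a bounded channel.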
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    catalog: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter<Error>>>>>,
    pxar_create_options: pbs_client::pxar::PxarCreateOptions,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {
    if upload_options.fixed_size.is_some() {
        bail!("cannot backup directory with fixed chunk size!");
    }

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), catalog, pxar_create_options)?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow buffering of up to 10 chunks

    let stream = ReceiverStream::new(rx).map_err(Error::from);

    // spawn chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}

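/// Upload a regular file or block device as a fixed-chunk image archive; the
/// upload options must have `fixed_size` set.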
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {
    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4 * 1024 * 1024));

    if upload_options.fixed_size.is_none() {
        bail!("cannot backup image with dynamic chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}

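/// Read the optional "ns" parameter, falling back to the root namespace when it
/// is absent.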
pub fn optional_ns_param(param: &Value) -> Result<BackupNamespace, Error> {
    Ok(match param.get("ns") {
        Some(Value::String(ns)) => ns.parse()?,
        Some(_) => bail!("invalid namespace parameter"),
        None => BackupNamespace::root(),
    })
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "ns": {
                type: BackupNamespace,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {
    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let backup_ns = optional_ns_param(&param)?;
    let mut result = client
        .get(
            &path,
            match backup_ns.is_root() {
                true => None,
                false => Some(json!({ "ns": backup_ns })),
            },
        )
        .await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item = GroupListItem::deserialize(record)?;
        Ok(item.backup.to_string())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item = GroupListItem::deserialize(record)?;
        let snapshot = BackupDir {
            group: item.backup,
            time: item.last_backup,
        };
        Ok(snapshot.to_string())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item = GroupListItem::deserialize(record)?;
        Ok(pbs_tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(
            ColumnConfig::new("backup-id")
                .renderer(render_group_path)
                .header("group"),
        )
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
                .right_align(false),
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let return_type = &pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

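/// Serialize a BackupGroup and merge its JSON fields into the given parameter
/// object.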
fn merge_group_into(to: &mut serde_json::Map<String, Value>, group: BackupGroup) {
    match serde_json::to_value(group).unwrap() {
        Value::Object(group) => to.extend(group),
        _ => unreachable!(),
    }
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            group: {
                type: String,
                description: "Backup group.",
            },
            "ns": {
                type: BackupNamespace,
                optional: true,
            },
            "new-owner": {
                type: Authid,
            },
        }
    }
)]
/// Change owner of a backup group
async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let ns = optional_ns_param(&param)?;

    let client = connect(&repo)?;

    param.as_object_mut().unwrap().remove("repository");

    let group: BackupGroup = group.parse()?;

    merge_group_into(param.as_object_mut().unwrap(), group);
    if !ns.is_root() {
        param["ns"] = serde_json::to_value(ns)?;
    }

    let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
    client.post(&path, Some(param)).await?;

    record_repository(&repo);

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Try to log in. If successful, store the ticket.
async fn api_login(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Show client and optional server version
async fn api_version(param: Value) -> Result<(), Error> {
    let output_format = get_output_format(&param);

    let mut version_info = json!({
        "client": {
            "version": pbs_buildcfg::PROXMOX_PKG_VERSION,
            "release": pbs_buildcfg::PROXMOX_PKG_RELEASE,
            "repoid": pbs_buildcfg::PROXMOX_PKG_REPOID,
        }
    });

    let repo = extract_repository_from_value(&param);
    if let Ok(repo) = repo {
        let client = connect(&repo)?;

        match client.get("api2/json/version", None).await {
            Ok(mut result) => version_info["server"] = result["data"].take(),
            Err(e) => log::error!("could not connect to server - {}", e),
        }
    }
    if output_format == "text" {
        println!(
            "client version: {}.{}",
            pbs_buildcfg::PROXMOX_PKG_VERSION,
            pbs_buildcfg::PROXMOX_PKG_RELEASE,
        );
        if let Some(server) = version_info["server"].as_object() {
            let server_version = server["version"].as_str().unwrap();
            let server_release = server["release"].as_str().unwrap();
            println!("server version: {}.{}", server_version, server_release);
        }
    } else {
        format_and_print_result(&version_info, &output_format);
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(&client, result, &output_format).await?;

    Ok(Value::Null)
}

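/// Handles returned by spawn_catalog_upload(): the shared catalog writer plus a
/// oneshot receiver that yields the upload statistics once the background task
/// finishes.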
struct CatalogUploadResult {
    catalog_writer: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter<Error>>>>>,
    result: tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
}

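/// Spawn the catalog upload: data written through the returned catalog writer is
/// chunked and uploaded as the catalog archive by a background task; on error the
/// backup writer is cancelled.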
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    encrypt: bool,
) -> Result<CatalogUploadResult, Error> {
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow buffering of up to 10 writes
    let catalog_stream = proxmox_async::blocking::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512 * 1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(TokioWriterAdapter::new(
        StdChannelWriter::new(catalog_tx),
    ))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    let upload_options = UploadOptions {
        encrypt,
        compress: true,
        ..UploadOptions::default()
    };

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options)
            .await;

        if let Err(ref err) = catalog_upload_result {
            log::error!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok(CatalogUploadResult {
        catalog_writer,
        result: catalog_result_rx,
    })
}

#[api(
    input: {
        properties: {
            backupspec: {
                type: Array,
                description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                items: {
                    schema: BACKUP_SOURCE_SCHEMA,
                }
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "include-dev": {
                description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                optional: true,
                items: {
                    type: String,
                    description: "Path to file.",
                }
            },
            "all-file-systems": {
                type: Boolean,
                description: "Include all mounted subdirectories.",
                optional: true,
                default: false,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "master-pubkey-file": {
                schema: MASTER_PUBKEY_FILE_SCHEMA,
                optional: true,
            },
            "master-pubkey-fd": {
                schema: MASTER_PUBKEY_FD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
                optional: true,
                default: false,
            },
            "ns": {
                schema: BACKUP_NAMESPACE_SCHEMA,
                optional: true,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "chunk-size": {
                schema: CHUNK_SIZE_SCHEMA,
                optional: true,
            },
            rate: {
                schema: TRAFFIC_CONTROL_RATE_SCHEMA,
                optional: true,
            },
            burst: {
                schema: TRAFFIC_CONTROL_BURST_SCHEMA,
                optional: true,
            },
            "exclude": {
                type: Array,
                description: "List of paths or patterns for matching files to exclude.",
                optional: true,
                items: {
                    type: String,
                    description: "Path or match pattern.",
                }
            },
            "entries-max": {
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
                default: pbs_client::pxar::ENCODER_MAX_ENTRIES as isize,
            },
            "dry-run": {
                type: Boolean,
                description: "Just show what backup would do, but do not upload anything.",
                optional: true,
                default: false,
            },
            "skip-e2big-xattr": {
                type: Boolean,
                description: "Ignore the E2BIG error when retrieving xattrs. This includes the file, but discards the metadata.",
                optional: true,
                default: false,
            },
        }
    }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    all_file_systems: bool,
    skip_lost_and_found: bool,
    dry_run: bool,
    skip_e2big_xattr: bool,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = json::required_array_param(&param, "backupspec")?;

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v * 1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let rate = match param["rate"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };
    let burst = match param["burst"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };

    let rate_limit = RateLimitConfig::with_same_inout(rate, burst);

    let crypto = crypto_parameters(&param)?;

    let backup_id = param["backup-id"]
        .as_str()
        .unwrap_or_else(|| proxmox_sys::nodename());

    let backup_ns = optional_ns_param(&param)?;

    let backup_type: BackupType = param["backup-type"].as_str().unwrap_or("host").parse()?;

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"]
        .as_u64()
        .unwrap_or(pbs_client::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry
            .as_str()
            .ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?,
        );
    }

    let mut devices = if all_file_systems {
        None
    } else {
        Some(HashSet::new())
    };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];
    let mut target_set = HashSet::new();

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        if target_set.contains(target) {
            bail!("got target twice: '{}'", target);
        }
        target_set.insert(target.to_string());

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((
                    BackupSpecificationType::PXAR,
                    filename.to_owned(),
                    format!("{}.didx", target),
                    0,
                ));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 {
                    bail!("got zero-sized file '{}'", filename);
                }

                upload_list.push((
                    BackupSpecificationType::IMAGE,
                    filename.to_owned(),
                    format!("{}.fidx", target),
                    size,
                ));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((
                    BackupSpecificationType::CONFIG,
                    filename.to_owned(),
                    format!("{}.blob", target),
                    metadata.len(),
                ));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((
                    BackupSpecificationType::LOGFILE,
                    filename.to_owned(),
                    format!("{}.blob", target),
                    metadata.len(),
                ));
            }
        }
    }

    let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);

    let http_client = connect_rate_limited(&repo, rate_limit)?;
    record_repository(&repo);

    let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time));
    if backup_ns.is_root() {
        log::info!("Starting backup: {snapshot}");
    } else {
        log::info!("Starting backup: [{backup_ns}]:{snapshot}");
    }

    log::info!("Client name: {}", proxmox_sys::nodename());

    let start_time = std::time::Instant::now();

    log::info!(
        "Starting backup protocol: {}",
        strftime_local("%c", epoch_i64())?
    );

    let (crypt_config, rsa_encrypted_key) = match crypto.enc_key {
        None => (None, None),
        Some(key_with_source) => {
            log::info!(
                "{}",
                format_key_source(&key_with_source.source, "encryption")
            );

            let (key, created, fingerprint) =
                decrypt_key(&key_with_source.key, &get_encryption_key_password)?;
            log::info!("Encryption key fingerprint: {}", fingerprint);

            let crypt_config = CryptConfig::new(key)?;

            match crypto.master_pubkey {
                Some(pem_with_source) => {
                    log::info!("{}", format_key_source(&pem_with_source.source, "master"));

                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_with_source.key)?;

                    let mut key_config = KeyConfig::without_password(key)?;
                    key_config.created = created; // keep original value

                    let enc_key = rsa_encrypt_key_config(rsa, &key_config)?;

                    (Some(Arc::new(crypt_config)), Some(enc_key))
                }
                _ => (Some(Arc::new(crypt_config)), None),
            }
        }
    };

    let client = BackupWriter::start(
        &http_client,
        crypt_config.clone(),
        repo.store(),
        &backup_ns,
        &snapshot,
        true,
        false,
    )
    .await?;

    let download_previous_manifest = match client.previous_backup_time().await {
        Ok(Some(backup_time)) => {
            log::info!(
                "Downloading previous manifest ({})",
                strftime_local("%c", backup_time)?
            );
            true
        }
        Ok(None) => {
            log::info!("No previous manifest available.");
            false
        }
        Err(_) => {
            // Fallback for outdated server, TODO remove/bubble up with 2.0
            true
        }
    };

    let previous_manifest = if download_previous_manifest {
        match client.download_previous_manifest().await {
            Ok(previous_manifest) => {
                match previous_manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref)) {
                    Ok(()) => Some(Arc::new(previous_manifest)),
                    Err(err) => {
                        log::error!("Couldn't re-use previous manifest - {}", err);
                        None
                    }
                }
            }
            Err(err) => {
                log::error!("Couldn't download previous manifest - {}", err);
                None
            }
        }
    } else {
        None
    };

    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_rx = None;

    let log_file = |desc: &str, file: &str, target: &str| {
        let what = if dry_run { "Would upload" } else { "Upload" };
        log::info!("{} {} '{}' to '{}' as {}", what, desc, file, repo, target);
    };

    for (backup_type, filename, target, size) in upload_list {
        match (backup_type, dry_run) {
            // dry-run
            (BackupSpecificationType::CONFIG, true) => log_file("config file", &filename, &target),
            (BackupSpecificationType::LOGFILE, true) => log_file("log file", &filename, &target),
            (BackupSpecificationType::PXAR, true) => log_file("directory", &filename, &target),
            (BackupSpecificationType::IMAGE, true) => log_file("image", &filename, &target),
            // no dry-run
            (BackupSpecificationType::CONFIG, false) => {
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                log_file("config file", &filename, &target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            (BackupSpecificationType::LOGFILE, false) => {
                // fixme: remove - not needed anymore ?
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                log_file("log file", &filename, &target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            (BackupSpecificationType::PXAR, false) => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let catalog_upload_res =
                        spawn_catalog_upload(client.clone(), crypto.mode == CryptMode::Encrypt)?;
                    catalog = Some(catalog_upload_res.catalog_writer);
                    catalog_result_rx = Some(catalog_upload_res.result);
                }
                let catalog = catalog.as_ref().unwrap();

                log_file("directory", &filename, &target);
                catalog
                    .lock()
                    .unwrap()
                    .start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;

                let pxar_options = pbs_client::pxar::PxarCreateOptions {
                    device_set: devices.clone(),
                    patterns: pattern_list.clone(),
                    entries_max: entries_max as usize,
                    skip_lost_and_found,
                    skip_e2big_xattr,
                };

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    catalog.clone(),
                    pxar_options,
                    upload_options,
                )
                .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
                catalog.lock().unwrap().end_directory()?;
            }
            (BackupSpecificationType::IMAGE, false) => {
                log_file("image", &filename, &target);

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    fixed_size: Some(size),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                };

                let stats =
                    backup_image(&client, &filename, &target, chunk_size_opt, upload_options)
                        .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
        }
    }

    if dry_run {
        log::info!("dry-run: no upload happened");
        return Ok(Value::Null);
    }

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_rx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?;
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = ENCRYPTED_KEY_BLOB_NAME;
        log::info!("Upload RSA encoded key to '{}' as {}", repo, target);
        let options = UploadOptions {
            compress: false,
            encrypt: false,
            ..UploadOptions::default()
        };
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, options)
            .await?;
        manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?;
    }
    // create manifest (index.json)
    // manifests are never encrypted, but include a signature
    let manifest = manifest
        .to_string(crypt_config.as_ref().map(Arc::as_ref))
        .map_err(|err| format_err!("unable to format manifest - {}", err))?;

    log::debug!("Upload index.json to '{}'", repo);

    let options = UploadOptions {
        compress: true,
        encrypt: false,
        ..UploadOptions::default()
    };
    client
        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options)
        .await?;

    client.finish().await?;

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    log::info!("Duration: {:.2}s", elapsed.as_secs_f64());
    log::info!("End Time: {}", strftime_local("%c", epoch_i64())?);
    Ok(Value::Null)
}

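/// Sequentially write all chunks of a fixed-index image archive to `writer`,
/// logging progress and a final throughput summary.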
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    crypt_mode: CryptMode,
    index: FixedIndexReader,
    mut writer: W,
) -> Result<(), Error> {
    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader.
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        let next_per = ((pos + 1) * 100) / index.index_count();
        if per != next_per {
            log::debug!(
                "progress {}% (read {} bytes, duration {} sec)",
                next_per,
                bytes,
                start_time.elapsed().as_secs()
            );
            per = next_per;
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    log::info!(
        "restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
        bytes,
        elapsed.as_secs_f64(),
        bytes as f64 / (1024.0 * 1024.0 * elapsed.as_secs_f64())
    );

    Ok(())
}

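/// Map a user-facing archive name to the name stored on the server and its
/// ArchiveType: known index/blob extensions are kept as-is, '.pxar' maps to a
/// dynamic index, '.img' to a fixed index, and everything else to a blob.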
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                description: "Backup archive name.",
                type: String,
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            },
            rate: {
                schema: TRAFFIC_CONTROL_RATE_SCHEMA,
                optional: true,
            },
            burst: {
                schema: TRAFFIC_CONTROL_BURST_SCHEMA,
                optional: true,
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exist.",
                optional: true,
                default: false,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            "ignore-acls": {
                type: Boolean,
                description: "ignore acl settings",
                optional: true,
                default: false,
            },
            "ignore-xattrs": {
                type: Boolean,
                description: "ignore xattr settings",
                optional: true,
                default: false,
            },
            "ignore-ownership": {
                type: Boolean,
                description: "ignore owner settings (no chown)",
                optional: true,
                default: false,
            },
            "ignore-permissions": {
                type: Boolean,
                description: "ignore permission settings (no chmod)",
                optional: true,
                default: false,
            },
            "overwrite": {
                type: Boolean,
                description: "overwrite already existing files",
                optional: true,
                default: false,
            },
            "overwrite-files": {
                description: "overwrite already existing files",
                optional: true,
                default: false,
            },
            "overwrite-symlinks": {
                description: "overwrite already existing entries with the archive's symlink",
                optional: true,
                default: false,
            },
            "overwrite-hardlinks": {
                description: "overwrite already existing entries with the archive's hardlink",
                optional: true,
                default: false,
            },
            "ignore-extract-device-errors": {
                type: Boolean,
                description: "ignore errors that occur during device node extraction",
                optional: true,
                default: false,
            }
        }
    }
)]
/// Restore backup repository.
async fn restore(
    param: Value,
    allow_existing_dirs: bool,
    ignore_acls: bool,
    ignore_xattrs: bool,
    ignore_ownership: bool,
    ignore_permissions: bool,
    overwrite: bool,
    overwrite_files: bool,
    overwrite_symlinks: bool,
    overwrite_hardlinks: bool,
    ignore_extract_device_errors: bool,
) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let archive_name = json::required_string_param(&param, "archive-name")?;

    let rate = match param["rate"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };
    let burst = match param["burst"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };

    let rate_limit = RateLimitConfig::with_same_inout(rate, burst);

    let client = connect_rate_limited(&repo, rate_limit)?;
    record_repository(&repo);

    let ns = optional_ns_param(&param)?;
    let path = json::required_string_param(&param, "snapshot")?;

    let backup_dir = dir_or_last_from_group(&client, &repo, &ns, path).await?;

    let target = json::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let crypto = crypto_parameters(&param)?;

    let crypt_config = match crypto.enc_key {
        None => None,
        Some(ref key) => {
            let (key, _, _) =
                decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
                    log::error!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        &client,
        crypt_config.clone(),
        repo.store(),
        &ns,
        &backup_dir,
        true,
    )
    .await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    let (manifest, backup_index_data) = client.download_manifest().await?;

    if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
        log::info!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
    } else {
        if manifest.signature.is_some() {
            if let Some(key) = &crypto.enc_key {
                log::info!("{}", format_key_source(&key.source, "encryption"));
            }
            if let Some(config) = &crypt_config {
                log::info!("Fingerprint: {}", Fingerprint::new(config.fingerprint()));
            }
        }
        manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
    }

    if archive_name == MANIFEST_BLOB_NAME {
        if let Some(target) = target {
            replace_file(target, &backup_index_data, CreateOptions::new(), false)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer
                .write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

        return Ok(Value::Null);
    }

    let file_info = manifest.lookup_file_info(&archive_name)?;

    if archive_type == ArchiveType::Blob {
        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| {
                    format_err!("unable to create target file {:?} - {}", target, err)
                })?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::DynamicIndex {
        let index = client
            .download_dynamic_index(&manifest, &archive_name)
            .await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(
            client.clone(),
            crypt_config,
            file_info.chunk_crypt_mode(),
            most_used,
        );

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let on_error = if ignore_extract_device_errors {
            let handler: PxarErrorHandler = Box::new(move |err: Error| {
                use pbs_client::pxar::PxarExtractContext;

                match err.downcast_ref::<PxarExtractContext>() {
                    Some(PxarExtractContext::ExtractDevice) => Ok(()),
                    _ => Err(err),
                }
            });

            Some(handler)
        } else {
            None
        };

        let mut overwrite_flags = pbs_client::pxar::OverwriteFlags::empty();
        overwrite_flags.set(pbs_client::pxar::OverwriteFlags::FILE, overwrite_files);
        overwrite_flags.set(
            pbs_client::pxar::OverwriteFlags::SYMLINK,
            overwrite_symlinks,
        );
        overwrite_flags.set(
            pbs_client::pxar::OverwriteFlags::HARDLINK,
            overwrite_hardlinks,
        );
        if overwrite {
            overwrite_flags.insert(pbs_client::pxar::OverwriteFlags::all());
        }

        let options = pbs_client::pxar::PxarExtractOptions {
            match_list: &[],
            extract_match_default: true,
            allow_existing_dirs,
            overwrite_flags,
            on_error,
        };

        let mut feature_flags = pbs_client::pxar::Flags::DEFAULT;

        if ignore_acls {
            feature_flags.remove(pbs_client::pxar::Flags::WITH_ACL);
        }
        if ignore_xattrs {
            feature_flags.remove(pbs_client::pxar::Flags::WITH_XATTRS);
        }
        if ignore_ownership {
            feature_flags.remove(pbs_client::pxar::Flags::WITH_OWNER);
        }
        if ignore_permissions {
            feature_flags.remove(pbs_client::pxar::Flags::WITH_PERMISSIONS);
        }

        if let Some(target) = target {
            pbs_client::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                feature_flags,
                |path| {
                    log::debug!("{:?}", path);
                },
                options,
            )
            .map_err(|err| format_err!("error extracting archive - {:#}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {
        let index = client
            .download_fixed_index(&manifest, &archive_name)
            .await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(
            client.clone(),
            crypt_config.clone(),
            file_info.chunk_crypt_mode(),
            index,
            &mut writer,
        )
        .await?;
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "dry-run": {
                type: bool,
                optional: true,
                description: "Just show what prune would do, but do not delete anything.",
            },
            group: {
                type: String,
                description: "Backup group",
            },
            "prune-options": {
                type: PruneJobOptions,
                flatten: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            quiet: {
                type: bool,
                optional: true,
                default: false,
                description: "Minimal output - only show removals.",
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Prune a backup repository.
async fn prune(
    dry_run: Option<bool>,
    group: String,
    prune_options: PruneJobOptions,
    quiet: bool,
    mut param: Value,
) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group: BackupGroup = group.parse()?;

    let output_format = extract_output_format(&mut param);

    let mut api_param = serde_json::to_value(prune_options)?;
    if let Some(dry_run) = dry_run {
        api_param["dry-run"] = dry_run.into();
    }
    merge_group_into(api_param.as_object_mut().unwrap(), group);

    let mut result = client.post(&path, Some(api_param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        Ok(item.backup.to_string())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }
        .to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(
            ColumnConfig::new("backup-id")
                .renderer(render_snapshot_path)
                .header("snapshot"),
        )
        .column(
            ColumnConfig::new("backup-time")
                .renderer(pbs_tools::format::render_epoch)
                .header("date"),
        )
        .column(
            ColumnConfig::new("keep")
                .renderer(render_prune_action)
                .header("action"),
        );

    let return_type = &pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data
            .as_array()
            .unwrap()
            .iter()
            .filter(|item| item["keep"].as_bool() == Some(false))
            .cloned()
            .collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    },
    returns: {
        type: StorageStatus,
    },
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total / 200;
        if let Some(per) = ((v + roundup) * 100).checked_div(total) {
            let info = format!(" ({} %)", per);
            Ok(format!("{} {:>8}", v, info))
        } else {
            bail!("Cannot render total percentage: denominator is zero");
        }
    };

    let options = default_table_format_options()
        .noheader(true)
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let return_type = &API_METHOD_STATUS.returns;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
pub struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}

impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        _cx: &mut Context,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            reader.read(buf)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("BufferedDynamicReadAt::start_read_at returned Pending");
    }
}

fn main() {
    pbs_tools::setup_libc_malloc_opts();
    init_cli_logger("PBS_LOG", "info");

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", complete_file_name)
        .completion_cb("master-pubkey-file", complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", complete_repository)
        .completion_cb("keyfile", complete_file_name);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("ns", complete_namespace)
        .completion_cb("repository", complete_repository);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("ns", complete_namespace)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", complete_file_name);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("ns", complete_namespace)
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def =
        CliCommand::new(&API_METHOD_STATUS).completion_cb("repository", complete_repository);

    let login_cmd_def =
        CliCommand::new(&API_METHOD_API_LOGIN).completion_cb("repository", complete_repository);

    let logout_cmd_def =
        CliCommand::new(&API_METHOD_API_LOGOUT).completion_cb("repository", complete_repository);

    let version_cmd_def =
        CliCommand::new(&API_METHOD_API_VERSION).completion_cb("repository", complete_repository);

    let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
        .arg_param(&["group", "new-owner"])
        .completion_cb("ns", complete_namespace)
        .completion_cb("group", complete_backup_group)
        .completion_cb("new-owner", complete_auth_id)
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshot", snapshot_mgtm_cli())
        .insert("status", status_cmd_def)
        .insert("key", key::cli())
        .insert("mount", mount_cmd_def())
        .insert("map", map_cmd_def())
        .insert("unmap", unmap_cmd_def())
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli())
        .insert("version", version_cmd_def)
        .insert("benchmark", benchmark_cmd_def)
        .insert("change-owner", change_owner_cmd_def)
        .insert("namespace", namespace::cli_map())
        .alias(&["files"], &["snapshot", "files"])
        .alias(&["forget"], &["snapshot", "forget"])
        .alias(&["upload-log"], &["snapshot", "upload-log"])
        .alias(&["snapshots"], &["snapshot", "list"]);

    let rpcenv = CliEnvironment::new();
    run_cli_command(
        cmd_def,
        rpcenv,
        Some(|future| proxmox_async::runtime::main(future)),
    );
}