]> git.proxmox.com Git - proxmox-backup.git/blame - src/bin/proxmox-backup-client.rs
minor style & whitespace fixups
[proxmox-backup.git] / src / bin / proxmox-backup-client.rs
CommitLineData
2eeaacb9 1use std::collections::{HashSet, HashMap};
c443f58b 2use std::io::{self, Write, Seek, SeekFrom};
c443f58b
WB
3use std::path::{Path, PathBuf};
4use std::pin::Pin;
5use std::sync::{Arc, Mutex};
a6f87283 6use std::task::Context;
c443f58b
WB
7
8use anyhow::{bail, format_err, Error};
9use chrono::{Local, DateTime, Utc, TimeZone};
10use futures::future::FutureExt;
c443f58b 11use futures::stream::{StreamExt, TryStreamExt};
c443f58b 12use serde_json::{json, Value};
c443f58b
WB
13use tokio::sync::mpsc;
14use xdg::BaseDirectories;
2761d6a4 15
c443f58b 16use pathpatterns::{MatchEntry, MatchType, PatternFlag};
552c2259 17use proxmox::{sortable, identity};
feaa1ad3 18use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
501f4fa2 19use proxmox::sys::linux::tty;
a47a02ae 20use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
3d482025 21use proxmox::api::schema::*;
7eea56ca 22use proxmox::api::cli::*;
5830c205 23use proxmox::api::api;
a6f87283 24use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
ff5d3707 25
fe0e04c6 26use proxmox_backup::tools;
bbf9e7e9 27use proxmox_backup::api2::types::*;
151c6ce2 28use proxmox_backup::client::*;
c443f58b 29use proxmox_backup::pxar::catalog::*;
4d16badf
WB
30use proxmox_backup::backup::{
31 archive_type,
32 encrypt_key_with_passphrase,
33 load_and_decrypt_key,
34 store_key_config,
35 verify_chunk_size,
36 ArchiveType,
8e6e18b7 37 AsyncReadChunk,
4d16badf
WB
38 BackupDir,
39 BackupGroup,
40 BackupManifest,
41 BufferedDynamicReader,
42 CatalogReader,
43 CatalogWriter,
44 CATALOG_NAME,
45 ChunkStream,
46 CryptConfig,
47 DataBlob,
48 DynamicIndexReader,
49 FixedChunkStream,
50 FixedIndexReader,
51 IndexFile,
52 KeyConfig,
53 MANIFEST_BLOB_NAME,
4d16badf
WB
54 Shell,
55};
ae0be2dd 56
caea8d61
DM
57mod proxmox_backup_client;
58use proxmox_backup_client::*;
59
// Environment variables used to pass credentials/trust anchors non-interactively.
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";


/// Schema for the `--repository` option (also read from `PBS_REPOSITORY`).
pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .max_length(256)
    .schema();

/// Schema for the `--keyfile` option (path to the client encryption key).
pub const KEYFILE_SCHEMA: Schema = StringSchema::new(
    "Path to encryption key. All data will be encrypted using this key.")
    .schema();

// Schema for the `--chunk-size` option; value is in KB and must be a power of 2.
const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
    .minimum(64)
    .maximum(4096)
    .default(4096)
    .schema();
79
2665cef7
DM
/// Return the default repository from the `PBS_REPOSITORY` environment
/// variable, or `None` if it is unset (or not valid unicode).
fn get_default_repository() -> Option<String> {
    match std::env::var("PBS_REPOSITORY") {
        Ok(repo) => Some(repo),
        Err(_) => None,
    }
}
83
caea8d61 84pub fn extract_repository_from_value(
2665cef7
DM
85 param: &Value,
86) -> Result<BackupRepository, Error> {
87
88 let repo_url = param["repository"]
89 .as_str()
90 .map(String::from)
91 .or_else(get_default_repository)
92 .ok_or_else(|| format_err!("unable to get (default) repository"))?;
93
94 let repo: BackupRepository = repo_url.parse()?;
95
96 Ok(repo)
97}
98
99fn extract_repository_from_map(
100 param: &HashMap<String, String>,
101) -> Option<BackupRepository> {
102
103 param.get("repository")
104 .map(String::from)
105 .or_else(get_default_repository)
106 .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
107}
108
d0a03d40
DM
109fn record_repository(repo: &BackupRepository) {
110
111 let base = match BaseDirectories::with_prefix("proxmox-backup") {
112 Ok(v) => v,
113 _ => return,
114 };
115
116 // usually $HOME/.cache/proxmox-backup/repo-list
117 let path = match base.place_cache_file("repo-list") {
118 Ok(v) => v,
119 _ => return,
120 };
121
11377a47 122 let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
d0a03d40
DM
123
124 let repo = repo.to_string();
125
126 data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };
127
128 let mut map = serde_json::map::Map::new();
129
130 loop {
131 let mut max_used = 0;
132 let mut max_repo = None;
133 for (repo, count) in data.as_object().unwrap() {
134 if map.contains_key(repo) { continue; }
135 if let Some(count) = count.as_i64() {
136 if count > max_used {
137 max_used = count;
138 max_repo = Some(repo);
139 }
140 }
141 }
142 if let Some(repo) = max_repo {
143 map.insert(repo.to_owned(), json!(max_used));
144 } else {
145 break;
146 }
147 if map.len() > 10 { // store max. 10 repos
148 break;
149 }
150 }
151
152 let new_data = json!(map);
153
feaa1ad3 154 let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
d0a03d40
DM
155}
156
43abba4b 157pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
d0a03d40
DM
158
159 let mut result = vec![];
160
161 let base = match BaseDirectories::with_prefix("proxmox-backup") {
162 Ok(v) => v,
163 _ => return result,
164 };
165
166 // usually $HOME/.cache/proxmox-backup/repo-list
167 let path = match base.place_cache_file("repo-list") {
168 Ok(v) => v,
169 _ => return result,
170 };
171
11377a47 172 let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
d0a03d40
DM
173
174 if let Some(map) = data.as_object() {
49811347 175 for (repo, _count) in map {
d0a03d40
DM
176 result.push(repo.to_owned());
177 }
178 }
179
180 result
181}
182
d59dbeca
DM
183fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {
184
a05c0c6f
DM
185 let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
186
d1c65727
DM
187 use std::env::VarError::*;
188 let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
189 Ok(p) => Some(p),
190 Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", ENV_VAR_PBS_PASSWORD)),
191 Err(NotPresent) => None,
192 };
193
d59dbeca 194 let options = HttpClientOptions::new()
5030b7ce 195 .prefix(Some("proxmox-backup".to_string()))
d1c65727 196 .password(password)
d59dbeca 197 .interactive(true)
a05c0c6f 198 .fingerprint(fingerprint)
5a74756c 199 .fingerprint_cache(true)
d59dbeca
DM
200 .ticket_cache(true);
201
202 HttpClient::new(server, userid, options)
203}
204
d105176f
DM
205async fn view_task_result(
206 client: HttpClient,
207 result: Value,
208 output_format: &str,
209) -> Result<(), Error> {
210 let data = &result["data"];
211 if output_format == "text" {
212 if let Some(upid) = data.as_str() {
213 display_task_log(client, upid, true).await?;
214 }
215 } else {
216 format_and_print_result(&data, &output_format);
217 }
218
219 Ok(())
220}
221
42af4b8f
DM
222async fn api_datastore_list_snapshots(
223 client: &HttpClient,
224 store: &str,
225 group: Option<BackupGroup>,
f24fc116 226) -> Result<Value, Error> {
42af4b8f
DM
227
228 let path = format!("api2/json/admin/datastore/{}/snapshots", store);
229
230 let mut args = json!({});
231 if let Some(group) = group {
232 args["backup-type"] = group.backup_type().into();
233 args["backup-id"] = group.backup_id().into();
234 }
235
236 let mut result = client.get(&path, Some(args)).await?;
237
f24fc116 238 Ok(result["data"].take())
42af4b8f
DM
239}
240
43abba4b 241pub async fn api_datastore_latest_snapshot(
27c9affb
DM
242 client: &HttpClient,
243 store: &str,
244 group: BackupGroup,
245) -> Result<(String, String, DateTime<Utc>), Error> {
246
f24fc116
DM
247 let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
248 let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
27c9affb
DM
249
250 if list.is_empty() {
251 bail!("backup group {:?} does not contain any snapshots.", group.group_path());
252 }
253
254 list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
255
256 let backup_time = Utc.timestamp(list[0].backup_time, 0);
257
258 Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
259}
260
/// Back up a directory tree as a dynamically-chunked pxar archive.
///
/// Opens a pxar stream over `dir_path`, chunks it, and uploads the chunk
/// stream as `archive_name`. Catalog entries are written through the shared
/// `catalog` writer. Returns the upload statistics on success.
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    previous_manifest: Option<Arc<BackupManifest>>,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,   // restrict to these st_dev numbers; None = all
    verbose: bool,
    skip_lost_and_found: bool,
    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    exclude_pattern: Vec<MatchEntry>,
    entries_max: usize,                 // max directory entries held in memory
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        device_set,
        verbose,
        skip_lost_and_found,
        catalog,
        exclude_pattern,
        entries_max,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    let stream = rx
        .map_err(Error::from);

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            // send errors just mean the receiver (uploader) went away
            let _ = tx.send(v).await;
        }
    });

    let stats = client
        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None)
        .await?;

    Ok(stats)
}
304
e9722f8b 305async fn backup_image<P: AsRef<Path>>(
cf9271e2 306 client: &BackupWriter,
b957aa81 307 previous_manifest: Option<Arc<BackupManifest>>,
6af905c1
DM
308 image_path: P,
309 archive_name: &str,
310 image_size: u64,
36898ffc 311 chunk_size: Option<usize>,
1c0472e8 312 _verbose: bool,
2c3891d1 313) -> Result<BackupStats, Error> {
6af905c1 314
6af905c1
DM
315 let path = image_path.as_ref().to_owned();
316
e9722f8b 317 let file = tokio::fs::File::open(path).await?;
6af905c1 318
db0cb9ce 319 let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
6af905c1
DM
320 .map_err(Error::from);
321
36898ffc 322 let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
6af905c1 323
e9722f8b 324 let stats = client
b957aa81 325 .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size))
e9722f8b 326 .await?;
6af905c1 327
2c3891d1 328 Ok(stats)
6af905c1
DM
329}
330
a47a02ae
DM
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    // render the "backup-id" column as the full group path (e.g. "host/pve1")
    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    // render "last-backup" as the relative path of the most recent snapshot
    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    // render the archive file list as one formatted string
    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
                .right_align(false)
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_GROUPS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
398
a47a02ae
DM
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            group: {
                type: String,
                description: "Backup group.",
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    // optional group filter; an invalid group string is a hard error
    let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
        Some(path.parse()?)
    } else {
        None
    };

    let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;

    record_repository(&repo);

    // render "backup-id" as the snapshot's relative path (type/id/time)
    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    // render the archive file list (filenames only) as one formatted string
    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
        let mut filenames = Vec::new();
        for file in &item.files {
            filenames.push(file.filename.to_string());
        }
        Ok(tools::format::render_backup_file_list(&filenames[..]))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("size"))
        .column(ColumnConfig::new("files").renderer(render_files))
        ;

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOTS;

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
467
a47a02ae
DM
468#[api(
469 input: {
470 properties: {
471 repository: {
472 schema: REPO_URL_SCHEMA,
473 optional: true,
474 },
475 snapshot: {
476 type: String,
477 description: "Snapshot path.",
478 },
479 }
480 }
481)]
482/// Forget (remove) backup snapshots.
483async fn forget_snapshots(param: Value) -> Result<Value, Error> {
6f62c924 484
2665cef7 485 let repo = extract_repository_from_value(&param)?;
6f62c924
DM
486
487 let path = tools::required_string_param(&param, "snapshot")?;
a67f7d0a 488 let snapshot: BackupDir = path.parse()?;
6f62c924 489
d59dbeca 490 let mut client = connect(repo.host(), repo.user())?;
6f62c924 491
9e391bb7 492 let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
6f62c924 493
8a8a4703
DM
494 let result = client.delete(&path, Some(json!({
495 "backup-type": snapshot.group().backup_type(),
496 "backup-id": snapshot.group().backup_id(),
497 "backup-time": snapshot.backup_time().timestamp(),
498 }))).await?;
6f62c924 499
d0a03d40
DM
500 record_repository(&repo);
501
6f62c924
DM
502 Ok(result)
503}
504
a47a02ae
DM
505#[api(
506 input: {
507 properties: {
508 repository: {
509 schema: REPO_URL_SCHEMA,
510 optional: true,
511 },
512 }
513 }
514)]
515/// Try to login. If successful, store ticket.
516async fn api_login(param: Value) -> Result<Value, Error> {
e240d8be
DM
517
518 let repo = extract_repository_from_value(&param)?;
519
d59dbeca 520 let client = connect(repo.host(), repo.user())?;
8a8a4703 521 client.login().await?;
e240d8be
DM
522
523 record_repository(&repo);
524
525 Ok(Value::Null)
526}
527
a47a02ae
DM
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    // drop the cached authentication ticket for this host/user
    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}
547
9049a8cf 548
a47a02ae
DM
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List snapshot files.
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let output_format = get_output_format(&param);

    let client = connect(repo.host(), repo.user())?;

    // note: `path` is re-bound here from snapshot path to API endpoint path
    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let mut result = client.get(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).await?;

    record_repository(&repo);

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES;

    let mut data: Value = result["data"].take();

    let options = default_table_format_options();

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
599
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    // the server returns a task UPID; in text mode its log is streamed live
    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}
33d64b81 633
/// Spawn a background task that uploads the backup catalog.
///
/// Returns the shared `CatalogWriter` (fed through an in-memory channel) and
/// a oneshot receiver that yields the upload statistics once the catalog
/// stream is closed (by dropping the writer).
fn spawn_catalog_upload(
    client: Arc<BackupWriter>
) -> Result<
        (
            Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
            tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
        ), Error>
{
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    // writes into the writer end up as chunks on catalog_chunk_stream
    let catalog = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            // abort the whole backup - a backup without catalog is incomplete
            client.cancel();
        }

        // receiver may already be gone; ignore the send error in that case
        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok((catalog, catalog_result_rx))
}
666
a47a02ae
DM
#[api(
   input: {
       properties: {
           backupspec: {
               type: Array,
               description: "List of backup source specifications ([<label.ext>:<path>] ...)",
               items: {
                   schema: BACKUP_SOURCE_SCHEMA,
               }
           },
           repository: {
               schema: REPO_URL_SCHEMA,
               optional: true,
           },
           "include-dev": {
               description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
               optional: true,
               items: {
                   type: String,
                   description: "Path to file.",
               }
           },
           keyfile: {
               schema: KEYFILE_SCHEMA,
               optional: true,
           },
           "skip-lost-and-found": {
               type: Boolean,
               description: "Skip lost+found directory.",
               optional: true,
           },
           "backup-type": {
               schema: BACKUP_TYPE_SCHEMA,
               optional: true,
           },
           "backup-id": {
               schema: BACKUP_ID_SCHEMA,
               optional: true,
           },
           "backup-time": {
               schema: BACKUP_TIME_SCHEMA,
               optional: true,
           },
           "chunk-size": {
               schema: CHUNK_SIZE_SCHEMA,
               optional: true,
           },
           "exclude": {
               type: Array,
               description: "List of paths or patterns for matching files to exclude.",
               optional: true,
               items: {
                   type: String,
                   description: "Path or match pattern.",
               }
           },
           "entries-max": {
               type: Integer,
               description: "Max number of entries to hold in memory.",
               optional: true,
               default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
           },
           "verbose": {
               type: Boolean,
               description: "Verbose output.",
               optional: true,
           },
       }
   }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // --- parse and validate all parameters up front ---

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    // chunk-size parameter is in KB
    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    // default backup-id is the local node name
    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    // compile exclude patterns once; they apply to all pxar archives
    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
        );
    }

    // devices == None means "cross all file systems"
    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        // resolve each listed path to its device number (st_dev)
        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    // --- validate every backup source and build the upload work list ---

    let mut upload_list = vec![];

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);

    let client = connect(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    // --- set up encryption (optional): load key, maybe RSA-wrap it for the master key ---

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            // if a master public key exists, store an RSA-encrypted copy of
            // the backup key so the master key owner can recover it
            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    let is_encrypted = Some(crypt_config.is_some());

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
    ).await?;

    // previous manifest (if any) enables incremental/deduplicated uploads
    let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
        Some(Arc::new(previous_manifest))
    } else {
        None
    };

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
    let mut manifest = BackupManifest::new(snapshot);

    // catalog is created lazily: only pxar archives need one
    let mut catalog = None;
    let mut catalog_result_tx = None;

    // --- upload each source, recording every archive in the manifest ---

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupSpecificationType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, Some(true))
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, true, Some(true))
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let (cat, res) = spawn_catalog_upload(client.clone())?;
                    catalog = Some(cat);
                    catalog_result_tx = Some(res);
                }
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                let stats = backup_directory(
                    &client,
                    previous_manifest.clone(),
                    &filename,
                    &target,
                    chunk_size_opt,
                    devices.clone(),
                    verbose,
                    skip_lost_and_found,
                    catalog.clone(),
                    pattern_list.clone(),
                    entries_max as usize,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_image(
                    &client,
                    previous_manifest.clone(),
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
            }
        }
    }

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        // reclaim sole ownership so the writer can be closed
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_tx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, is_encrypted)?;
        }
    }

    // upload the RSA-wrapped backup key (unencrypted blob) for master-key recovery
    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, false, None)
            .await?;
        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, is_encrypted)?;

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    // create manifest (index.json)
    let manifest = manifest.into_json();

    println!("Upload index.json to '{:?}'", repo);
    let manifest = serde_json::to_string_pretty(&manifest)?.into();
    client
        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, Some(true))
        .await?;

    client.finish().await?;

    let end_time = Local::now();
    let elapsed = end_time.signed_duration_since(start_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    Ok(Value::Null)
}
1016
d0a03d40 1017fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
f98ea63d
DM
1018
1019 let mut result = vec![];
1020
1021 let data: Vec<&str> = arg.splitn(2, ':').collect();
1022
bff11030 1023 if data.len() != 2 {
8968258b
DM
1024 result.push(String::from("root.pxar:/"));
1025 result.push(String::from("etc.pxar:/etc"));
bff11030
DM
1026 return result;
1027 }
f98ea63d 1028
496a6784 1029 let files = tools::complete_file_name(data[1], param);
f98ea63d
DM
1030
1031 for file in files {
1032 result.push(format!("{}:{}", data[0], file));
1033 }
1034
1035 result
ff5d3707 1036}
1037
8e6e18b7 1038async fn dump_image<W: Write>(
88892ea8
DM
1039 client: Arc<BackupReader>,
1040 crypt_config: Option<Arc<CryptConfig>>,
1041 index: FixedIndexReader,
1042 mut writer: W,
fd04ca7a 1043 verbose: bool,
88892ea8
DM
1044) -> Result<(), Error> {
1045
1046 let most_used = index.find_most_used_chunks(8);
1047
e9764238 1048 let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
88892ea8
DM
1049
1050 // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
1051 // and thus slows down reading. Instead, directly use RemoteChunkReader
fd04ca7a
DM
1052 let mut per = 0;
1053 let mut bytes = 0;
1054 let start_time = std::time::Instant::now();
1055
88892ea8
DM
1056 for pos in 0..index.index_count() {
1057 let digest = index.index_digest(pos).unwrap();
8e6e18b7 1058 let raw_data = chunk_reader.read_chunk(&digest).await?;
88892ea8 1059 writer.write_all(&raw_data)?;
fd04ca7a
DM
1060 bytes += raw_data.len();
1061 if verbose {
1062 let next_per = ((pos+1)*100)/index.index_count();
1063 if per != next_per {
1064 eprintln!("progress {}% (read {} bytes, duration {} sec)",
1065 next_per, bytes, start_time.elapsed().as_secs());
1066 per = next_per;
1067 }
1068 }
88892ea8
DM
1069 }
1070
fd04ca7a
DM
1071 let end_time = std::time::Instant::now();
1072 let elapsed = end_time.duration_since(start_time);
1073 eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
1074 bytes,
1075 elapsed.as_secs_f64(),
1076 bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
1077 );
1078
1079
88892ea8
DM
1080 Ok(())
1081}
1082
dc155e9b 1083fn parse_archive_type(name: &str) -> (String, ArchiveType) {
2d32fe2c
TL
1084 if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
1085 (name.into(), archive_type(name).unwrap())
1086 } else if name.ends_with(".pxar") {
dc155e9b
TL
1087 (format!("{}.didx", name), ArchiveType::DynamicIndex)
1088 } else if name.ends_with(".img") {
1089 (format!("{}.fidx", name), ArchiveType::FixedIndex)
1090 } else {
1091 (format!("{}.blob", name), ArchiveType::Blob)
1092 }
1093}
1094
a47a02ae
DM
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                description: "Backup archive name.",
                type: String,
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extraxt '.pxar' archives when writing to standard output.

"###
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exists.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Restore backup repository.
///
/// Downloads one archive of a snapshot and writes it to `target` (or to
/// standard output when `target` is '-'). The handling differs by archive
/// type: the manifest and blobs are written verbatim, dynamic indexes
/// (pxar) are extracted to a directory (or piped raw to stdout), and fixed
/// indexes (images) are dumped chunk by chunk via `dump_image`.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    // A single '/' means a group path ("<type>/<id>"): resolve it to the
    // latest snapshot of that group. Otherwise parse the full snapshot path.
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    // `None` means "write to standard output".
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    // Only set up decryption when a key file was given.
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    // Open a reader session for the selected snapshot (shadows the plain
    // HTTP client above).
    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    if archive_name == MANIFEST_BLOB_NAME {
        // Manifest requested: we already have it, just serialize it out.
        let backup_index_data = manifest.into_json().to_string();
        if let Some(target) = target {
            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(backup_index_data.as_bytes())
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            // create_new: refuse to overwrite an existing target file.
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        // Seed the chunk cache with the most used chunks.
        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {
            // Extract the pxar archive into the target directory.
            proxmox_backup::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                &[],
                proxmox_backup::pxar::Flags::DEFAULT,
                allow_existing_dirs,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            // stdout: pipe the raw pxar stream, no extraction (see API doc).
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
    }

    Ok(Value::Null)
}
1267
a47a02ae
DM
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            logfile: {
                type: String,
                description: "The path to the log file you want to upload.",
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Upload backup log file.
///
/// Reads the log file, wraps it into a (optionally encrypted) data blob
/// and posts it to the datastore's `upload-backup-log` endpoint for the
/// given snapshot.
async fn upload_log(param: Value) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = snapshot.parse()?;

    let mut client = connect(repo.host(), repo.user())?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    // Optional encryption, mirroring the backup/restore key handling.
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;

    // `true`: compress the blob payload.
    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    // Identify the target snapshot via query arguments.
    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    client.upload("application/octet-stream", body, &path, Some(args)).await
}
1330
032d3ad8
DM
1331const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
1332 &ApiHandler::Async(&prune),
1333 &ObjectSchema::new(
1334 "Prune a backup repository.",
1335 &proxmox_backup::add_common_prune_prameters!([
1336 ("dry-run", true, &BooleanSchema::new(
1337 "Just show what prune would do, but do not delete anything.")
1338 .schema()),
1339 ("group", false, &StringSchema::new("Backup group.").schema()),
1340 ], [
1341 ("output-format", true, &OUTPUT_FORMAT),
c48aa39f
DM
1342 (
1343 "quiet",
1344 true,
1345 &BooleanSchema::new("Minimal output - only show removals.")
1346 .schema()
1347 ),
032d3ad8
DM
1348 ("repository", true, &REPO_URL_SCHEMA),
1349 ])
1350 )
1351);
1352
// Thin adapter: the API table needs an `ApiHandler::Async` entry point, so
// box the async implementation into an `ApiFuture`.
fn prune<'a>(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> proxmox::api::ApiFuture<'a> {
    async move {
        prune_async(param).await
    }.boxed()
}
83b7db02 1362
/// Implementation of the `prune` command.
///
/// Forwards the prune options to the server, then renders the returned
/// keep/remove list as a table. With `quiet`, only removed snapshots are
/// shown.
async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group: BackupGroup = group.parse()?;

    let output_format = get_output_format(&param);

    let quiet = param["quiet"].as_bool().unwrap_or(false);

    // Strip client-only options before forwarding `param` to the server.
    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");
    param.as_object_mut().unwrap().remove("quiet");

    // The server expects the group split into its two components.
    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    // Render "<type>/<id>/<time>" for the snapshot column.
    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    // Render the boolean "keep" flag as a human readable action.
    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
        ;

    let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;

    let mut data = result["data"].take();

    // In quiet mode keep only the entries that will be removed.
    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).map(|v| v.clone()).collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, info, &output_format, &options);

    Ok(Value::Null)
}
1427
a47a02ae
DM
1428#[api(
1429 input: {
1430 properties: {
1431 repository: {
1432 schema: REPO_URL_SCHEMA,
1433 optional: true,
1434 },
1435 "output-format": {
1436 schema: OUTPUT_FORMAT,
1437 optional: true,
1438 },
1439 }
1440 }
1441)]
1442/// Get repository status.
1443async fn status(param: Value) -> Result<Value, Error> {
34a816cc
DM
1444
1445 let repo = extract_repository_from_value(&param)?;
1446
c2043614 1447 let output_format = get_output_format(&param);
34a816cc 1448
d59dbeca 1449 let client = connect(repo.host(), repo.user())?;
34a816cc
DM
1450
1451 let path = format!("api2/json/admin/datastore/{}/status", repo.store());
1452
1dc117bb 1453 let mut result = client.get(&path, None).await?;
390c5bdd 1454 let mut data = result["data"].take();
34a816cc
DM
1455
1456 record_repository(&repo);
1457
390c5bdd
DM
1458 let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
1459 let v = v.as_u64().unwrap();
1460 let total = record["total"].as_u64().unwrap();
1461 let roundup = total/200;
1462 let per = ((v+roundup)*100)/total;
e23f5863
DM
1463 let info = format!(" ({} %)", per);
1464 Ok(format!("{} {:>8}", v, info))
390c5bdd 1465 };
1dc117bb 1466
c2043614 1467 let options = default_table_format_options()
be2425ff 1468 .noheader(true)
e23f5863 1469 .column(ColumnConfig::new("total").renderer(render_total_percentage))
390c5bdd
DM
1470 .column(ColumnConfig::new("used").renderer(render_total_percentage))
1471 .column(ColumnConfig::new("avail").renderer(render_total_percentage));
34a816cc 1472
ea5f547f 1473 let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;
390c5bdd
DM
1474
1475 format_and_print_result_full(&mut data, schema, &output_format, &options);
34a816cc
DM
1476
1477 Ok(Value::Null)
1478}
1479
// like get, but simply ignore errors and return Null instead
//
// Used by the shell-completion helpers below: completion must never fail
// or prompt, so the client is created non-interactively (credentials only
// from the environment / caches) and every error collapses to Value::Null.
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();

    // Non-interactive client: rely on cached tickets/fingerprints.
    let options = HttpClientOptions::new()
        .prefix(Some("proxmox-backup".to_string()))
        .password(password)
        .interactive(false)
        .fingerprint(fingerprint)
        .fingerprint_cache(true)
        .ticket_cache(true);

    let client = match HttpClient::new(repo.host(), repo.user(), options) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    // Unwrap the standard {"data": ...} API envelope.
    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}
1511
// Shell completion: backup groups ("<type>/<id>") of the repository.
// Sync wrapper - completion callbacks are synchronous, so run the async
// implementation on the runtime.
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    // Without a repository there is nothing to complete.
    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}
1541
// Shell completion: either a backup group or a full snapshot path.
pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    // Fewer than two '/' typed so far: still completing the group part.
    // Offer each group both as-is and with a trailing '/' so the user can
    // continue typing a snapshot time.
    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}
b2388518 1560
// Shell completion: full snapshot paths ("<type>/<id>/<time>").
fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}
1591
// Shell completion: server-side file names of the snapshot given in
// `param["snapshot"]` (e.g. "root.pxar.didx", "index.json.blob").
fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    // Need a parseable snapshot argument to query its file list.
    let snapshot: BackupDir = match param.get("snapshot") {
        Some(path) => {
            match path.parse() {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}
1635
1636fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
52c171e4 1637 complete_server_file_name(arg, param)
e9722f8b 1638 .iter()
4939255f 1639 .map(|v| tools::format::strip_server_file_expenstion(&v))
e9722f8b 1640 .collect()
08dc340a
DM
1641}
1642
43abba4b 1643pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
0ec9e1b0
DM
1644 complete_server_file_name(arg, param)
1645 .iter()
1646 .filter_map(|v| {
4939255f 1647 let name = tools::format::strip_server_file_expenstion(&v);
0ec9e1b0
DM
1648 if name.ends_with(".pxar") {
1649 Some(name)
1650 } else {
1651 None
1652 }
1653 })
1654 .collect()
1655}
1656
49811347
DM
/// Shell completion: sensible chunk sizes - powers of two from 64 up to
/// and including 4096 (KiB).
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    std::iter::successors(Some(64u64), |&size| {
        if size < 4096 { Some(size * 2) } else { None }
    })
    .map(|size| size.to_string())
    .collect()
}
1670
/// Obtain the passphrase protecting the encryption key.
///
/// Reads `PBS_ENCRYPTION_PASSWORD` from the environment first; if unset
/// and stdin is a TTY, prompts the user. Fails otherwise.
fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if tty::stdin_isatty() {
        return Ok(tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}
1691
ac716234
DM
/// CLI handler: create a new random 32 byte encryption key at `path`.
///
/// With kdf "scrypt" (the default) the key is protected by a passphrase
/// read from the TTY; with kdf "none" it is stored unprotected. The kdf
/// value is schema-validated, so any other value is unreachable.
fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // 32 bytes of kernel randomness form the actual encryption key.
    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = tty::read_and_verify_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        // Truncate to whole seconds for the stored timestamps.
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!();
    }
}
1733
9f46c7de
DM
1734fn master_pubkey_path() -> Result<PathBuf, Error> {
1735 let base = BaseDirectories::with_prefix("proxmox-backup")?;
1736
1737 // usually $HOME/.config/proxmox-backup/master-public.pem
1738 let path = base.place_config_file("master-public.pem")?;
1739
1740 Ok(path)
1741}
1742
3ea8bfc9
DM
/// CLI handler: import an RSA public key (PEM) as the master key.
///
/// The PEM data is validated by parsing it before it is copied to the
/// per-user config location returned by `master_pubkey_path`.
fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    // Reject files that are not a valid PEM public key.
    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    replace_file(&target_path, &pem_data, CreateOptions::new())?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}
1766
37c5a175
DM
/// CLI handler: generate a new 4096-bit RSA master key pair.
///
/// Writes `master-public.pem` (plain) and `master-private.pem` (AES-256-CBC
/// passphrase-protected PKCS#8) into the current working directory. The
/// passphrase is read interactively, so a TTY is required.
fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;


    let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;

    // Encrypt the private key with the passphrase before writing it out.
    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;

    Ok(Value::Null)
}
ac716234
DM
1798
/// CLI handler: change (or remove) the passphrase of an encryption key.
///
/// Decrypts the key at `path` using the current passphrase, then stores it
/// again - scrypt-protected with a newly prompted passphrase, or
/// unprotected when kdf is "none". The original creation timestamp is
/// preserved in both cases.
fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

    if kdf == "scrypt" {

        let password = tty::read_and_verify_password("New Password: ")?;

        let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key.to_vec(),
        })?;

        Ok(Value::Null)
    } else {
        // kdf is schema-validated to "scrypt" | "none"
        unreachable!();
    }
}
1842
1843fn key_mgmt_cli() -> CliCommandMap {
1844
255f378a 1845 const KDF_SCHEMA: Schema =
181f097a 1846 StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
bc0d0388
DM
1847 .format(&ApiStringFormat::Enum(&[
1848 EnumEntry::new("scrypt", "SCrypt"),
1849 EnumEntry::new("none", "Do not encrypt the key")]))
255f378a
DM
1850 .default("scrypt")
1851 .schema();
1852
552c2259 1853 #[sortable]
255f378a
DM
1854 const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
1855 &ApiHandler::Sync(&key_create),
1856 &ObjectSchema::new(
1857 "Create a new encryption key.",
552c2259 1858 &sorted!([
255f378a
DM
1859 ("path", false, &StringSchema::new("File system path.").schema()),
1860 ("kdf", true, &KDF_SCHEMA),
552c2259 1861 ]),
255f378a 1862 )
181f097a 1863 );
7074a0b3 1864
255f378a 1865 let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
49fddd98 1866 .arg_param(&["path"])
9b06db45 1867 .completion_cb("path", tools::complete_file_name);
f2401311 1868
552c2259 1869 #[sortable]
255f378a
DM
1870 const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
1871 &ApiHandler::Sync(&key_change_passphrase),
1872 &ObjectSchema::new(
1873 "Change the passphrase required to decrypt the key.",
552c2259 1874 &sorted!([
255f378a
DM
1875 ("path", false, &StringSchema::new("File system path.").schema()),
1876 ("kdf", true, &KDF_SCHEMA),
552c2259 1877 ]),
255f378a
DM
1878 )
1879 );
7074a0b3 1880
255f378a 1881 let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
49fddd98 1882 .arg_param(&["path"])
9b06db45 1883 .completion_cb("path", tools::complete_file_name);
ac716234 1884
255f378a
DM
1885 const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
1886 &ApiHandler::Sync(&key_create_master_key),
1887 &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
1888 );
7074a0b3 1889
255f378a
DM
1890 let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);
1891
552c2259 1892 #[sortable]
255f378a
DM
1893 const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
1894 &ApiHandler::Sync(&key_import_master_pubkey),
1895 &ObjectSchema::new(
1896 "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
552c2259 1897 &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
255f378a
DM
1898 )
1899 );
7074a0b3 1900
255f378a 1901 let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
49fddd98 1902 .arg_param(&["path"])
3ea8bfc9
DM
1903 .completion_cb("path", tools::complete_file_name);
1904
11377a47 1905 CliCommandMap::new()
48ef3c33
DM
1906 .insert("create", key_create_cmd_def)
1907 .insert("create-master-key", key_create_master_key_cmd_def)
1908 .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
1909 .insert("change-passphrase", key_change_passphrase_cmd_def)
f2401311
DM
1910}
1911
70235f72 1912
c443f58b
WB
1913use proxmox_backup::client::RemoteChunkReader;
1914/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
1915/// async use!
1916///
1917/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
1918/// so that we can properly access it from multiple threads simultaneously while not issuing
1919/// duplicate simultaneous reads over http.
43abba4b 1920pub struct BufferedDynamicReadAt {
c443f58b
WB
1921 inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
1922}
1923
1924impl BufferedDynamicReadAt {
1925 fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
1926 Self {
1927 inner: Mutex::new(inner),
1928 }
1929 }
1930}
1931
a6f87283
WB
1932impl ReadAt for BufferedDynamicReadAt {
1933 fn start_read_at<'a>(
1934 self: Pin<&'a Self>,
c443f58b 1935 _cx: &mut Context,
a6f87283 1936 buf: &'a mut [u8],
c443f58b 1937 offset: u64,
a6f87283 1938 ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
c443f58b 1939 use std::io::Read;
a6f87283 1940 MaybeReady::Ready(tokio::task::block_in_place(move || {
c443f58b
WB
1941 let mut reader = self.inner.lock().unwrap();
1942 reader.seek(SeekFrom::Start(offset))?;
a6f87283
WB
1943 Ok(reader.read(buf)?)
1944 }))
1945 }
1946
1947 fn poll_complete<'a>(
1948 self: Pin<&'a Self>,
1949 _op: ReadAtOperation<'a>,
1950 ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
1951 panic!("LocalDynamicReadAt::start_read_at returned Pending");
c443f58b
WB
1952 }
1953}
1954
f2401311 1955fn main() {
33d64b81 1956
255f378a 1957 let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
49fddd98 1958 .arg_param(&["backupspec"])
d0a03d40 1959 .completion_cb("repository", complete_repository)
49811347 1960 .completion_cb("backupspec", complete_backup_source)
6d0983db 1961 .completion_cb("keyfile", tools::complete_file_name)
49811347 1962 .completion_cb("chunk-size", complete_chunk_size);
f8838fe9 1963
caea8d61
DM
1964 let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
1965 .completion_cb("repository", complete_repository)
1966 .completion_cb("keyfile", tools::complete_file_name);
1967
255f378a 1968 let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
49fddd98 1969 .arg_param(&["snapshot", "logfile"])
543a260f 1970 .completion_cb("snapshot", complete_backup_snapshot)
ec34f7eb
DM
1971 .completion_cb("logfile", tools::complete_file_name)
1972 .completion_cb("keyfile", tools::complete_file_name)
1973 .completion_cb("repository", complete_repository);
1974
255f378a 1975 let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
d0a03d40 1976 .completion_cb("repository", complete_repository);
41c039e1 1977
255f378a 1978 let snapshots_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
49fddd98 1979 .arg_param(&["group"])
024f11bb 1980 .completion_cb("group", complete_backup_group)
d0a03d40 1981 .completion_cb("repository", complete_repository);
184f17af 1982
255f378a 1983 let forget_cmd_def = CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
49fddd98 1984 .arg_param(&["snapshot"])
b2388518 1985 .completion_cb("repository", complete_repository)
543a260f 1986 .completion_cb("snapshot", complete_backup_snapshot);
6f62c924 1987
255f378a 1988 let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
d0a03d40 1989 .completion_cb("repository", complete_repository);
8cc0d6af 1990
255f378a 1991 let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
49fddd98 1992 .arg_param(&["snapshot", "archive-name", "target"])
b2388518 1993 .completion_cb("repository", complete_repository)
08dc340a
DM
1994 .completion_cb("snapshot", complete_group_or_snapshot)
1995 .completion_cb("archive-name", complete_archive_name)
1996 .completion_cb("target", tools::complete_file_name);
9f912493 1997
255f378a 1998 let files_cmd_def = CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
49fddd98 1999 .arg_param(&["snapshot"])
52c171e4 2000 .completion_cb("repository", complete_repository)
543a260f 2001 .completion_cb("snapshot", complete_backup_snapshot);
52c171e4 2002
255f378a 2003 let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
49fddd98 2004 .arg_param(&["group"])
9fdc3ef4 2005 .completion_cb("group", complete_backup_group)
d0a03d40 2006 .completion_cb("repository", complete_repository);
9f912493 2007
255f378a 2008 let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
34a816cc
DM
2009 .completion_cb("repository", complete_repository);
2010
255f378a 2011 let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
e240d8be
DM
2012 .completion_cb("repository", complete_repository);
2013
255f378a 2014 let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
e240d8be 2015 .completion_cb("repository", complete_repository);
32efac1c 2016
41c039e1 2017 let cmd_def = CliCommandMap::new()
48ef3c33
DM
2018 .insert("backup", backup_cmd_def)
2019 .insert("upload-log", upload_log_cmd_def)
2020 .insert("forget", forget_cmd_def)
2021 .insert("garbage-collect", garbage_collect_cmd_def)
2022 .insert("list", list_cmd_def)
2023 .insert("login", login_cmd_def)
2024 .insert("logout", logout_cmd_def)
2025 .insert("prune", prune_cmd_def)
2026 .insert("restore", restore_cmd_def)
2027 .insert("snapshots", snapshots_cmd_def)
2028 .insert("files", files_cmd_def)
2029 .insert("status", status_cmd_def)
2030 .insert("key", key_mgmt_cli())
43abba4b 2031 .insert("mount", mount_cmd_def())
5830c205 2032 .insert("catalog", catalog_mgmt_cli())
caea8d61
DM
2033 .insert("task", task_mgmt_cli())
2034 .insert("benchmark", benchmark_cmd_def);
48ef3c33 2035
7b22acd0
DM
2036 let rpcenv = CliEnvironment::new();
2037 run_cli_command(cmd_def, rpcenv, Some(|future| {
d08bc483
DM
2038 proxmox_backup::tools::runtime::main(future)
2039 }));
ff5d3707 2040}