use std::collections::HashSet;
use std::io::{self, Read, Write, Seek, SeekFrom};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use futures::stream::{StreamExt, TryStreamExt};
use serde_json::{json, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox_io::StdChannelWriter;
use proxmox_sys::fs::{file_get_json, replace_file, CreateOptions, image_size};
use proxmox_router::{ApiMethod, RpcEnvironment, cli::*};
use proxmox_schema::api;
use proxmox_time::{strftime_local, epoch_i64};
use proxmox_async::blocking::TokioWriterAdapter;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use pbs_api_types::{
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
    TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
    Authid, CryptMode, Fingerprint, GroupListItem, HumanByte,
    PruneListItem, PruneOptions, RateLimitConfig, SnapshotListItem,
    StorageStatus,
};
use pbs_client::{
    BACKUP_SOURCE_SCHEMA,
    BackupReader,
    BackupRepository,
    BackupSpecificationType,
    BackupStats,
    BackupWriter,
    ChunkStream,
    FixedChunkStream,
    HttpClient,
    PxarBackupStream,
    RemoteChunkReader,
    UploadOptions,
    delete_ticket_info,
    parse_backup_specification,
    view_task_result,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::tools::{
    complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
    complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
    complete_img_archive_name, complete_pxar_archive_name, complete_repository, connect,
    connect_rate_limited, extract_repository_from_value,
    key_source::{
        crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
        KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
    },
    CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
};
use pbs_config::key_config::{KeyConfig, decrypt_key, rsa_encrypt_key_config};
use pbs_datastore::CATALOG_NAME;
use pbs_datastore::backup_info::{BackupDir, BackupGroup};
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
use pbs_datastore::chunk_store::verify_chunk_size;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{
    ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME, ArchiveType, BackupManifest, archive_type,
};
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_tools::json;
use pbs_tools::crypt_config::CryptConfig;

mod benchmark;
pub use benchmark::*;
mod mount;
pub use mount::*;
mod task;
pub use task::*;
mod catalog;
pub use catalog::*;
mod snapshot;
pub use snapshot::*;
pub mod key;

fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() >= 10 { // store at most 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new(), false);
}

async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}

pub async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, i64), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = list[0].backup_time;

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}

async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    catalog: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter<Error>>>>>,
    pxar_create_options: pbs_client::pxar::PxarCreateOptions,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        catalog,
        pxar_create_options,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // buffer up to 10 chunks

    let stream = ReceiverStream::new(rx)
        .map_err(Error::from);

    // spawn the chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    if upload_options.fixed_size.is_some() {
        bail!("cannot backup directory with fixed chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}

async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    if upload_options.fixed_size.is_none() {
        bail!("cannot backup image with dynamic chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(pbs_tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
                .right_align(false)
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let return_type = &pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            group: {
                type: String,
                description: "Backup group.",
            },
            "new-owner": {
                type: Authid,
            },
        }
    }
)]
/// Change the owner of a backup group.
async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    param.as_object_mut().unwrap().remove("repository");

    let group: BackupGroup = group.parse()?;

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
    client.post(&path, Some(param)).await?;

    record_repository(&repo);

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Try to log in. If successful, store the ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Show client and (optionally) server version.
async fn api_version(param: Value) -> Result<(), Error> {

    let output_format = get_output_format(&param);

    let mut version_info = json!({
        "client": {
            "version": pbs_buildcfg::PROXMOX_PKG_VERSION,
            "release": pbs_buildcfg::PROXMOX_PKG_RELEASE,
            "repoid": pbs_buildcfg::PROXMOX_PKG_REPOID,
        }
    });

    let repo = extract_repository_from_value(&param);
    if let Ok(repo) = repo {
        let client = connect(&repo)?;

        match client.get("api2/json/version", None).await {
            Ok(mut result) => version_info["server"] = result["data"].take(),
            Err(e) => eprintln!("could not connect to server - {}", e),
        }
    }
    if output_format == "text" {
        println!(
            "client version: {}.{}",
            pbs_buildcfg::PROXMOX_PKG_VERSION,
            pbs_buildcfg::PROXMOX_PKG_RELEASE,
        );
        if let Some(server) = version_info["server"].as_object() {
            let server_version = server["version"].as_str().unwrap();
            let server_release = server["release"].as_str().unwrap();
            println!("server version: {}.{}", server_version, server_release);
        }
    } else {
        format_and_print_result(&version_info, &output_format);
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(&client, result, &output_format).await?;

    Ok(Value::Null)
}

struct CatalogUploadResult {
    catalog_writer: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter<Error>>>>>,
    result: tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
}

fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    encrypt: bool,
) -> Result<CatalogUploadResult, Error> {
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // buffer up to 10 writes
    let catalog_stream = proxmox_async::blocking::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(TokioWriterAdapter::new(StdChannelWriter::new(catalog_tx)))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    let upload_options = UploadOptions {
        encrypt,
        compress: true,
        ..UploadOptions::default()
    };

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok(CatalogUploadResult { catalog_writer, result: catalog_result_rx })
}

#[api(
    input: {
        properties: {
            backupspec: {
                type: Array,
                description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                items: {
                    schema: BACKUP_SOURCE_SCHEMA,
                }
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "include-dev": {
                description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                optional: true,
                items: {
                    type: String,
                    description: "Path to file.",
                }
            },
            "all-file-systems": {
                type: Boolean,
                description: "Include all mounted subdirectories.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "master-pubkey-file": {
                schema: MASTER_PUBKEY_FILE_SCHEMA,
                optional: true,
            },
            "master-pubkey-fd": {
                schema: MASTER_PUBKEY_FD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
                optional: true,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "chunk-size": {
                schema: CHUNK_SIZE_SCHEMA,
                optional: true,
            },
            rate: {
                schema: TRAFFIC_CONTROL_RATE_SCHEMA,
                optional: true,
            },
            burst: {
                schema: TRAFFIC_CONTROL_BURST_SCHEMA,
                optional: true,
            },
            "exclude": {
                type: Array,
                description: "List of paths or patterns for matching files to exclude.",
                optional: true,
                items: {
                    type: String,
                    description: "Path or match pattern.",
                }
            },
            "entries-max": {
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
                default: pbs_client::pxar::ENCODER_MAX_ENTRIES as isize,
            },
            "verbose": {
                type: Boolean,
                description: "Verbose output.",
                optional: true,
            },
        }
    }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = json::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let rate = match param["rate"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };
    let burst = match param["burst"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };

    let rate_limit = RateLimitConfig::with_same_inout(rate, burst);

    let crypto = crypto_parameters(&param)?;

    let backup_id = param["backup-id"].as_str().unwrap_or(proxmox_sys::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(pbs_client::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
        );
    }

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];
    let mut target_set = HashSet::new();

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        if target_set.contains(target) {
            bail!("got target twice: '{}'", target);
        }
        target_set.insert(target.to_string());

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
        }
    }

    let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);

    let client = connect_rate_limited(&repo, rate_limit)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);

    println!("Client name: {}", proxmox_sys::nodename());

    let start_time = std::time::Instant::now();

    println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);

    let (crypt_config, rsa_encrypted_key) = match crypto.enc_key {
        None => (None, None),
        Some(key_with_source) => {
            println!(
                "{}",
                format_key_source(&key_with_source.source, "encryption")
            );

            let (key, created, fingerprint) =
                decrypt_key(&key_with_source.key, &get_encryption_key_password)?;
            println!("Encryption key fingerprint: {}", fingerprint);

            let crypt_config = CryptConfig::new(key)?;

            match crypto.master_pubkey {
                Some(pem_with_source) => {
                    println!("{}", format_key_source(&pem_with_source.source, "master"));

                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_with_source.key)?;

                    let mut key_config = KeyConfig::without_password(key)?;
                    key_config.created = created; // keep original value

                    let enc_key = rsa_encrypt_key_config(rsa, &key_config)?;

                    (Some(Arc::new(crypt_config)), Some(enc_key))
                },
                _ => (Some(Arc::new(crypt_config)), None),
            }
        }
    };

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        backup_type,
        backup_id,
        backup_time,
        verbose,
        false
    ).await?;

    let download_previous_manifest = match client.previous_backup_time().await {
        Ok(Some(backup_time)) => {
            println!(
                "Downloading previous manifest ({})",
                strftime_local("%c", backup_time)?
            );
            true
        }
        Ok(None) => {
            println!("No previous manifest available.");
            false
        }
        Err(_) => {
            // Fallback for outdated server, TODO remove/bubble up with 2.0
            true
        }
    };

    let previous_manifest = if download_previous_manifest {
        match client.download_previous_manifest().await {
            Ok(previous_manifest) => {
                match previous_manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref)) {
                    Ok(()) => Some(Arc::new(previous_manifest)),
                    Err(err) => {
                        println!("Couldn't re-use previous manifest - {}", err);
                        None
                    }
                }
            }
            Err(err) => {
                println!("Couldn't download previous manifest - {}", err);
                None
            }
        }
    } else {
        None
    };

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_rx = None;

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupSpecificationType::CONFIG => {
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                println!("Upload config file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                println!("Upload log file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let catalog_upload_res = spawn_catalog_upload(client.clone(), crypto.mode == CryptMode::Encrypt)?;
                    catalog = Some(catalog_upload_res.catalog_writer);
                    catalog_result_rx = Some(catalog_upload_res.result);
                }
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;

                let pxar_options = pbs_client::pxar::PxarCreateOptions {
                    device_set: devices.clone(),
                    patterns: pattern_list.clone(),
                    entries_max: entries_max as usize,
                    skip_lost_and_found,
                    verbose,
                };

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    catalog.clone(),
                    pxar_options,
                    upload_options,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    fixed_size: Some(size),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                };

                let stats = backup_image(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    upload_options,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
        }
    }

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_rx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?;
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = ENCRYPTED_KEY_BLOB_NAME;
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let options = UploadOptions { compress: false, encrypt: false, ..UploadOptions::default() };
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, options)
            .await?;
        manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?;
    }

    // create manifest (index.json)
    // manifests are never encrypted, but include a signature
    let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
        .map_err(|err| format_err!("unable to format manifest - {}", err))?;

    if verbose { println!("Upload index.json to '{}'", repo) };
    let options = UploadOptions { compress: true, encrypt: false, ..UploadOptions::default() };
    client
        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options)
        .await?;

    client.finish().await?;

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    println!("Duration: {:.2}s", elapsed.as_secs_f64());

    println!("End Time: {}", strftime_local("%c", epoch_i64())?);

    Ok(Value::Null)
}

async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    crypt_mode: CryptMode,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader.
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
              bytes,
              elapsed.as_secs_f64(),
              bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}

fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                description: "Backup archive name.",
                type: String,
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            },
            rate: {
                schema: TRAFFIC_CONTROL_RATE_SCHEMA,
                optional: true,
            },
            burst: {
                schema: TRAFFIC_CONTROL_BURST_SCHEMA,
                optional: true,
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exist.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
        }
    }
)]
/// Restore backup repository.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = json::required_string_param(&param, "archive-name")?;

    let rate = match param["rate"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };
    let burst = match param["burst"].as_str() {
        Some(s) => Some(s.parse::<HumanByte>()?),
        None => None,
    };

    let rate_limit = RateLimitConfig::with_same_inout(rate, burst);

    let client = connect_rate_limited(&repo, rate_limit)?;
    record_repository(&repo);

    let path = json::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = json::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let crypto = crypto_parameters(&param)?;

    let crypt_config = match crypto.enc_key {
        None => None,
        Some(ref key) => {
            let (key, _, _) =
                decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
                    eprintln!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    let (manifest, backup_index_data) = client.download_manifest().await?;

    if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
        eprintln!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
    } else {
        if manifest.signature.is_some() {
            if let Some(key) = &crypto.enc_key {
                eprintln!("{}", format_key_source(&key.source, "encryption"));
            }
            if let Some(config) = &crypt_config {
                eprintln!("Fingerprint: {}", Fingerprint::new(config.fingerprint()));
            }
        }
        manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
    }

    if archive_name == MANIFEST_BLOB_NAME {
        if let Some(target) = target {
            replace_file(target, &backup_index_data, CreateOptions::new(), false)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

        return Ok(Value::Null);
    }

    let file_info = manifest.lookup_file_info(&archive_name)?;

    if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let options = pbs_client::pxar::PxarExtractOptions {
            match_list: &[],
            extract_match_default: true,
            allow_existing_dirs,
            on_error: None,
        };

        if let Some(target) = target {
            pbs_client::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                pbs_client::pxar::Flags::DEFAULT,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
                options,
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "dry-run": {
                type: bool,
                optional: true,
                description: "Just show what prune would do, but do not delete anything.",
            },
            group: {
                type: String,
                description: "Backup group",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            quiet: {
                type: bool,
                optional: true,
                default: false,
                description: "Minimal output - only show removals.",
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Prune a backup repository.
async fn prune(
    dry_run: Option<bool>,
    group: String,
    prune_options: PruneOptions,
    quiet: bool,
    mut param: Value
) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group: BackupGroup = group.parse()?;

    let output_format = extract_output_format(&mut param);

    let mut api_param = serde_json::to_value(prune_options)?;
    if let Some(dry_run) = dry_run {
        api_param["dry-run"] = dry_run.into();
    }
    api_param["backup-type"] = group.backup_type().into();
    api_param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(api_param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(pbs_tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
        ;

    let return_type = &pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).cloned().collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    },
    returns: {
        type: StorageStatus,
    },
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);

    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
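        // round to the nearest percent: total/200 is half a percent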
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({} %)", per);
        Ok(format!("{} {:>8}", v, info))
    };

    let options = default_table_format_options()
        .noheader(true)
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let return_type = &API_METHOD_STATUS.returns;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
pub struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}

impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        _cx: &mut Context,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            Ok(reader.read(buf)?)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("BufferedDynamicReadAt::start_read_at returned Pending");
    }
}

fn main() {
    pbs_tools::setup_libc_malloc_opts();

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", complete_file_name)
        .completion_cb("master-pubkey-file", complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", complete_repository)
        .completion_cb("keyfile", complete_file_name);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", complete_file_name);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
        .completion_cb("repository", complete_repository);

    let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
        .arg_param(&["group", "new-owner"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("new-owner", complete_auth_id)
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshot", snapshot_mgtm_cli())
        .insert("status", status_cmd_def)
        .insert("key", key::cli())
        .insert("mount", mount_cmd_def())
        .insert("map", map_cmd_def())
        .insert("unmap", unmap_cmd_def())
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli())
        .insert("version", version_cmd_def)
        .insert("benchmark", benchmark_cmd_def)
        .insert("change-owner", change_owner_cmd_def)

        .alias(&["files"], &["snapshot", "files"])
        .alias(&["forget"], &["snapshot", "forget"])
        .alias(&["upload-log"], &["snapshot", "upload-log"])
        .alias(&["snapshots"], &["snapshot", "list"])
        ;

    let rpcenv = CliEnvironment::new();
    run_cli_command(cmd_def, rpcenv, Some(|future| {
        proxmox_async::runtime::main(future)
    }));
}