use std::collections::HashSet;
use std::io::{self, Read, Write, Seek, SeekFrom};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use futures::stream::{StreamExt, TryStreamExt};
use serde_json::{json, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::tools::fs::{file_get_json, replace_file, CreateOptions, image_size};
use proxmox_router::{ApiMethod, RpcEnvironment, cli::*};
use proxmox_schema::api;
use proxmox_time::{strftime_local, epoch_i64};
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use pbs_api_types::{
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, Authid, CryptMode, GroupListItem,
    PruneListItem, SnapshotListItem, StorageStatus, Fingerprint, PruneOptions,
};
use pbs_client::{
    BACKUP_SOURCE_SCHEMA,
    BackupReader,
    BackupRepository,
    BackupSpecificationType,
    BackupStats,
    BackupWriter,
    ChunkStream,
    FixedChunkStream,
    HttpClient,
    PxarBackupStream,
    RemoteChunkReader,
    UploadOptions,
    delete_ticket_info,
    parse_backup_specification,
    view_task_result,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::tools::{
    complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
    complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
    complete_img_archive_name, complete_pxar_archive_name, complete_repository, connect,
    extract_repository_from_value,
    key_source::{
        crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
        KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
    },
    CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
};
use pbs_config::key_config::{KeyConfig, decrypt_key, rsa_encrypt_key_config};
use pbs_datastore::CATALOG_NAME;
use pbs_datastore::backup_info::{BackupDir, BackupGroup};
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
use pbs_datastore::chunk_store::verify_chunk_size;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{
    ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME, ArchiveType, BackupManifest, archive_type,
};
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_tools::sync::StdChannelWriter;
use pbs_tools::tokio::TokioWriterAdapter;
use pbs_tools::json;
use pbs_tools::crypt_config::CryptConfig;

mod benchmark;
pub use benchmark::*;
mod mount;
pub use mount::*;
mod task;
pub use task::*;
mod catalog;
pub use catalog::*;
mod snapshot;
pub use snapshot::*;
pub mod key;

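/// Record a used repository in the per-user cache file
/// (usually $HOME/.cache/proxmox-backup/repo-list) so it can be offered for
/// completion later; only the most-used entries are kept and any I/O errors
/// are silently ignored.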
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}

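/// Fetch the snapshot list of a datastore via the admin API, optionally
/// restricted to a single backup group; returns the raw "data" value.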
async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}

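/// Determine the most recent snapshot of a backup group by fetching the
/// snapshot list and sorting it by backup time (newest first).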
pub async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, i64), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = list[0].backup_time;

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}

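/// Stream a directory as a pxar archive, split it into dynamically sized
/// chunks and upload it; directory entries are also recorded in the shared
/// catalog writer.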
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    catalog: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter>>>>,
    pxar_create_options: pbs_client::pxar::PxarCreateOptions,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        catalog,
        pxar_create_options,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow buffering of up to 10 chunks

    let stream = ReceiverStream::new(rx)
        .map_err(Error::from);

    // spawn the chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    if upload_options.fixed_size.is_some() {
        bail!("cannot backup directory with fixed chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}

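/// Upload a file or block device as a fixed-index image archive, using a
/// fixed chunk size (default 4 MiB).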
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    if upload_options.fixed_size.is_none() {
        bail!("cannot backup image with dynamic chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(pbs_tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
                .right_align(false)
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let return_type = &pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            group: {
                type: String,
                description: "Backup group.",
            },
            "new-owner": {
                type: Authid,
            },
        }
    }
)]
/// Change owner of a backup group
async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(&repo)?;

    param.as_object_mut().unwrap().remove("repository");

    let group: BackupGroup = group.parse()?;

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
    client.post(&path, Some(param)).await?;

    record_repository(&repo);

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Try to log in. If successful, store the ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Show client and (optionally) server version
async fn api_version(param: Value) -> Result<(), Error> {

    let output_format = get_output_format(&param);

    let mut version_info = json!({
        "client": {
            "version": pbs_buildcfg::PROXMOX_PKG_VERSION,
            "release": pbs_buildcfg::PROXMOX_PKG_RELEASE,
            "repoid": pbs_buildcfg::PROXMOX_PKG_REPOID,
        }
    });

    let repo = extract_repository_from_value(&param);
    if let Ok(repo) = repo {
        let client = connect(&repo)?;

        match client.get("api2/json/version", None).await {
            Ok(mut result) => version_info["server"] = result["data"].take(),
            Err(e) => eprintln!("could not connect to server - {}", e),
        }
    }
    if output_format == "text" {
        println!(
            "client version: {}.{}",
            pbs_buildcfg::PROXMOX_PKG_VERSION,
            pbs_buildcfg::PROXMOX_PKG_RELEASE,
        );
        if let Some(server) = version_info["server"].as_object() {
            let server_version = server["version"].as_str().unwrap();
            let server_release = server["release"].as_str().unwrap();
            println!("server version: {}.{}", server_version, server_release);
        }
    } else {
        format_and_print_result(&version_info, &output_format);
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(&mut client, result, &output_format).await?;

    Ok(Value::Null)
}

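/// Handle returned by `spawn_catalog_upload`: the shared catalog writer plus
/// a oneshot receiver for the result of the background upload task.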
struct CatalogUploadResult {
    catalog_writer: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter>>>>,
    result: tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
}

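/// Create the catalog writer and spawn a background task that streams the
/// catalog data to the server as a dynamically chunked archive (512 KiB chunk
/// size); on upload errors the backup connection is cancelled.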
fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    encrypt: bool,
) -> Result<CatalogUploadResult, Error> {
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow buffering of up to 10 writes
    let catalog_stream = pbs_tools::blocking::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(TokioWriterAdapter::new(StdChannelWriter::new(catalog_tx)))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    let upload_options = UploadOptions {
        encrypt,
        compress: true,
        ..UploadOptions::default()
    };

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok(CatalogUploadResult { catalog_writer, result: catalog_result_rx })
}

#[api(
    input: {
        properties: {
            backupspec: {
                type: Array,
                description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                items: {
                    schema: BACKUP_SOURCE_SCHEMA,
                }
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "include-dev": {
                description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                optional: true,
                items: {
                    type: String,
                    description: "Path to file.",
                }
            },
            "all-file-systems": {
                type: Boolean,
                description: "Include all mounted subdirectories.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "master-pubkey-file": {
                schema: MASTER_PUBKEY_FILE_SCHEMA,
                optional: true,
            },
            "master-pubkey-fd": {
                schema: MASTER_PUBKEY_FD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
                optional: true,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "chunk-size": {
                schema: CHUNK_SIZE_SCHEMA,
                optional: true,
            },
            "exclude": {
                type: Array,
                description: "List of paths or patterns for matching files to exclude.",
                optional: true,
                items: {
                    type: String,
                    description: "Path or match pattern.",
                }
            },
            "entries-max": {
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
                default: pbs_client::pxar::ENCODER_MAX_ENTRIES as isize,
            },
            "verbose": {
                type: Boolean,
                description: "Verbose output.",
                optional: true,
            },
        }
    }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = json::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let crypto = crypto_parameters(&param)?;

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(pbs_client::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
        );
    }

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];
    let mut target_set = HashSet::new();

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        if target_set.contains(target) {
            bail!("got target twice: '{}'", target);
        }
        target_set.insert(target.to_string());

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
        }
    }

    let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);

    let client = connect(&repo)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = std::time::Instant::now();

    println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);

    let (crypt_config, rsa_encrypted_key) = match crypto.enc_key {
        None => (None, None),
        Some(key_with_source) => {
            println!(
                "{}",
                format_key_source(&key_with_source.source, "encryption")
            );

            let (key, created, fingerprint) =
                decrypt_key(&key_with_source.key, &get_encryption_key_password)?;
            println!("Encryption key fingerprint: {}", fingerprint);

            let crypt_config = CryptConfig::new(key)?;

            match crypto.master_pubkey {
                Some(pem_with_source) => {
                    println!("{}", format_key_source(&pem_with_source.source, "master"));

                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_with_source.key)?;

                    let mut key_config = KeyConfig::without_password(key)?;
                    key_config.created = created; // keep original value

                    let enc_key = rsa_encrypt_key_config(rsa, &key_config)?;

                    (Some(Arc::new(crypt_config)), Some(enc_key))
                },
                _ => (Some(Arc::new(crypt_config)), None),
            }
        }
    };

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
        false
    ).await?;

    let download_previous_manifest = match client.previous_backup_time().await {
        Ok(Some(backup_time)) => {
            println!(
                "Downloading previous manifest ({})",
                strftime_local("%c", backup_time)?
            );
            true
        }
        Ok(None) => {
            println!("No previous manifest available.");
            false
        }
        Err(_) => {
            // Fallback for outdated server, TODO remove/bubble up with 2.0
            true
        }
    };

    let previous_manifest = if download_previous_manifest {
        match client.download_previous_manifest().await {
            Ok(previous_manifest) => {
                match previous_manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref)) {
                    Ok(()) => Some(Arc::new(previous_manifest)),
                    Err(err) => {
                        println!("Couldn't re-use previous manifest - {}", err);
                        None
                    }
                }
            }
            Err(err) => {
                println!("Couldn't download previous manifest - {}", err);
                None
            }
        }
    } else {
        None
    };

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_rx = None;

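    // Upload all requested archives; the file catalog is only created (and
    // uploaded) once the first pxar archive is encountered.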
    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupSpecificationType::CONFIG => {
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                println!("Upload config file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                println!("Upload log file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let catalog_upload_res = spawn_catalog_upload(client.clone(), crypto.mode == CryptMode::Encrypt)?;
                    catalog = Some(catalog_upload_res.catalog_writer);
                    catalog_result_rx = Some(catalog_upload_res.result);
                }
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;

                let pxar_options = pbs_client::pxar::PxarCreateOptions {
                    device_set: devices.clone(),
                    patterns: pattern_list.clone(),
                    entries_max: entries_max as usize,
                    skip_lost_and_found,
                    verbose,
                };

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    catalog.clone(),
                    pxar_options,
                    upload_options,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    fixed_size: Some(size),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                };

                let stats = backup_image(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    upload_options,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
        }
    }

    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_rx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?;
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = ENCRYPTED_KEY_BLOB_NAME;
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let options = UploadOptions { compress: false, encrypt: false, ..UploadOptions::default() };
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, options)
            .await?;
        manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?;
    }

    // create manifest (index.json)
    // manifests are never encrypted, but include a signature
    let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
        .map_err(|err| format_err!("unable to format manifest - {}", err))?;

    if verbose { println!("Upload index.json to '{}'", repo) };
    let options = UploadOptions { compress: true, encrypt: false, ..UploadOptions::default() };
    client
        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options)
        .await?;

    client.finish().await?;

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    println!("Duration: {:.2}s", elapsed.as_secs_f64());

    println!("End Time: {}", strftime_local("%c", epoch_i64())?);

    Ok(Value::Null)
}

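/// Sequentially read all chunks of a fixed index via `RemoteChunkReader` and
/// write them to `writer`, printing progress information when verbose.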
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    crypt_mode: CryptMode,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader.
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
              bytes,
              elapsed.as_secs_f64(),
              bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}

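/// Map a user supplied archive name to the on-server file name and archive
/// type, appending ".didx", ".fidx" or ".blob" when only a plain name
/// (e.g. "root.pxar" or "disk.img") was given.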
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                description: "Backup archive name.",
                type: String,
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exist.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
        }
    }
)]
/// Restore backup repository.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = json::required_string_param(&param, "archive-name")?;

    let client = connect(&repo)?;

    record_repository(&repo);

    let path = json::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = json::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let crypto = crypto_parameters(&param)?;

    let crypt_config = match crypto.enc_key {
        None => None,
        Some(ref key) => {
            let (key, _, _) =
                decrypt_key(&key.key, &get_encryption_key_password).map_err(|err| {
                    eprintln!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    let (manifest, backup_index_data) = client.download_manifest().await?;

    if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
        eprintln!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
    } else {
        if manifest.signature.is_some() {
            if let Some(key) = &crypto.enc_key {
                eprintln!("{}", format_key_source(&key.source, "encryption"));
            }
            if let Some(config) = &crypt_config {
                eprintln!("Fingerprint: {}", Fingerprint::new(config.fingerprint()));
            }
        }
        manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
    }

    if archive_name == MANIFEST_BLOB_NAME {
        if let Some(target) = target {
            replace_file(target, &backup_index_data, CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

        return Ok(Value::Null);
    }

    let file_info = manifest.lookup_file_info(&archive_name)?;

    if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let options = pbs_client::pxar::PxarExtractOptions {
            match_list: &[],
            extract_match_default: true,
            allow_existing_dirs,
            on_error: None,
        };

        if let Some(target) = target {
            pbs_client::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                pbs_client::pxar::Flags::DEFAULT,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
                options,
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "dry-run": {
                type: bool,
                optional: true,
                description: "Just show what prune would do, but do not delete anything.",
            },
            group: {
                type: String,
                description: "Backup group",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            quiet: {
                type: bool,
                optional: true,
                default: false,
                description: "Minimal output - only show removals.",
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Prune a backup repository.
async fn prune(
    dry_run: Option<bool>,
    group: String,
    prune_options: PruneOptions,
    quiet: bool,
    mut param: Value
) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group: BackupGroup = group.parse()?;

    let output_format = extract_output_format(&mut param);

    let mut api_param = serde_json::to_value(prune_options)?;
    if let Some(dry_run) = dry_run {
        api_param["dry-run"] = dry_run.into();
    }
    api_param["backup-type"] = group.backup_type().into();
    api_param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(api_param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(pbs_tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
        ;

    let return_type = &pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).cloned().collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

1330 | ||
1331 | #[api( | |
1332 | input: { | |
1333 | properties: { | |
1334 | repository: { | |
1335 | schema: REPO_URL_SCHEMA, | |
1336 | optional: true, | |
1337 | }, | |
1338 | "output-format": { | |
1339 | schema: OUTPUT_FORMAT, | |
1340 | optional: true, | |
1341 | }, | |
1342 | } | |
1343 | }, | |
1344 | returns: { | |
1345 | type: StorageStatus, | |
1346 | }, | |
1347 | )] | |
1348 | /// Get repository status. | |
1349 | async fn status(param: Value) -> Result<Value, Error> { | |
1350 | ||
1351 | let repo = extract_repository_from_value(¶m)?; | |
1352 | ||
1353 | let output_format = get_output_format(¶m); | |
1354 | ||
1355 | let client = connect(&repo)?; | |
1356 | ||
1357 | let path = format!("api2/json/admin/datastore/{}/status", repo.store()); | |
1358 | ||
1359 | let mut result = client.get(&path, None).await?; | |
1360 | let mut data = result["data"].take(); | |
1361 | ||
1362 | record_repository(&repo); | |
1363 | ||
1364 | let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> { | |
1365 | let v = v.as_u64().unwrap(); | |
1366 | let total = record["total"].as_u64().unwrap(); | |
1367 | let roundup = total/200; | |
1368 | let per = ((v+roundup)*100)/total; | |
1369 | let info = format!(" ({} %)", per); | |
1370 | Ok(format!("{} {:>8}", v, info)) | |
1371 | }; | |
1372 | ||
1373 | let options = default_table_format_options() | |
1374 | .noheader(true) | |
1375 | .column(ColumnConfig::new("total").renderer(render_total_percentage)) | |
1376 | .column(ColumnConfig::new("used").renderer(render_total_percentage)) | |
1377 | .column(ColumnConfig::new("avail").renderer(render_total_percentage)); | |
1378 | ||
1379 | let return_type = &API_METHOD_STATUS.returns; | |
1380 | ||
1381 | format_and_print_result_full(&mut data, return_type, &output_format, &options); | |
1382 | ||
1383 | Ok(Value::Null) | |
1384 | } | |
1385 | ||
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
pub struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}

impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        _cx: &mut Context,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            Ok(reader.read(buf)?)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("BufferedDynamicReadAt::start_read_at returned Pending");
    }
}

fn main() {

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", complete_file_name)
        .completion_cb("master-pubkey-file", complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", complete_repository)
        .completion_cb("keyfile", complete_file_name);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", complete_file_name);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
        .completion_cb("repository", complete_repository);

    let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
        .arg_param(&["group", "new-owner"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("new-owner", complete_auth_id)
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshot", snapshot_mgtm_cli())
        .insert("status", status_cmd_def)
        .insert("key", key::cli())
        .insert("mount", mount_cmd_def())
        .insert("map", map_cmd_def())
        .insert("unmap", unmap_cmd_def())
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli())
        .insert("version", version_cmd_def)
        .insert("benchmark", benchmark_cmd_def)
        .insert("change-owner", change_owner_cmd_def)

        .alias(&["files"], &["snapshot", "files"])
        .alias(&["forget"], &["snapshot", "forget"])
        .alias(&["upload-log"], &["snapshot", "upload-log"])
        .alias(&["snapshots"], &["snapshot", "list"])
        ;

    let rpcenv = CliEnvironment::new();
    run_cli_command(cmd_def, rpcenv, Some(|future| {
        pbs_runtime::main(future)
    }));
}