use std::collections::{HashSet, HashMap};
use std::convert::TryFrom;

use chrono::{TimeZone, Local};
use anyhow::{bail, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox::try_block;
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use crate::api2::types::*;
use crate::backup::*;
use crate::config::datastore;
use crate::server::WorkerTask;
use crate::tools;
use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_ALLOCATE_SPACE};
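
/// Read the snapshot manifest ("index.json.blob") and return the list of
/// archive files it describes, including the manifest blob itself.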
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {

    let mut path = store.base_path();
    path.push(backup_dir.relative_path());
    path.push("index.json.blob");

    let raw_data = file_get_contents(&path)?;
    let index_size = raw_data.len() as u64;
    let blob = DataBlob::from_raw(raw_data)?;

    let manifest = BackupManifest::try_from(blob)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: "index.json.blob".to_string(),
        size: Some(index_size),
    });

    Ok(result)
}
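
/// Group a flat backup list by backup group ("<backup-type>/<backup-id>").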
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert(vec![]);
        time_list.push(info);
    }

    group_hash
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// List backup groups.
fn list_groups(
    store: String,
) -> Result<Vec<GroupListItem>, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = Vec::new();

    for (_group_id, mut list) in group_hash {
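
        // sort the group's snapshots, newest first; the first entry
        // provides the summary data (last backup time, file list)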
        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];
        let group = info.backup_dir.group();

        let result_item = GroupListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            last_backup: info.backup_dir.backup_time().timestamp(),
            backup_count: list.len() as u64,
            files: info.files.clone(),
        };
        groups.push(result_item);
    }

    Ok(groups)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;
    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let mut files = read_backup_index(&datastore, &snapshot)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
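
    // collect the filenames already listed in the manifest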
    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });
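
    // add files that exist on disk but are not listed in the manifest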
    for file in info.files {
        if file_set.contains(&file) { continue; }
        files.push(BackupContent { filename: file, size: None });
    }

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_ALLOCATE_SPACE, false),
    },
)]
/// Delete backup snapshot.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let datastore = DataStore::lookup_datastore(&store)?;

    datastore.remove_backup_dir(&snapshot)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = param["backup-type"].as_str();
    let backup_id = param["backup-id"].as_str();

    let datastore = DataStore::lookup_datastore(store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];
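
    // collect snapshots, applying the optional backup-type/backup-id filters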
    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let mut result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time().timestamp(),
            files: info.files,
            size: None,
        };
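
        // if the manifest is readable, report the total size of all archives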
        if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
            let mut backup_size = 0;
            for item in index.iter() {
                if let Some(item_size) = item.size {
                    backup_size += item_size;
                }
            }
            result_item.size = Some(backup_size);
        }

        snapshots.push(result_item);
    }

    Ok(snapshots)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();
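
    // query filesystem statistics for the datastore base directory via statfs64(2)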
    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    nix::errno::Errno::result(res)?;

    let bsize = stat.f_bsize as u64;

    Ok(StorageStatus {
        total: stat.f_blocks * bsize,
        used: (stat.f_blocks - stat.f_bfree) * bsize,
        avail: stat.f_bavail * bsize,
    })
}
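
/// Expand to a schema property list containing the common `keep-*` prune
/// parameters, spliced between the entries of the first and the (optional)
/// second list; see `API_METHOD_PRUNE` below for a typical invocation.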
#[macro_export]
macro_rules! add_common_prune_parameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_parameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &IntegerSchema::new("Number of daily backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-hourly",
                true,
                &IntegerSchema::new("Number of hourly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-last",
                true,
                &IntegerSchema::new("Number of backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-monthly",
                true,
                &IntegerSchema::new("Number of monthly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-weekly",
                true,
                &IntegerSchema::new("Number of weekly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-yearly",
                true,
                &IntegerSchema::new("Number of yearly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            $( $list2 )*
        ]
    }
}

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_parameters!([
399 | ("backup-id", false, &BACKUP_ID_SCHEMA), | |
400 | ("backup-type", false, &BACKUP_TYPE_SCHEMA), | |
401 | ("dry-run", true, &BooleanSchema::new( | |
402 | "Just show what prune would do, but do not delete anything.") | |
403 | .schema() | |
404 | ), | |
405 | ],[ | |
406 | ("store", false, &DATASTORE_SCHEMA), | |
407 | ]) | |
408 | ) | |
409 | ).access(None, &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_ALLOCATE_SPACE, false)); | |
410 | ||
411 | fn prune( | |
412 | param: Value, | |
413 | _info: &ApiMethod, | |
414 | _rpcenv: &mut dyn RpcEnvironment, | |
415 | ) -> Result<Value, Error> { | |
416 | ||
417 | let store = param["store"].as_str().unwrap(); | |
418 | ||
419 | let backup_type = tools::required_string_param(¶m, "backup-type")?; | |
420 | let backup_id = tools::required_string_param(¶m, "backup-id")?; | |
421 | ||
422 | let dry_run = param["dry-run"].as_bool().unwrap_or(false); | |
423 | ||
424 | let group = BackupGroup::new(backup_type, backup_id); | |
425 | ||
426 | let datastore = DataStore::lookup_datastore(store)?; | |
427 | ||
428 | let prune_options = PruneOptions { | |
429 | keep_last: param["keep-last"].as_u64(), | |
430 | keep_hourly: param["keep-hourly"].as_u64(), | |
431 | keep_daily: param["keep-daily"].as_u64(), | |
432 | keep_weekly: param["keep-weekly"].as_u64(), | |
433 | keep_monthly: param["keep-monthly"].as_u64(), | |
434 | keep_yearly: param["keep-yearly"].as_u64(), | |
435 | }; | |
436 | ||
437 | let worker_id = format!("{}_{}_{}", store, backup_type, backup_id); | |
438 | ||
439 | let mut prune_result = Vec::new(); | |
440 | ||
441 | let list = group.list_backups(&datastore.base_path())?; | |
442 | ||
443 | let mut prune_info = compute_prune_info(list, &prune_options)?; | |
444 | ||
445 | prune_info.reverse(); // delete older snapshots first | |
446 | ||
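
    // without any keep-* option given, prune keeps all snapshots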
    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;

    let result = try_block! {
        if keep_all {
            worker.log("No prune selection - keeping all files.");
        } else {
            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
            worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                store, backup_type, backup_id));
        }

        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let timestamp = BackupDir::backup_time_to_string(backup_time);
            let group = info.backup_dir.group();

            let msg = format!(
                "{}/{}/{} {}",
                group.backup_type(),
                group.backup_id(),
                timestamp,
                if keep { "keep" } else { "remove" },
            );

            worker.log(msg);

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));

            if !(dry_run || keep) {
                datastore.remove_backup_dir(&info.backup_dir)?;
            }
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    }

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_ALLOCATE_SPACE, false),
    },
)]
/// Start garbage collection.
fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);
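
    // when invoked from the CLI, also print the task log to stdout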
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
        {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(worker)
        })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    access: {
        permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Datastore list
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    Ok(config.convert_to_array("store", None, &[]))
}
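
// The following two methods use ApiHandler::AsyncHttp, so they can stream
// raw file data instead of returning JSON.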
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_ALLOCATE_SPACE, false));

fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;

        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
                 backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(path)
            .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
            .await?;
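
        // stream the file contents as a chunked HTTP response body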
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log into a backup snapshot ('client.log.blob').",
670 | &sorted!([ | |
671 | ("store", false, &DATASTORE_SCHEMA), | |
672 | ("backup-type", false, &BACKUP_TYPE_SCHEMA), | |
673 | ("backup-id", false, &BACKUP_ID_SCHEMA), | |
674 | ("backup-time", false, &BACKUP_TIME_SCHEMA), | |
675 | ]), | |
676 | ) | |
677 | ).access(None, &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_ALLOCATE_SPACE, false)); | |
678 | ||
679 | fn upload_backup_log( | |
680 | _parts: Parts, | |
681 | req_body: Body, | |
682 | param: Value, | |
683 | _info: &ApiMethod, | |
684 | _rpcenv: Box<dyn RpcEnvironment>, | |
685 | ) -> ApiResponseFuture { | |
686 | ||
687 | async move { | |
688 | let store = tools::required_string_param(¶m, "store")?; | |
689 | ||
690 | let datastore = DataStore::lookup_datastore(store)?; | |
691 | ||
692 | let file_name = "client.log.blob"; | |
693 | ||
694 | let backup_type = tools::required_string_param(¶m, "backup-type")?; | |
695 | let backup_id = tools::required_string_param(¶m, "backup-id")?; | |
696 | let backup_time = tools::required_integer_param(¶m, "backup-time")?; | |
697 | ||
698 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time); | |
699 | ||
700 | let mut path = datastore.base_path(); | |
701 | path.push(backup_dir.relative_path()); | |
702 | path.push(&file_name); | |
703 | ||
704 | if path.exists() { | |
705 | bail!("backup already contains a log."); | |
706 | } | |
707 | ||
708 | println!("Upload backup log to {}/{}/{}/{}/{}", store, | |
709 | backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name); | |
710 | ||
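
        // collect the whole request body into a single buffer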
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        let blob = DataBlob::from_raw(data)?;
        // always verify CRC at server side
        blob.verify_crc()?;
        let raw_data = blob.raw_data();
        replace_file(&path, raw_data, CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
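
// sub-directory map for a single datastore; the #[sortable] attribute keeps
// the entries sorted, as the router lookup expects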
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);