]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
garbage_collect: call fail_on_abort to abort GV when requested.
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 use std::collections::{HashSet, HashMap};
2 use std::convert::TryFrom;
3
4 use chrono::{TimeZone, Local};
5 use anyhow::{bail, Error};
6 use futures::*;
7 use hyper::http::request::Parts;
8 use hyper::{header, Body, Response, StatusCode};
9 use serde_json::{json, Value};
10
11 use proxmox::api::{
12 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
13 RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
14 use proxmox::api::router::SubdirMap;
15 use proxmox::api::schema::*;
16 use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
17 use proxmox::try_block;
18 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
19
20 use crate::api2::types::*;
21 use crate::backup::*;
22 use crate::config::datastore;
23 use crate::config::cached_user_info::CachedUserInfo;
24
25 use crate::server::WorkerTask;
26 use crate::tools;
27 use crate::config::acl::{
28 PRIV_DATASTORE_AUDIT,
29 PRIV_DATASTORE_MODIFY,
30 PRIV_DATASTORE_READ,
31 PRIV_DATASTORE_PRUNE,
32 PRIV_DATASTORE_BACKUP,
33 };
34
35 fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
36 let owner = store.get_owner(group)?;
37 if &owner != userid {
38 bail!("backup owner check failed ({} != {})", userid, owner);
39 }
40 Ok(())
41 }
42
43 fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
44
45 let mut path = store.base_path();
46 path.push(backup_dir.relative_path());
47 path.push("index.json.blob");
48
49 let raw_data = file_get_contents(&path)?;
50 let index_size = raw_data.len() as u64;
51 let blob = DataBlob::from_raw(raw_data)?;
52
53 let manifest = BackupManifest::try_from(blob)?;
54
55 let mut result = Vec::new();
56 for item in manifest.files() {
57 result.push(BackupContent {
58 filename: item.filename.clone(),
59 size: Some(item.size),
60 });
61 }
62
63 result.push(BackupContent {
64 filename: "index.json.blob".to_string(),
65 size: Some(index_size),
66 });
67
68 Ok(result)
69 }
70
71 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
72
73 let mut group_hash = HashMap::new();
74
75 for info in backup_list {
76 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
77 let time_list = group_hash.entry(group_id).or_insert(vec![]);
78 time_list.push(info);
79 }
80
81 group_hash
82 }
83
84 #[api(
85 input: {
86 properties: {
87 store: {
88 schema: DATASTORE_SCHEMA,
89 },
90 },
91 },
92 returns: {
93 type: Array,
94 description: "Returns the list of backup groups.",
95 items: {
96 type: GroupListItem,
97 }
98 },
99 access: {
100 permission: &Permission::Privilege(
101 &["datastore", "{store}"],
102 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
103 true),
104 },
105 )]
106 /// List backup groups.
107 fn list_groups(
108 store: String,
109 rpcenv: &mut dyn RpcEnvironment,
110 ) -> Result<Vec<GroupListItem>, Error> {
111
112 let username = rpcenv.get_user().unwrap();
113 let user_info = CachedUserInfo::new()?;
114 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
115
116 let datastore = DataStore::lookup_datastore(&store)?;
117
118 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
119
120 let group_hash = group_backups(backup_list);
121
122 let mut groups = Vec::new();
123
124 for (_group_id, mut list) in group_hash {
125
126 BackupInfo::sort_list(&mut list, false);
127
128 let info = &list[0];
129
130 let group = info.backup_dir.group();
131
132 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
133 if !list_all {
134 let owner = datastore.get_owner(group)?;
135 if owner != username { continue; }
136 }
137
138 let result_item = GroupListItem {
139 backup_type: group.backup_type().to_string(),
140 backup_id: group.backup_id().to_string(),
141 last_backup: info.backup_dir.backup_time().timestamp(),
142 backup_count: list.len() as u64,
143 files: info.files.clone(),
144 };
145 groups.push(result_item);
146 }
147
148 Ok(groups)
149 }
150
151 #[api(
152 input: {
153 properties: {
154 store: {
155 schema: DATASTORE_SCHEMA,
156 },
157 "backup-type": {
158 schema: BACKUP_TYPE_SCHEMA,
159 },
160 "backup-id": {
161 schema: BACKUP_ID_SCHEMA,
162 },
163 "backup-time": {
164 schema: BACKUP_TIME_SCHEMA,
165 },
166 },
167 },
168 returns: {
169 type: Array,
170 description: "Returns the list of archive files inside a backup snapshots.",
171 items: {
172 type: BackupContent,
173 }
174 },
175 access: {
176 permission: &Permission::Privilege(
177 &["datastore", "{store}"],
178 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
179 true),
180 },
181 )]
182 /// List snapshot files.
183 pub fn list_snapshot_files(
184 store: String,
185 backup_type: String,
186 backup_id: String,
187 backup_time: i64,
188 _info: &ApiMethod,
189 rpcenv: &mut dyn RpcEnvironment,
190 ) -> Result<Vec<BackupContent>, Error> {
191
192 let username = rpcenv.get_user().unwrap();
193 let user_info = CachedUserInfo::new()?;
194 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
195
196 let datastore = DataStore::lookup_datastore(&store)?;
197
198 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
199
200 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
201 if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
202
203 let mut files = read_backup_index(&datastore, &snapshot)?;
204
205 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
206
207 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
208 acc.insert(item.filename.clone());
209 acc
210 });
211
212 for file in info.files {
213 if file_set.contains(&file) { continue; }
214 files.push(BackupContent { filename: file, size: None });
215 }
216
217 Ok(files)
218 }
219
220 #[api(
221 input: {
222 properties: {
223 store: {
224 schema: DATASTORE_SCHEMA,
225 },
226 "backup-type": {
227 schema: BACKUP_TYPE_SCHEMA,
228 },
229 "backup-id": {
230 schema: BACKUP_ID_SCHEMA,
231 },
232 "backup-time": {
233 schema: BACKUP_TIME_SCHEMA,
234 },
235 },
236 },
237 access: {
238 permission: &Permission::Privilege(
239 &["datastore", "{store}"],
240 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
241 true),
242 },
243 )]
244 /// Delete backup snapshot.
245 fn delete_snapshot(
246 store: String,
247 backup_type: String,
248 backup_id: String,
249 backup_time: i64,
250 _info: &ApiMethod,
251 rpcenv: &mut dyn RpcEnvironment,
252 ) -> Result<Value, Error> {
253
254 let username = rpcenv.get_user().unwrap();
255 let user_info = CachedUserInfo::new()?;
256 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
257
258 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
259
260 let datastore = DataStore::lookup_datastore(&store)?;
261
262 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
263 if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
264
265 datastore.remove_backup_dir(&snapshot)?;
266
267 Ok(Value::Null)
268 }
269
270 #[api(
271 input: {
272 properties: {
273 store: {
274 schema: DATASTORE_SCHEMA,
275 },
276 "backup-type": {
277 optional: true,
278 schema: BACKUP_TYPE_SCHEMA,
279 },
280 "backup-id": {
281 optional: true,
282 schema: BACKUP_ID_SCHEMA,
283 },
284 },
285 },
286 returns: {
287 type: Array,
288 description: "Returns the list of snapshots.",
289 items: {
290 type: SnapshotListItem,
291 }
292 },
293 access: {
294 permission: &Permission::Privilege(
295 &["datastore", "{store}"],
296 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
297 true),
298 },
299 )]
300 /// List backup snapshots.
301 pub fn list_snapshots (
302 store: String,
303 backup_type: Option<String>,
304 backup_id: Option<String>,
305 _param: Value,
306 _info: &ApiMethod,
307 rpcenv: &mut dyn RpcEnvironment,
308 ) -> Result<Vec<SnapshotListItem>, Error> {
309
310 let username = rpcenv.get_user().unwrap();
311 let user_info = CachedUserInfo::new()?;
312 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
313
314 let datastore = DataStore::lookup_datastore(&store)?;
315
316 let base_path = datastore.base_path();
317
318 let backup_list = BackupInfo::list_backups(&base_path)?;
319
320 let mut snapshots = vec![];
321
322 for info in backup_list {
323 let group = info.backup_dir.group();
324 if let Some(ref backup_type) = backup_type {
325 if backup_type != group.backup_type() { continue; }
326 }
327 if let Some(ref backup_id) = backup_id {
328 if backup_id != group.backup_id() { continue; }
329 }
330
331 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
332 if !list_all {
333 let owner = datastore.get_owner(group)?;
334 if owner != username { continue; }
335 }
336
337 let mut result_item = SnapshotListItem {
338 backup_type: group.backup_type().to_string(),
339 backup_id: group.backup_id().to_string(),
340 backup_time: info.backup_dir.backup_time().timestamp(),
341 files: info.files,
342 size: None,
343 };
344
345 if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
346 let mut backup_size = 0;
347 for item in index.iter() {
348 if let Some(item_size) = item.size {
349 backup_size += item_size;
350 }
351 }
352 result_item.size = Some(backup_size);
353 }
354
355 snapshots.push(result_item);
356 }
357
358 Ok(snapshots)
359 }
360
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
///
/// Reports total/used/available bytes of the filesystem backing the
/// datastore's base path, as obtained via statfs64(2).
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    // SAFETY: all-zero is a valid bit pattern for the plain-old-data
    // libc::statfs64 struct; statfs64() fills it in on success.
    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    // SAFETY: with_nix_path hands us a valid NUL-terminated C string, and
    // `stat` outlives the call.
    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    nix::errno::Errno::result(res)?; // map a -1 return + errno to a Rust error

    let bsize = stat.f_bsize as u64;

    // f_blocks/f_bfree/f_bavail count filesystem blocks - scale to bytes.
    Ok(StorageStatus {
        total: stat.f_blocks*bsize,
        used: (stat.f_blocks-stat.f_bfree)*bsize,
        avail: stat.f_bavail*bsize,
    })
}
402
#[macro_export]
/// Build a static parameter list for prune-related API methods.
///
/// Expands to an array literal containing the common `keep-*` options
/// (all optional integer schemas with minimum 1). Caller-supplied tuples
/// from the first bracket are placed before the common options, tuples
/// from the optional second bracket after them.
///
/// NOTE: the macro name contains a typo ("prameters"); it is kept as-is
/// because the macro is exported and may be referenced elsewhere.
macro_rules! add_common_prune_prameters {
    // Single-list form: delegate to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &IntegerSchema::new("Number of daily backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-hourly",
                true,
                &IntegerSchema::new("Number of hourly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-last",
                true,
                &IntegerSchema::new("Number of backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-monthly",
                true,
                &IntegerSchema::new("Number of monthly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-weekly",
                true,
                &IntegerSchema::new("Number of weekly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-yearly",
                true,
                &IntegerSchema::new("Number of yearly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            $( $list2 )*
        ]
    }
}
457
458 pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
459 "Returns the list of snapshots and a flag indicating if there are kept or removed.",
460 PruneListItem::API_SCHEMA
461 ).schema();
462
/// API method definition for `prune` - declared manually (instead of via
/// the #[api] macro) because the parameter list is assembled with the
/// `add_common_prune_prameters!` macro.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);
484
485 fn prune(
486 param: Value,
487 _info: &ApiMethod,
488 rpcenv: &mut dyn RpcEnvironment,
489 ) -> Result<Value, Error> {
490
491 let store = tools::required_string_param(&param, "store")?;
492 let backup_type = tools::required_string_param(&param, "backup-type")?;
493 let backup_id = tools::required_string_param(&param, "backup-id")?;
494
495 let username = rpcenv.get_user().unwrap();
496 let user_info = CachedUserInfo::new()?;
497 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
498
499 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
500
501 let group = BackupGroup::new(backup_type, backup_id);
502
503 let datastore = DataStore::lookup_datastore(&store)?;
504
505 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
506 if !allowed { check_backup_owner(&datastore, &group, &username)?; }
507
508 let prune_options = PruneOptions {
509 keep_last: param["keep-last"].as_u64(),
510 keep_hourly: param["keep-hourly"].as_u64(),
511 keep_daily: param["keep-daily"].as_u64(),
512 keep_weekly: param["keep-weekly"].as_u64(),
513 keep_monthly: param["keep-monthly"].as_u64(),
514 keep_yearly: param["keep-yearly"].as_u64(),
515 };
516
517 let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
518
519 let mut prune_result = Vec::new();
520
521 let list = group.list_backups(&datastore.base_path())?;
522
523 let mut prune_info = compute_prune_info(list, &prune_options)?;
524
525 prune_info.reverse(); // delete older snapshots first
526
527 let keep_all = !prune_options.keeps_something();
528
529 if dry_run {
530 for (info, mut keep) in prune_info {
531 if keep_all { keep = true; }
532
533 let backup_time = info.backup_dir.backup_time();
534 let group = info.backup_dir.group();
535
536 prune_result.push(json!({
537 "backup-type": group.backup_type(),
538 "backup-id": group.backup_id(),
539 "backup-time": backup_time.timestamp(),
540 "keep": keep,
541 }));
542 }
543 return Ok(json!(prune_result));
544 }
545
546
547 // We use a WorkerTask just to have a task log, but run synchrounously
548 let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
549
550 let result = try_block! {
551 if keep_all {
552 worker.log("No prune selection - keeping all files.");
553 } else {
554 worker.log(format!("retention options: {}", prune_options.cli_options_string()));
555 worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
556 store, backup_type, backup_id));
557 }
558
559 for (info, mut keep) in prune_info {
560 if keep_all { keep = true; }
561
562 let backup_time = info.backup_dir.backup_time();
563 let timestamp = BackupDir::backup_time_to_string(backup_time);
564 let group = info.backup_dir.group();
565
566
567 let msg = format!(
568 "{}/{}/{} {}",
569 group.backup_type(),
570 group.backup_id(),
571 timestamp,
572 if keep { "keep" } else { "remove" },
573 );
574
575 worker.log(msg);
576
577 prune_result.push(json!({
578 "backup-type": group.backup_type(),
579 "backup-id": group.backup_id(),
580 "backup-time": backup_time.timestamp(),
581 "keep": keep,
582 }));
583
584 if !(dry_run || keep) {
585 datastore.remove_backup_dir(&info.backup_dir)?;
586 }
587 }
588
589 Ok(())
590 };
591
592 worker.log_result(&result);
593
594 if let Err(err) = result {
595 bail!("prune failed - {}", err);
596 };
597
598 Ok(json!(prune_result))
599 }
600
601 #[api(
602 input: {
603 properties: {
604 store: {
605 schema: DATASTORE_SCHEMA,
606 },
607 },
608 },
609 returns: {
610 schema: UPID_SCHEMA,
611 },
612 access: {
613 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
614 },
615 )]
616 /// Start garbage collection.
617 fn start_garbage_collection(
618 store: String,
619 _info: &ApiMethod,
620 rpcenv: &mut dyn RpcEnvironment,
621 ) -> Result<Value, Error> {
622
623 let datastore = DataStore::lookup_datastore(&store)?;
624
625 println!("Starting garbage collection on store {}", store);
626
627 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
628
629 let upid_str = WorkerTask::new_thread(
630 "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
631 {
632 worker.log(format!("starting garbage collection on store {}", store));
633 datastore.garbage_collection(&worker)
634 })?;
635
636 Ok(json!(upid_str))
637 }
638
639 #[api(
640 input: {
641 properties: {
642 store: {
643 schema: DATASTORE_SCHEMA,
644 },
645 },
646 },
647 returns: {
648 type: GarbageCollectionStatus,
649 },
650 access: {
651 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
652 },
653 )]
654 /// Garbage collection status.
655 pub fn garbage_collection_status(
656 store: String,
657 _info: &ApiMethod,
658 _rpcenv: &mut dyn RpcEnvironment,
659 ) -> Result<GarbageCollectionStatus, Error> {
660
661 let datastore = DataStore::lookup_datastore(&store)?;
662
663 let status = datastore.last_gc_status();
664
665 Ok(status)
666 }
667
668 #[api(
669 returns: {
670 description: "List the accessible datastores.",
671 type: Array,
672 items: {
673 description: "Datastore name and description.",
674 properties: {
675 store: {
676 schema: DATASTORE_SCHEMA,
677 },
678 comment: {
679 optional: true,
680 schema: SINGLE_LINE_COMMENT_SCHEMA,
681 },
682 },
683 },
684 },
685 access: {
686 permission: &Permission::Anybody,
687 },
688 )]
689 /// Datastore list
690 fn get_datastore_list(
691 _param: Value,
692 _info: &ApiMethod,
693 rpcenv: &mut dyn RpcEnvironment,
694 ) -> Result<Value, Error> {
695
696 let (config, _digest) = datastore::config()?;
697
698 let username = rpcenv.get_user().unwrap();
699 let user_info = CachedUserInfo::new()?;
700
701 let mut list = Vec::new();
702
703 for (store, (_, data)) in &config.sections {
704 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
705 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
706 if allowed {
707 let mut entry = json!({ "store": store });
708 if let Some(comment) = data["comment"].as_str() {
709 entry["comment"] = comment.into();
710 }
711 list.push(entry);
712 }
713 }
714
715 Ok(list.into())
716 }
717
#[sortable]
/// API method definition for `download_file` - declared manually because it
/// uses the raw AsyncHttp handler in order to stream the file contents.
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
736
/// Download a single raw file from a backup snapshot (AsyncHttp handler).
///
/// Streams the requested archive file as `application/octet-stream`.
/// Users without `PRIV_DATASTORE_READ` may only access their own backups.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // READ allows downloading from any snapshot; otherwise only the owner may.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
                 backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(path)
            .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
            .await?;

        // Stream the file in chunks instead of loading it into memory.
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
           .status(StatusCode::OK)
           .header(header::CONTENT_TYPE, "application/octet-stream")
           .body(body)
           .unwrap())
    }.boxed()
}
787
#[sortable]
/// API method definition for `upload_backup_log` - declared manually
/// because it uses the raw AsyncHttp handler to consume the request body.
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
804
/// Upload the client backup log ('client.log.blob') into a snapshot.
///
/// Only the backup owner may upload; fails if the snapshot already
/// contains a log. The uploaded blob's CRC is verified before storing.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = "client.log.blob";

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // Regardless of privileges, only the owner may upload the log.
        let username = rpcenv.get_user().unwrap();
        check_backup_owner(&datastore, backup_dir.group(), &username)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // NOTE(review): exists()-then-write is a TOCTOU window; presumably
        // acceptable here since uploads for one snapshot are not concurrent
        // - confirm against the backup protocol.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

        // Collect the whole request body into one buffer.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        let blob = DataBlob::from_raw(data)?;
        // always verify CRC at server side
        blob.verify_crc()?;
        let raw_data = blob.raw_data();
        replace_file(&path, raw_data, CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
857
#[sortable]
/// Sub-routes available below `/admin/datastore/{store}` - must stay
/// sorted by path segment (enforced via #[sortable]).
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
];
903
/// Router for a single datastore: GET lists the subdirectories above,
/// and each subdirectory dispatches to its own router.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


/// Top-level router: GET lists accessible datastores, and the `{store}`
/// path parameter selects a datastore handled by DATASTORE_INFO_ROUTER.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);