]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
cleanup: manifest is always CryptMode::None
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 use std::collections::{HashSet, HashMap};
2 use std::ffi::OsStr;
3 use std::os::unix::ffi::OsStrExt;
4
5 use anyhow::{bail, format_err, Error};
6 use futures::*;
7 use hyper::http::request::Parts;
8 use hyper::{header, Body, Response, StatusCode};
9 use serde_json::{json, Value};
10
11 use proxmox::api::{
12 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
13 RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
14 use proxmox::api::router::SubdirMap;
15 use proxmox::api::schema::*;
16 use proxmox::tools::fs::{replace_file, CreateOptions};
17 use proxmox::try_block;
18 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
19
20 use pxar::accessor::aio::Accessor;
21 use pxar::EntryKind;
22
23 use crate::api2::types::*;
24 use crate::api2::node::rrd::create_value_from_rrd;
25 use crate::backup::*;
26 use crate::config::datastore;
27 use crate::config::cached_user_info::CachedUserInfo;
28
29 use crate::server::WorkerTask;
30 use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
31 use crate::config::acl::{
32 PRIV_DATASTORE_AUDIT,
33 PRIV_DATASTORE_MODIFY,
34 PRIV_DATASTORE_READ,
35 PRIV_DATASTORE_PRUNE,
36 PRIV_DATASTORE_BACKUP,
37 };
38
39 fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
40 let owner = store.get_owner(group)?;
41 if &owner != userid {
42 bail!("backup owner check failed ({} != {})", userid, owner);
43 }
44 Ok(())
45 }
46
47 fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
48
49 let (manifest, index_size) = store.load_manifest(backup_dir)?;
50
51 let mut result = Vec::new();
52 for item in manifest.files() {
53 result.push(BackupContent {
54 filename: item.filename.clone(),
55 crypt_mode: Some(item.crypt_mode),
56 size: Some(item.size),
57 });
58 }
59
60 result.push(BackupContent {
61 filename: MANIFEST_BLOB_NAME.to_string(),
62 crypt_mode: Some(CryptMode::None),
63 size: Some(index_size),
64 });
65
66 Ok(result)
67 }
68
69 fn get_all_snapshot_files(
70 store: &DataStore,
71 info: &BackupInfo,
72 ) -> Result<Vec<BackupContent>, Error> {
73 let mut files = read_backup_index(&store, &info.backup_dir)?;
74
75 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
76 acc.insert(item.filename.clone());
77 acc
78 });
79
80 for file in &info.files {
81 if file_set.contains(file) { continue; }
82 files.push(BackupContent {
83 filename: file.to_string(),
84 size: None,
85 crypt_mode: None,
86 });
87 }
88
89 Ok(files)
90 }
91
92 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
93
94 let mut group_hash = HashMap::new();
95
96 for info in backup_list {
97 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
98 let time_list = group_hash.entry(group_id).or_insert(vec![]);
99 time_list.push(info);
100 }
101
102 group_hash
103 }
104
105 #[api(
106 input: {
107 properties: {
108 store: {
109 schema: DATASTORE_SCHEMA,
110 },
111 },
112 },
113 returns: {
114 type: Array,
115 description: "Returns the list of backup groups.",
116 items: {
117 type: GroupListItem,
118 }
119 },
120 access: {
121 permission: &Permission::Privilege(
122 &["datastore", "{store}"],
123 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
124 true),
125 },
126 )]
127 /// List backup groups.
128 fn list_groups(
129 store: String,
130 rpcenv: &mut dyn RpcEnvironment,
131 ) -> Result<Vec<GroupListItem>, Error> {
132
133 let username = rpcenv.get_user().unwrap();
134 let user_info = CachedUserInfo::new()?;
135 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
136
137 let datastore = DataStore::lookup_datastore(&store)?;
138
139 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
140
141 let group_hash = group_backups(backup_list);
142
143 let mut groups = Vec::new();
144
145 for (_group_id, mut list) in group_hash {
146
147 BackupInfo::sort_list(&mut list, false);
148
149 let info = &list[0];
150
151 let group = info.backup_dir.group();
152
153 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
154 let owner = datastore.get_owner(group)?;
155 if !list_all {
156 if owner != username { continue; }
157 }
158
159 let result_item = GroupListItem {
160 backup_type: group.backup_type().to_string(),
161 backup_id: group.backup_id().to_string(),
162 last_backup: info.backup_dir.backup_time().timestamp(),
163 backup_count: list.len() as u64,
164 files: info.files.clone(),
165 owner: Some(owner),
166 };
167 groups.push(result_item);
168 }
169
170 Ok(groups)
171 }
172
173 #[api(
174 input: {
175 properties: {
176 store: {
177 schema: DATASTORE_SCHEMA,
178 },
179 "backup-type": {
180 schema: BACKUP_TYPE_SCHEMA,
181 },
182 "backup-id": {
183 schema: BACKUP_ID_SCHEMA,
184 },
185 "backup-time": {
186 schema: BACKUP_TIME_SCHEMA,
187 },
188 },
189 },
190 returns: {
191 type: Array,
192 description: "Returns the list of archive files inside a backup snapshots.",
193 items: {
194 type: BackupContent,
195 }
196 },
197 access: {
198 permission: &Permission::Privilege(
199 &["datastore", "{store}"],
200 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
201 true),
202 },
203 )]
204 /// List snapshot files.
205 pub fn list_snapshot_files(
206 store: String,
207 backup_type: String,
208 backup_id: String,
209 backup_time: i64,
210 _info: &ApiMethod,
211 rpcenv: &mut dyn RpcEnvironment,
212 ) -> Result<Vec<BackupContent>, Error> {
213
214 let username = rpcenv.get_user().unwrap();
215 let user_info = CachedUserInfo::new()?;
216 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
217
218 let datastore = DataStore::lookup_datastore(&store)?;
219
220 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
221
222 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
223 if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
224
225 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
226
227 get_all_snapshot_files(&datastore, &info)
228 }
229
230 #[api(
231 input: {
232 properties: {
233 store: {
234 schema: DATASTORE_SCHEMA,
235 },
236 "backup-type": {
237 schema: BACKUP_TYPE_SCHEMA,
238 },
239 "backup-id": {
240 schema: BACKUP_ID_SCHEMA,
241 },
242 "backup-time": {
243 schema: BACKUP_TIME_SCHEMA,
244 },
245 },
246 },
247 access: {
248 permission: &Permission::Privilege(
249 &["datastore", "{store}"],
250 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
251 true),
252 },
253 )]
254 /// Delete backup snapshot.
255 fn delete_snapshot(
256 store: String,
257 backup_type: String,
258 backup_id: String,
259 backup_time: i64,
260 _info: &ApiMethod,
261 rpcenv: &mut dyn RpcEnvironment,
262 ) -> Result<Value, Error> {
263
264 let username = rpcenv.get_user().unwrap();
265 let user_info = CachedUserInfo::new()?;
266 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
267
268 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
269
270 let datastore = DataStore::lookup_datastore(&store)?;
271
272 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
273 if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
274
275 datastore.remove_backup_dir(&snapshot, false)?;
276
277 Ok(Value::Null)
278 }
279
280 #[api(
281 input: {
282 properties: {
283 store: {
284 schema: DATASTORE_SCHEMA,
285 },
286 "backup-type": {
287 optional: true,
288 schema: BACKUP_TYPE_SCHEMA,
289 },
290 "backup-id": {
291 optional: true,
292 schema: BACKUP_ID_SCHEMA,
293 },
294 },
295 },
296 returns: {
297 type: Array,
298 description: "Returns the list of snapshots.",
299 items: {
300 type: SnapshotListItem,
301 }
302 },
303 access: {
304 permission: &Permission::Privilege(
305 &["datastore", "{store}"],
306 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
307 true),
308 },
309 )]
310 /// List backup snapshots.
311 pub fn list_snapshots (
312 store: String,
313 backup_type: Option<String>,
314 backup_id: Option<String>,
315 _param: Value,
316 _info: &ApiMethod,
317 rpcenv: &mut dyn RpcEnvironment,
318 ) -> Result<Vec<SnapshotListItem>, Error> {
319
320 let username = rpcenv.get_user().unwrap();
321 let user_info = CachedUserInfo::new()?;
322 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
323
324 let datastore = DataStore::lookup_datastore(&store)?;
325
326 let base_path = datastore.base_path();
327
328 let backup_list = BackupInfo::list_backups(&base_path)?;
329
330 let mut snapshots = vec![];
331
332 for info in backup_list {
333 let group = info.backup_dir.group();
334 if let Some(ref backup_type) = backup_type {
335 if backup_type != group.backup_type() { continue; }
336 }
337 if let Some(ref backup_id) = backup_id {
338 if backup_id != group.backup_id() { continue; }
339 }
340
341 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
342 let owner = datastore.get_owner(group)?;
343
344 if !list_all {
345 if owner != username { continue; }
346 }
347
348 let mut size = None;
349
350 let files = match get_all_snapshot_files(&datastore, &info) {
351 Ok(files) => {
352 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
353 files
354 },
355 Err(err) => {
356 eprintln!("error during snapshot file listing: '{}'", err);
357 info
358 .files
359 .iter()
360 .map(|x| BackupContent {
361 filename: x.to_string(),
362 size: None,
363 crypt_mode: None,
364 })
365 .collect()
366 },
367 };
368
369 let result_item = SnapshotListItem {
370 backup_type: group.backup_type().to_string(),
371 backup_id: group.backup_id().to_string(),
372 backup_time: info.backup_dir.backup_time().timestamp(),
373 files,
374 size,
375 owner: Some(owner),
376 };
377
378 snapshots.push(result_item);
379 }
380
381 Ok(snapshots)
382 }
383
384 #[api(
385 input: {
386 properties: {
387 store: {
388 schema: DATASTORE_SCHEMA,
389 },
390 },
391 },
392 returns: {
393 type: StorageStatus,
394 },
395 access: {
396 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
397 },
398 )]
399 /// Get datastore status.
400 pub fn status(
401 store: String,
402 _info: &ApiMethod,
403 _rpcenv: &mut dyn RpcEnvironment,
404 ) -> Result<StorageStatus, Error> {
405 let datastore = DataStore::lookup_datastore(&store)?;
406 crate::tools::disks::disk_usage(&datastore.base_path())
407 }
408
409 #[api(
410 input: {
411 properties: {
412 store: {
413 schema: DATASTORE_SCHEMA,
414 },
415 "backup-type": {
416 schema: BACKUP_TYPE_SCHEMA,
417 optional: true,
418 },
419 "backup-id": {
420 schema: BACKUP_ID_SCHEMA,
421 optional: true,
422 },
423 "backup-time": {
424 schema: BACKUP_TIME_SCHEMA,
425 optional: true,
426 },
427 },
428 },
429 returns: {
430 schema: UPID_SCHEMA,
431 },
432 access: {
433 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
434 },
435 )]
436 /// Verify backups.
437 ///
438 /// This function can verify a single backup snapshot, all backup from a backup group,
439 /// or all backups in the datastore.
440 pub fn verify(
441 store: String,
442 backup_type: Option<String>,
443 backup_id: Option<String>,
444 backup_time: Option<i64>,
445 rpcenv: &mut dyn RpcEnvironment,
446 ) -> Result<Value, Error> {
447 let datastore = DataStore::lookup_datastore(&store)?;
448
449 let worker_id;
450
451 let mut backup_dir = None;
452 let mut backup_group = None;
453
454 match (backup_type, backup_id, backup_time) {
455 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
456 worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
457 let dir = BackupDir::new(backup_type, backup_id, backup_time);
458 backup_dir = Some(dir);
459 }
460 (Some(backup_type), Some(backup_id), None) => {
461 worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
462 let group = BackupGroup::new(backup_type, backup_id);
463 backup_group = Some(group);
464 }
465 (None, None, None) => {
466 worker_id = store.clone();
467 }
468 _ => bail!("parameters do not spefify a backup group or snapshot"),
469 }
470
471 let username = rpcenv.get_user().unwrap();
472 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
473
474 let upid_str = WorkerTask::new_thread(
475 "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
476 {
477 let failed_dirs = if let Some(backup_dir) = backup_dir {
478 let mut verified_chunks = HashSet::with_capacity(1024*16);
479 let mut corrupt_chunks = HashSet::with_capacity(64);
480 let mut res = Vec::new();
481 if !verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)? {
482 res.push(backup_dir.to_string());
483 }
484 res
485 } else if let Some(backup_group) = backup_group {
486 verify_backup_group(&datastore, &backup_group, &worker)?
487 } else {
488 verify_all_backups(&datastore, &worker)?
489 };
490 if failed_dirs.len() > 0 {
491 worker.log("Failed to verify following snapshots:");
492 for dir in failed_dirs {
493 worker.log(format!("\t{}", dir));
494 }
495 bail!("verfication failed - please check the log for details");
496 }
497 Ok(())
498 })?;
499
500 Ok(json!(upid_str))
501 }
502
/// Expand to an array of the common `keep-*` prune parameter tuples
/// `(name, optional-flag, schema)`, optionally splicing extra entries
/// before (`$list1`) and after (`$list2`) the common ones.
///
/// NOTE(review): the macro name contains a typo ("prameters"), but it
/// is `#[macro_export]`ed and used by other modules (and by
/// `API_METHOD_PRUNE` below), so renaming it would be a breaking change.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // Single-list form: delegate with an empty trailing list.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    // Two-list form: leading entries, the common keep-* options
    // (all optional), then trailing entries.
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
545
/// Return schema for the `prune` API call: an array of `PruneListItem`
/// entries, each carrying a keep/remove flag per snapshot.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
550
/// API method definition for `prune`.
///
/// Declared manually (instead of via `#[api]`) because the parameter
/// list is assembled with the `add_common_prune_prameters!` macro.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    // PRUNE alone restricts the caller to their own backup groups; see
    // the owner check inside `prune`.
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
    );
572
/// Prune a backup group: compute which snapshots to keep according to
/// the `keep-*` options and remove the rest.
///
/// With `dry-run` set, only reports the keep/remove decision per
/// snapshot without deleting anything. Returns a JSON array with one
/// entry (type, id, time, keep flag) per snapshot.
fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    // Callers without the modify privilege may only prune groups they own.
    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, &group, &username)?; }

    // Absent options stay `None` (meaning: no limit for that interval).
    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    // No keep-* option given at all => keep everything.
    let keep_all = !prune_options.keeps_something();

    // Dry run: report decisions without starting a worker or deleting.
    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;

    let result = try_block! {
        if keep_all {
            worker.log("No prune selection - keeping all files.");
        } else {
            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
            worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                store, backup_type, backup_id));
        }

        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let timestamp = BackupDir::backup_time_to_string(backup_time);
            let group = info.backup_dir.group();


            // One log line per snapshot with the decision taken.
            let msg = format!(
                "{}/{}/{} {}",
                group.backup_type(),
                group.backup_id(),
                timestamp,
                if keep { "keep" } else { "remove" },
            );

            worker.log(msg);

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));

            if !(dry_run || keep) {
                datastore.remove_backup_dir(&info.backup_dir, true)?;
            }
        }

        Ok(())
    };

    // Record success/failure in the task log before propagating errors.
    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    };

    Ok(json!(prune_result))
}
688
689 #[api(
690 input: {
691 properties: {
692 store: {
693 schema: DATASTORE_SCHEMA,
694 },
695 },
696 },
697 returns: {
698 schema: UPID_SCHEMA,
699 },
700 access: {
701 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
702 },
703 )]
704 /// Start garbage collection.
705 fn start_garbage_collection(
706 store: String,
707 _info: &ApiMethod,
708 rpcenv: &mut dyn RpcEnvironment,
709 ) -> Result<Value, Error> {
710
711 let datastore = DataStore::lookup_datastore(&store)?;
712
713 println!("Starting garbage collection on store {}", store);
714
715 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
716
717 let upid_str = WorkerTask::new_thread(
718 "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
719 {
720 worker.log(format!("starting garbage collection on store {}", store));
721 datastore.garbage_collection(&worker)
722 })?;
723
724 Ok(json!(upid_str))
725 }
726
727 #[api(
728 input: {
729 properties: {
730 store: {
731 schema: DATASTORE_SCHEMA,
732 },
733 },
734 },
735 returns: {
736 type: GarbageCollectionStatus,
737 },
738 access: {
739 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
740 },
741 )]
742 /// Garbage collection status.
743 pub fn garbage_collection_status(
744 store: String,
745 _info: &ApiMethod,
746 _rpcenv: &mut dyn RpcEnvironment,
747 ) -> Result<GarbageCollectionStatus, Error> {
748
749 let datastore = DataStore::lookup_datastore(&store)?;
750
751 let status = datastore.last_gc_status();
752
753 Ok(status)
754 }
755
756 #[api(
757 returns: {
758 description: "List the accessible datastores.",
759 type: Array,
760 items: {
761 description: "Datastore name and description.",
762 properties: {
763 store: {
764 schema: DATASTORE_SCHEMA,
765 },
766 comment: {
767 optional: true,
768 schema: SINGLE_LINE_COMMENT_SCHEMA,
769 },
770 },
771 },
772 },
773 access: {
774 permission: &Permission::Anybody,
775 },
776 )]
777 /// Datastore list
778 fn get_datastore_list(
779 _param: Value,
780 _info: &ApiMethod,
781 rpcenv: &mut dyn RpcEnvironment,
782 ) -> Result<Value, Error> {
783
784 let (config, _digest) = datastore::config()?;
785
786 let username = rpcenv.get_user().unwrap();
787 let user_info = CachedUserInfo::new()?;
788
789 let mut list = Vec::new();
790
791 for (store, (_, data)) in &config.sections {
792 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
793 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
794 if allowed {
795 let mut entry = json!({ "store": store });
796 if let Some(comment) = data["comment"].as_str() {
797 entry["comment"] = comment.into();
798 }
799 list.push(entry);
800 }
801 }
802
803 Ok(list.into())
804 }
805
/// API method definition: download a single raw file (as stored on
/// disk, possibly encrypted) from a backup snapshot.
///
/// Without `PRIV_DATASTORE_READ`, the handler additionally checks that
/// the caller owns the snapshot.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
824
/// Stream a single raw file from a backup snapshot to the HTTP client.
///
/// The file is served byte-for-byte as stored on disk (no decoding).
/// Without `PRIV_DATASTORE_READ` the caller must own the snapshot.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // The owner check substitutes for the read privilege for
        // backup-only users.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        // <datastore base>/<snapshot relative path>/<file>
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // Stream in chunks; once the response has started, errors can
        // only be logged server-side.
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
           .status(StatusCode::OK)
           .header(header::CONTENT_TYPE, "application/octet-stream")
           .body(body)
           .unwrap())
    }.boxed()
}
878
/// API method definition: download a single file from a backup
/// snapshot in decoded form (indexes reassembled, blobs decoded).
///
/// The handler refuses files whose manifest entry marks them as
/// encrypted, since the server cannot decrypt them.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
897
/// Stream a single file from a backup snapshot in decoded form.
///
/// Dispatches on the file name extension: `didx`/`fidx` indexes are
/// reassembled from their chunks, `blob` files are read through
/// `DataBlobReader`. Files marked as encrypted in the manifest are
/// rejected. Without `PRIV_DATASTORE_READ` the caller must own the
/// snapshot.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // The owner check substitutes for the read privilege for
        // backup-only users.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        // Reject encrypted files up front — we could only serve ciphertext.
        let files = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Last dot-separated component of the file name.
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                // Dynamic index: stream the reassembled content.
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

                let chunk_reader = LocalChunkReader::new(datastore, None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                // Fixed index: use a 4 MiB stream buffer.
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let chunk_reader = LocalChunkReader::new(datastore, None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                // Single blob: stream through the blob-format-aware reader.
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
           .status(StatusCode::OK)
           .header(header::CONTENT_TYPE, "application/octet-stream")
           .body(body)
           .unwrap())
    }.boxed()
}
990
/// API method definition: upload the client backup log into an
/// existing snapshot. The handler enforces snapshot ownership and
/// rejects uploads when a log is already present.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
1007
/// Store the request body as 'client.log.blob' inside a snapshot.
///
/// Only the snapshot owner may upload, and only once — the call fails
/// if the snapshot already contains a log. The uploaded data must be a
/// valid `DataBlob` (CRC is verified server-side before writing).
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // Unconditional ownership check — this endpoint is owner-only.
        let username = rpcenv.get_user().unwrap();
        check_backup_owner(&datastore, backup_dir.group(), &username)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // A log may only be uploaded once per snapshot.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

        // Collect the whole request body into memory.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1059
1060 #[api(
1061 input: {
1062 properties: {
1063 store: {
1064 schema: DATASTORE_SCHEMA,
1065 },
1066 "backup-type": {
1067 schema: BACKUP_TYPE_SCHEMA,
1068 },
1069 "backup-id": {
1070 schema: BACKUP_ID_SCHEMA,
1071 },
1072 "backup-time": {
1073 schema: BACKUP_TIME_SCHEMA,
1074 },
1075 "filepath": {
1076 description: "Base64 encoded path.",
1077 type: String,
1078 }
1079 },
1080 },
1081 access: {
1082 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1083 },
1084 )]
1085 /// Get the entries of the given path of the catalog
1086 fn catalog(
1087 store: String,
1088 backup_type: String,
1089 backup_id: String,
1090 backup_time: i64,
1091 filepath: String,
1092 _param: Value,
1093 _info: &ApiMethod,
1094 rpcenv: &mut dyn RpcEnvironment,
1095 ) -> Result<Value, Error> {
1096 let datastore = DataStore::lookup_datastore(&store)?;
1097
1098 let username = rpcenv.get_user().unwrap();
1099 let user_info = CachedUserInfo::new()?;
1100 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
1101
1102 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1103
1104 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1105 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
1106
1107 let mut path = datastore.base_path();
1108 path.push(backup_dir.relative_path());
1109 path.push(CATALOG_NAME);
1110
1111 let index = DynamicIndexReader::open(&path)
1112 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1113
1114 let chunk_reader = LocalChunkReader::new(datastore, None);
1115 let reader = BufferedDynamicReader::new(index, chunk_reader);
1116
1117 let mut catalog_reader = CatalogReader::new(reader);
1118 let mut current = catalog_reader.root()?;
1119 let mut components = vec![];
1120
1121
1122 if filepath != "root" {
1123 components = base64::decode(filepath)?;
1124 if components.len() > 0 && components[0] == '/' as u8 {
1125 components.remove(0);
1126 }
1127 for component in components.split(|c| *c == '/' as u8) {
1128 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1129 current = entry;
1130 } else {
1131 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1132 }
1133 }
1134 }
1135
1136 let mut res = Vec::new();
1137
1138 for direntry in catalog_reader.read_dir(&current)? {
1139 let mut components = components.clone();
1140 components.push('/' as u8);
1141 components.extend(&direntry.name);
1142 let path = base64::encode(components);
1143 let text = String::from_utf8_lossy(&direntry.name);
1144 let mut entry = json!({
1145 "filepath": path,
1146 "text": text,
1147 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1148 "leaf": true,
1149 });
1150 match direntry.attr {
1151 DirEntryAttribute::Directory { start: _ } => {
1152 entry["leaf"] = false.into();
1153 },
1154 DirEntryAttribute::File { size, mtime } => {
1155 entry["size"] = size.into();
1156 entry["mtime"] = mtime.into();
1157 },
1158 _ => {},
1159 }
1160 res.push(entry);
1161 }
1162
1163 Ok(res.into())
1164 }
1165
1166 #[sortable]
1167 pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1168 &ApiHandler::AsyncHttp(&pxar_file_download),
1169 &ObjectSchema::new(
1170 "Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
1171 &sorted!([
1172 ("store", false, &DATASTORE_SCHEMA),
1173 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1174 ("backup-id", false, &BACKUP_ID_SCHEMA),
1175 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1176 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
1177 ]),
1178 )
1179 ).access(None, &Permission::Privilege(
1180 &["datastore", "{store}"],
1181 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1182 true)
1183 );
1184
1185 fn pxar_file_download(
1186 _parts: Parts,
1187 _req_body: Body,
1188 param: Value,
1189 _info: &ApiMethod,
1190 rpcenv: Box<dyn RpcEnvironment>,
1191 ) -> ApiResponseFuture {
1192
1193 async move {
1194 let store = tools::required_string_param(&param, "store")?;
1195 let datastore = DataStore::lookup_datastore(&store)?;
1196
1197 let username = rpcenv.get_user().unwrap();
1198 let user_info = CachedUserInfo::new()?;
1199 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
1200
1201 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1202
1203 let backup_type = tools::required_string_param(&param, "backup-type")?;
1204 let backup_id = tools::required_string_param(&param, "backup-id")?;
1205 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1206
1207 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1208
1209 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1210 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
1211
1212 let mut path = datastore.base_path();
1213 path.push(backup_dir.relative_path());
1214
1215 let mut components = base64::decode(&filepath)?;
1216 if components.len() > 0 && components[0] == '/' as u8 {
1217 components.remove(0);
1218 }
1219
1220 let mut split = components.splitn(2, |c| *c == '/' as u8);
1221 let pxar_name = split.next().unwrap();
1222 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
1223
1224 path.push(OsStr::from_bytes(&pxar_name));
1225
1226 let index = DynamicIndexReader::open(&path)
1227 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1228
1229 let chunk_reader = LocalChunkReader::new(datastore, None);
1230 let reader = BufferedDynamicReader::new(index, chunk_reader);
1231 let archive_size = reader.archive_size();
1232 let reader = LocalDynamicReadAt::new(reader);
1233
1234 let decoder = Accessor::new(reader, archive_size).await?;
1235 let root = decoder.open_root().await?;
1236 let file = root
1237 .lookup(OsStr::from_bytes(file_path)).await?
1238 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1239
1240 let file = match file.kind() {
1241 EntryKind::File { .. } => file,
1242 EntryKind::Hardlink(_) => {
1243 decoder.follow_hardlink(&file).await?
1244 },
1245 // TODO symlink
1246 other => bail!("cannot download file of type {:?}", other),
1247 };
1248
1249 let body = Body::wrap_stream(
1250 AsyncReaderStream::new(file.contents().await?)
1251 .map_err(move |err| {
1252 eprintln!("error during streaming of '{:?}' - {}", filepath, err);
1253 err
1254 })
1255 );
1256
1257 // fixme: set other headers ?
1258 Ok(Response::builder()
1259 .status(StatusCode::OK)
1260 .header(header::CONTENT_TYPE, "application/octet-stream")
1261 .body(body)
1262 .unwrap())
1263 }.boxed()
1264 }
1265
1266 #[api(
1267 input: {
1268 properties: {
1269 store: {
1270 schema: DATASTORE_SCHEMA,
1271 },
1272 timeframe: {
1273 type: RRDTimeFrameResolution,
1274 },
1275 cf: {
1276 type: RRDMode,
1277 },
1278 },
1279 },
1280 access: {
1281 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1282 },
1283 )]
1284 /// Read datastore stats
1285 fn get_rrd_stats(
1286 store: String,
1287 timeframe: RRDTimeFrameResolution,
1288 cf: RRDMode,
1289 _param: Value,
1290 ) -> Result<Value, Error> {
1291
1292 create_value_from_rrd(
1293 &format!("datastore/{}", store),
1294 &[
1295 "total", "used",
1296 "read_ios", "read_bytes",
1297 "write_ios", "write_bytes",
1298 "io_ticks",
1299 ],
1300 timeframe,
1301 cf,
1302 )
1303 }
1304
1305 #[api(
1306 input: {
1307 properties: {
1308 store: {
1309 schema: DATASTORE_SCHEMA,
1310 },
1311 "backup-type": {
1312 schema: BACKUP_TYPE_SCHEMA,
1313 },
1314 "backup-id": {
1315 schema: BACKUP_ID_SCHEMA,
1316 },
1317 "backup-time": {
1318 schema: BACKUP_TIME_SCHEMA,
1319 },
1320 },
1321 },
1322 access: {
1323 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1324 },
1325 )]
1326 /// Get "notes" for a specific backup
1327 fn get_notes(
1328 store: String,
1329 backup_type: String,
1330 backup_id: String,
1331 backup_time: i64,
1332 rpcenv: &mut dyn RpcEnvironment,
1333 ) -> Result<String, Error> {
1334 let datastore = DataStore::lookup_datastore(&store)?;
1335
1336 let username = rpcenv.get_user().unwrap();
1337 let user_info = CachedUserInfo::new()?;
1338 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
1339
1340 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1341
1342 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1343 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
1344
1345 let manifest = datastore.load_manifest_json(&backup_dir)?;
1346
1347 let notes = manifest["unprotected"]["notes"]
1348 .as_str()
1349 .unwrap_or("");
1350
1351 Ok(String::from(notes))
1352 }
1353
1354 #[api(
1355 input: {
1356 properties: {
1357 store: {
1358 schema: DATASTORE_SCHEMA,
1359 },
1360 "backup-type": {
1361 schema: BACKUP_TYPE_SCHEMA,
1362 },
1363 "backup-id": {
1364 schema: BACKUP_ID_SCHEMA,
1365 },
1366 "backup-time": {
1367 schema: BACKUP_TIME_SCHEMA,
1368 },
1369 notes: {
1370 description: "A multiline text.",
1371 },
1372 },
1373 },
1374 access: {
1375 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1376 },
1377 )]
1378 /// Set "notes" for a specific backup
1379 fn set_notes(
1380 store: String,
1381 backup_type: String,
1382 backup_id: String,
1383 backup_time: i64,
1384 notes: String,
1385 rpcenv: &mut dyn RpcEnvironment,
1386 ) -> Result<(), Error> {
1387 let datastore = DataStore::lookup_datastore(&store)?;
1388
1389 let username = rpcenv.get_user().unwrap();
1390 let user_info = CachedUserInfo::new()?;
1391 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
1392
1393 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1394
1395 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1396 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
1397
1398 let mut manifest = datastore.load_manifest_json(&backup_dir)?;
1399
1400 manifest["unprotected"]["notes"] = notes.into();
1401
1402 datastore.store_manifest(&backup_dir, manifest)?;
1403
1404 Ok(())
1405 }
1406
// Per-datastore sub-routes under /admin/datastore/{store}/.
// NOTE: entries must stay sorted by name — `list_subdirs_api_method!`
// presumably relies on the ordering; confirm before reordering.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1483
// Router for a single datastore: GET lists the sub-directories above,
// and each sub-directory dispatches to its own router.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
1487
1488
// Top-level router: GET lists all datastores; everything below
// /{store}/ is handled by the per-datastore router.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);