use std::collections::{HashSet, HashMap};
use std::os::unix::ffi::OsStrExt;
use std::sync::{Arc, Mutex};
use std::path::{Path, PathBuf};

use anyhow::{bail, format_err, Error};

use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission,
};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
use pxar::accessor::aio::{Accessor, FileContents, FileEntry};

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;
use crate::server::WorkerTask;
use crate::tools::{
    zip::{ZipEncoder, ZipEntry},
    AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
};
use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
};
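
// Helper: fail unless `userid` owns the given backup group. The handlers below
// skip this check when the user already holds a sufficient datastore privilege
// (audit/read/modify, depending on the endpoint).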
fn check_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    userid: &Userid,
) -> Result<(), Error> {
    let owner = store.get_owner(group)?;
    if userid != &owner {
        bail!("backup owner check failed ({} != {})", userid, owner);
    }
    Ok(())
}
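
// Load the manifest of a snapshot and return it together with the list of
// contained files (BackupContent entries), including an entry for the manifest
// blob itself whose crypt mode is derived from the manifest signature.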
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}
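
// Same as read_backup_index(), but additionally list files referenced by the
// on-disk backup info that are missing from the manifest (size and crypt mode
// unknown for those).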
fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}
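
// Group a flat backup list by its group path ("<backup-type>/<backup-id>").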
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert(vec![]);
        time_list.push(info);
    }

    group_hash
}
            schema: DATASTORE_SCHEMA,

    description: "Returns the list of backup groups.",

        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,

/// List backup groups.
fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = Vec::new();

    for (_group_id, mut list) in group_hash {

        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];

        let group = info.backup_dir.group();

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;
        if !list_all && owner != userid {
            continue;
        }

        let result_item = GroupListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            last_backup: info.backup_dir.backup_time(),
            backup_count: list.len() as u64,
            files: info.files.clone(),
        };
        groups.push(result_item);
    }

    Ok(groups)
}
            schema: DATASTORE_SCHEMA,
            schema: BACKUP_TYPE_SCHEMA,
            schema: BACKUP_ID_SCHEMA,
            schema: BACKUP_TIME_SCHEMA,

    description: "Returns the list of archive files inside a backup snapshot.",

        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,

/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}
            schema: DATASTORE_SCHEMA,
            schema: BACKUP_TYPE_SCHEMA,
            schema: BACKUP_ID_SCHEMA,
            schema: BACKUP_TIME_SCHEMA,

        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,

/// Delete backup snapshot.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}
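
// The snapshot listing below optionally filters by backup-type / backup-id and
// augments each entry with its total size, the first line of the manifest
// "notes" and the stored verification state.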
            schema: DATASTORE_SCHEMA,
            schema: BACKUP_TYPE_SCHEMA,
            schema: BACKUP_ID_SCHEMA,

        description: "Returns the list of snapshots.",
            type: SnapshotListItem,

        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,

/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(ref backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(ref backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;

        if !list_all && owner != userid {
            continue;
        }

        let mut size = None;

        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let verify = manifest.unprotected["verify_state"].clone();
                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state : '{}'", err);
                        None
                    }
                };

                (comment, verify, files)
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);

                    .map(|x| BackupContent {
                        filename: x.to_string(),

        let result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time(),

        snapshots.push(result_item);
    }

    Ok(snapshots)
}
// returns a map from type to (group_count, snapshot_count)
fn get_snaphots_count(store: &DataStore) -> Result<HashMap<String, (usize, usize)>, Error> {
    let base_path = store.base_path();
    let backup_list = BackupInfo::list_backups(&base_path)?;
    let mut groups = HashSet::new();
    let mut result: HashMap<String, (usize, usize)> = HashMap::new();
    for info in backup_list {
        let group = info.backup_dir.group();

        let id = group.backup_id();
        let backup_type = group.backup_type();

        let mut new_id = false;

        if groups.insert(format!("{}-{}", &backup_type, &id)) {
            new_id = true;
        }

        if let Some(mut counts) = result.get_mut(backup_type) {
            counts.1 += 1;
            if new_id {
                counts.0 += 1;
            }
        } else {
            result.insert(backup_type.to_string(), (1, 1));
        }
    }

    Ok(result)
}
            schema: DATASTORE_SCHEMA,

        description: "The overall Datastore status and information.",

                description: "Group and Snapshot counts per Type",

                type: GarbageCollectionStatus,

        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),

/// Get datastore status.
fn status(
    store: String,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let storage_status = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let counts = get_snaphots_count(&datastore)?;
    let gc_status = datastore.last_gc_status();

    Ok(json!({
        "storage": storage_status,
        "counts": counts,
        "gc-status": gc_status,
    }))
}
            schema: DATASTORE_SCHEMA,
            schema: BACKUP_TYPE_SCHEMA,
            schema: BACKUP_ID_SCHEMA,
            schema: BACKUP_TIME_SCHEMA,

        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme

/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);
            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id.clone()),
        userid,
        to_stdout,
        move |worker| {
            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));

            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(

                    worker.upid().clone(),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let (_count, failed_dirs) = verify_backup_group(

                failed_dirs
            } else {
                verify_all_backups(datastore, worker.clone(), worker.upid(), None)?
            };

            if failed_dirs.len() > 0 {
                worker.log("Failed to verify following snapshots:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {

                &PRUNE_SCHEMA_KEEP_DAILY,

                &PRUNE_SCHEMA_KEEP_HOURLY,

                &PRUNE_SCHEMA_KEEP_LAST,

                &PRUNE_SCHEMA_KEEP_MONTHLY,

                &PRUNE_SCHEMA_KEEP_WEEKLY,

                &PRUNE_SCHEMA_KEEP_YEARLY,
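
// The prune endpoint is declared by hand (ApiMethod::new + ApiHandler::Sync)
// instead of via #[api], so that its parameter list can be assembled with the
// macro above.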
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if they are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),

        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")

            ("store", false, &DATASTORE_SCHEMA),

)
.returns(&API_RETURN_SCHEMA_PRUNE)
.access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
fn prune(
    param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, &group, &userid)?; }

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;

    worker.log("No prune selection - keeping all files.");

    worker.log(format!("retention options: {}", prune_options.cli_options_string()));
    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
        store, backup_type, backup_id));

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();

            if keep { "keep" } else { "remove" },

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {

                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(), err
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
            schema: DATASTORE_SCHEMA,

        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),

/// Start garbage collection.
fn start_garbage_collection(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let upid_str = WorkerTask::new_thread(
        "garbage_collection",

        Userid::root_userid().clone(),
        to_stdout,
        move |worker| {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(&*worker, worker.upid())
        },
    )?;

    Ok(json!(upid_str))
}
            schema: DATASTORE_SCHEMA,

            type: GarbageCollectionStatus,

        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),

/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}
        description: "List the accessible datastores.",

            description: "Datastore name and description.",

                    schema: DATASTORE_SCHEMA,

                    schema: SINGLE_LINE_COMMENT_SCHEMA,

        permission: &Permission::Anybody,

fn get_datastore_list(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut entry = json!({ "store": store });
        if let Some(comment) = data["comment"].as_str() {
            entry["comment"] = comment.into();
        }
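
// Raw download: the handler below streams the stored file as-is; see
// download_file_decoded further down for on-the-fly decoding of indexes/blobs.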
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),

        "Download single raw file from backup snapshot.",

            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),

).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,

fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),

        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",

            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),

).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,

fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),

        "Upload the client backup log file into a backup snapshot ('client.log.blob').",

            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),

).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
            schema: DATASTORE_SCHEMA,
            schema: BACKUP_TYPE_SCHEMA,
            schema: BACKUP_ID_SCHEMA,
            schema: BACKUP_TIME_SCHEMA,

                description: "Base64 encoded path.",

        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),

/// Get the entries of the given path of the catalog
fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);
    let mut current = catalog_reader.root()?;
    let mut components = vec![];

    if filepath != "root" {
        components = base64::decode(filepath)?;
        if components.len() > 0 && components[0] == '/' as u8 {
            components.remove(0);
        }
        for component in components.split(|c| *c == '/' as u8) {
            if let Some(entry) = catalog_reader.lookup(&current, component)? {

            } else {
                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
            }
        }
    }

    let mut res = Vec::new();

    for direntry in catalog_reader.read_dir(&current)? {
        let mut components = components.clone();
        components.push('/' as u8);
        components.extend(&direntry.name);
        let path = base64::encode(components);
        let text = String::from_utf8_lossy(&direntry.name);
        let mut entry = json!({

            "type": CatalogEntryType::from(&direntry.attr).to_string(),

        match direntry.attr {
            DirEntryAttribute::Directory { start: _ } => {
                entry["leaf"] = false.into();
            }
            DirEntryAttribute::File { size, mtime } => {
                entry["size"] = size.into();
                entry["mtime"] = mtime.into();
fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(

                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,

                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(

                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,

                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                let entry = ZipEntry::new(

                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,

                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),

        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",

            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),

).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,

fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        let mut components = base64::decode(&filepath)?;
        if components.len() > 0 && components[0] == '/' as u8 {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == '/' as u8);
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                })
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err);
                        err
                    })
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard last
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creating of zip: {}", err))?;

                        .map_err(|err| eprintln!("error during finishing of zip: {}", err))
                });

                Body::wrap_stream(receiver.map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            schema: DATASTORE_SCHEMA,

            type: RRDTimeFrameResolution,

        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),

/// Read datastore stats
fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),

            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            schema: DATASTORE_SCHEMA,
            schema: BACKUP_TYPE_SCHEMA,
            schema: BACKUP_ID_SCHEMA,
            schema: BACKUP_TIME_SCHEMA,

        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),

/// Get "notes" for a specific backup
fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}
            schema: DATASTORE_SCHEMA,
            schema: BACKUP_TYPE_SCHEMA,
            schema: BACKUP_ID_SCHEMA,
            schema: BACKUP_TIME_SCHEMA,

                description: "A multiline text.",

        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),

/// Set "notes" for a specific backup
fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}
            schema: DATASTORE_SCHEMA,
            schema: BACKUP_TYPE_SCHEMA,
            schema: BACKUP_ID_SCHEMA,

        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),

/// Change owner of a backup group
fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Userid,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let user_info = CachedUserInfo::new()?;

    if !user_info.is_active_user(&new_owner) {
        bail!("user '{}' is inactive or non-existent", new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[

            .get(&API_METHOD_CATALOG)

            .post(&API_METHOD_SET_BACKUP_OWNER)

            .download(&API_METHOD_DOWNLOAD_FILE)

            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)

            .get(&API_METHOD_LIST_SNAPSHOT_FILES)

            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)

            .get(&API_METHOD_LIST_GROUPS)

            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)

            .post(&API_METHOD_PRUNE)

    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),

            .get(&API_METHOD_GET_RRD_STATS)

            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)

            .get(&API_METHOD_STATUS)

    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),

            .post(&API_METHOD_VERIFY)
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);