1 use std
::collections
::{HashSet, HashMap}
;
3 use std
::os
::unix
::ffi
::OsStrExt
;
4 use std
::sync
::{Arc, Mutex}
;
6 use anyhow
::{bail, format_err, Error}
;
8 use hyper
::http
::request
::Parts
;
9 use hyper
::{header, Body, Response, StatusCode}
;
10 use serde_json
::{json, Value}
;
13 api
, ApiResponseFuture
, ApiHandler
, ApiMethod
, Router
,
14 RpcEnvironment
, RpcEnvironmentType
, Permission
16 use proxmox
::api
::router
::SubdirMap
;
17 use proxmox
::api
::schema
::*;
18 use proxmox
::tools
::fs
::{replace_file, CreateOptions}
;
19 use proxmox
::{http_err, identity, list_subdirs_api_method, sortable}
;
21 use pxar
::accessor
::aio
::Accessor
;
24 use crate::api2
::types
::*;
25 use crate::api2
::node
::rrd
::create_value_from_rrd
;
27 use crate::config
::datastore
;
28 use crate::config
::cached_user_info
::CachedUserInfo
;
30 use crate::server
::WorkerTask
;
31 use crate::tools
::{self, AsyncReaderStream, WrappedReaderStream}
;
32 use crate::config
::acl
::{
34 PRIV_DATASTORE_MODIFY
,
37 PRIV_DATASTORE_BACKUP
,
40 fn check_backup_owner(
44 ) -> Result
<(), Error
> {
45 let owner
= store
.get_owner(group
)?
;
47 bail
!("backup owner check failed ({} != {})", userid
, owner
);
54 backup_dir
: &BackupDir
,
55 ) -> Result
<(BackupManifest
, Vec
<BackupContent
>), Error
> {
57 let (manifest
, index_size
) = store
.load_manifest(backup_dir
)?
;
59 let mut result
= Vec
::new();
60 for item
in manifest
.files() {
61 result
.push(BackupContent
{
62 filename
: item
.filename
.clone(),
63 crypt_mode
: Some(item
.crypt_mode
),
64 size
: Some(item
.size
),
68 result
.push(BackupContent
{
69 filename
: MANIFEST_BLOB_NAME
.to_string(),
70 crypt_mode
: match manifest
.signature
{
71 Some(_
) => Some(CryptMode
::SignOnly
),
72 None
=> Some(CryptMode
::None
),
74 size
: Some(index_size
),
77 Ok((manifest
, result
))
80 fn get_all_snapshot_files(
83 ) -> Result
<(BackupManifest
, Vec
<BackupContent
>), Error
> {
85 let (manifest
, mut files
) = read_backup_index(&store
, &info
.backup_dir
)?
;
87 let file_set
= files
.iter().fold(HashSet
::new(), |mut acc
, item
| {
88 acc
.insert(item
.filename
.clone());
92 for file
in &info
.files
{
93 if file_set
.contains(file
) { continue; }
94 files
.push(BackupContent
{
95 filename
: file
.to_string(),
101 Ok((manifest
, files
))
104 fn group_backups(backup_list
: Vec
<BackupInfo
>) -> HashMap
<String
, Vec
<BackupInfo
>> {
106 let mut group_hash
= HashMap
::new();
108 for info
in backup_list
{
109 let group_id
= info
.backup_dir
.group().group_path().to_str().unwrap().to_owned();
110 let time_list
= group_hash
.entry(group_id
).or_insert(vec
![]);
111 time_list
.push(info
);
121 schema
: DATASTORE_SCHEMA
,
127 description
: "Returns the list of backup groups.",
133 permission
: &Permission
::Privilege(
134 &["datastore", "{store}"],
135 PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_BACKUP
,
139 /// List backup groups.
142 rpcenv
: &mut dyn RpcEnvironment
,
143 ) -> Result
<Vec
<GroupListItem
>, Error
> {
145 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
146 let user_info
= CachedUserInfo
::new()?
;
147 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
149 let datastore
= DataStore
::lookup_datastore(&store
)?
;
151 let backup_list
= BackupInfo
::list_backups(&datastore
.base_path())?
;
153 let group_hash
= group_backups(backup_list
);
155 let mut groups
= Vec
::new();
157 for (_group_id
, mut list
) in group_hash
{
159 BackupInfo
::sort_list(&mut list
, false);
163 let group
= info
.backup_dir
.group();
165 let list_all
= (user_privs
& PRIV_DATASTORE_AUDIT
) != 0;
166 let owner
= datastore
.get_owner(group
)?
;
168 if owner
!= userid { continue; }
171 let result_item
= GroupListItem
{
172 backup_type
: group
.backup_type().to_string(),
173 backup_id
: group
.backup_id().to_string(),
174 last_backup
: info
.backup_dir
.backup_time(),
175 backup_count
: list
.len() as u64,
176 files
: info
.files
.clone(),
179 groups
.push(result_item
);
189 schema
: DATASTORE_SCHEMA
,
192 schema
: BACKUP_TYPE_SCHEMA
,
195 schema
: BACKUP_ID_SCHEMA
,
198 schema
: BACKUP_TIME_SCHEMA
,
204 description
: "Returns the list of archive files inside a backup snapshots.",
210 permission
: &Permission
::Privilege(
211 &["datastore", "{store}"],
212 PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_READ
| PRIV_DATASTORE_BACKUP
,
216 /// List snapshot files.
217 pub fn list_snapshot_files(
223 rpcenv
: &mut dyn RpcEnvironment
,
224 ) -> Result
<Vec
<BackupContent
>, Error
> {
226 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
227 let user_info
= CachedUserInfo
::new()?
;
228 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
230 let datastore
= DataStore
::lookup_datastore(&store
)?
;
232 let snapshot
= BackupDir
::new(backup_type
, backup_id
, backup_time
)?
;
234 let allowed
= (user_privs
& (PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_READ
)) != 0;
235 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
237 let info
= BackupInfo
::new(&datastore
.base_path(), snapshot
)?
;
239 let (_manifest
, files
) = get_all_snapshot_files(&datastore
, &info
)?
;
248 schema
: DATASTORE_SCHEMA
,
251 schema
: BACKUP_TYPE_SCHEMA
,
254 schema
: BACKUP_ID_SCHEMA
,
257 schema
: BACKUP_TIME_SCHEMA
,
262 permission
: &Permission
::Privilege(
263 &["datastore", "{store}"],
264 PRIV_DATASTORE_MODIFY
| PRIV_DATASTORE_PRUNE
,
268 /// Delete backup snapshot.
275 rpcenv
: &mut dyn RpcEnvironment
,
276 ) -> Result
<Value
, Error
> {
278 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
279 let user_info
= CachedUserInfo
::new()?
;
280 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
282 let snapshot
= BackupDir
::new(backup_type
, backup_id
, backup_time
)?
;
284 let datastore
= DataStore
::lookup_datastore(&store
)?
;
286 let allowed
= (user_privs
& PRIV_DATASTORE_MODIFY
) != 0;
287 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
289 datastore
.remove_backup_dir(&snapshot
, false)?
;
298 schema
: DATASTORE_SCHEMA
,
302 schema
: BACKUP_TYPE_SCHEMA
,
306 schema
: BACKUP_ID_SCHEMA
,
312 description
: "Returns the list of snapshots.",
314 type: SnapshotListItem
,
318 permission
: &Permission
::Privilege(
319 &["datastore", "{store}"],
320 PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_BACKUP
,
324 /// List backup snapshots.
325 pub fn list_snapshots (
327 backup_type
: Option
<String
>,
328 backup_id
: Option
<String
>,
331 rpcenv
: &mut dyn RpcEnvironment
,
332 ) -> Result
<Vec
<SnapshotListItem
>, Error
> {
334 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
335 let user_info
= CachedUserInfo
::new()?
;
336 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
338 let datastore
= DataStore
::lookup_datastore(&store
)?
;
340 let base_path
= datastore
.base_path();
342 let backup_list
= BackupInfo
::list_backups(&base_path
)?
;
344 let mut snapshots
= vec
![];
346 for info
in backup_list
{
347 let group
= info
.backup_dir
.group();
348 if let Some(ref backup_type
) = backup_type
{
349 if backup_type
!= group
.backup_type() { continue; }
351 if let Some(ref backup_id
) = backup_id
{
352 if backup_id
!= group
.backup_id() { continue; }
355 let list_all
= (user_privs
& PRIV_DATASTORE_AUDIT
) != 0;
356 let owner
= datastore
.get_owner(group
)?
;
359 if owner
!= userid { continue; }
364 let (comment
, verification
, files
) = match get_all_snapshot_files(&datastore
, &info
) {
365 Ok((manifest
, files
)) => {
366 size
= Some(files
.iter().map(|x
| x
.size
.unwrap_or(0)).sum());
367 // extract the first line from notes
368 let comment
: Option
<String
> = manifest
.unprotected
["notes"]
370 .and_then(|notes
| notes
.lines().next())
373 let verify
= manifest
.unprotected
["verify_state"].clone();
374 let verify
: Option
<SnapshotVerifyState
> = match serde_json
::from_value(verify
) {
375 Ok(verify
) => verify
,
377 eprintln
!("error parsing verification state : '{}'", err
);
382 (comment
, verify
, files
)
385 eprintln
!("error during snapshot file listing: '{}'", err
);
392 .map(|x
| BackupContent
{
393 filename
: x
.to_string(),
402 let result_item
= SnapshotListItem
{
403 backup_type
: group
.backup_type().to_string(),
404 backup_id
: group
.backup_id().to_string(),
405 backup_time
: info
.backup_dir
.backup_time(),
413 snapshots
.push(result_item
);
423 schema
: DATASTORE_SCHEMA
,
431 permission
: &Permission
::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_BACKUP
, true),
434 /// Get datastore status.
438 _rpcenv
: &mut dyn RpcEnvironment
,
439 ) -> Result
<StorageStatus
, Error
> {
440 let datastore
= DataStore
::lookup_datastore(&store
)?
;
441 crate::tools
::disks
::disk_usage(&datastore
.base_path())
448 schema
: DATASTORE_SCHEMA
,
451 schema
: BACKUP_TYPE_SCHEMA
,
455 schema
: BACKUP_ID_SCHEMA
,
459 schema
: BACKUP_TIME_SCHEMA
,
468 permission
: &Permission
::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ
| PRIV_DATASTORE_BACKUP
, true), // fixme
473 /// This function can verify a single backup snapshot, all backup from a backup group,
474 /// or all backups in the datastore.
477 backup_type
: Option
<String
>,
478 backup_id
: Option
<String
>,
479 backup_time
: Option
<i64>,
480 rpcenv
: &mut dyn RpcEnvironment
,
481 ) -> Result
<Value
, Error
> {
482 let datastore
= DataStore
::lookup_datastore(&store
)?
;
486 let mut backup_dir
= None
;
487 let mut backup_group
= None
;
489 match (backup_type
, backup_id
, backup_time
) {
490 (Some(backup_type
), Some(backup_id
), Some(backup_time
)) => {
491 worker_id
= format
!("{}_{}_{}_{:08X}", store
, backup_type
, backup_id
, backup_time
);
492 let dir
= BackupDir
::new(backup_type
, backup_id
, backup_time
)?
;
493 backup_dir
= Some(dir
);
495 (Some(backup_type
), Some(backup_id
), None
) => {
496 worker_id
= format
!("{}_{}_{}", store
, backup_type
, backup_id
);
497 let group
= BackupGroup
::new(backup_type
, backup_id
);
498 backup_group
= Some(group
);
500 (None
, None
, None
) => {
501 worker_id
= store
.clone();
503 _
=> bail
!("parameters do not specify a backup group or snapshot"),
506 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
507 let to_stdout
= if rpcenv
.env_type() == RpcEnvironmentType
::CLI { true }
else { false }
;
509 let upid_str
= WorkerTask
::new_thread(
511 Some(worker_id
.clone()),
515 let verified_chunks
= Arc
::new(Mutex
::new(HashSet
::with_capacity(1024*16)));
516 let corrupt_chunks
= Arc
::new(Mutex
::new(HashSet
::with_capacity(64)));
518 let failed_dirs
= if let Some(backup_dir
) = backup_dir
{
519 let mut res
= Vec
::new();
520 if !verify_backup_dir(
526 worker
.upid().clone(),
528 res
.push(backup_dir
.to_string());
531 } else if let Some(backup_group
) = backup_group
{
532 let (_count
, failed_dirs
) = verify_backup_group(
543 verify_all_backups(datastore
, worker
.clone(), worker
.upid())?
545 if failed_dirs
.len() > 0 {
546 worker
.log("Failed to verify following snapshots:");
547 for dir
in failed_dirs
{
548 worker
.log(format
!("\t{}", dir
));
550 bail
!("verification failed - please check the log for details");
560 macro_rules
! add_common_prune_prameters
{
561 ( [ $
( $list1
:tt
)* ] ) => {
562 add_common_prune_prameters
!([$
( $list1
)* ] , [])
564 ( [ $
( $list1
:tt
)* ] , [ $
( $list2
:tt
)* ] ) => {
570 &PRUNE_SCHEMA_KEEP_DAILY
,
575 &PRUNE_SCHEMA_KEEP_HOURLY
,
580 &PRUNE_SCHEMA_KEEP_LAST
,
585 &PRUNE_SCHEMA_KEEP_MONTHLY
,
590 &PRUNE_SCHEMA_KEEP_WEEKLY
,
595 &PRUNE_SCHEMA_KEEP_YEARLY
,
602 pub const API_RETURN_SCHEMA_PRUNE
: Schema
= ArraySchema
::new(
603 "Returns the list of snapshots and a flag indicating if there are kept or removed.",
604 &PruneListItem
::API_SCHEMA
607 const API_METHOD_PRUNE
: ApiMethod
= ApiMethod
::new(
608 &ApiHandler
::Sync(&prune
),
610 "Prune the datastore.",
611 &add_common_prune_prameters
!([
612 ("backup-id", false, &BACKUP_ID_SCHEMA
),
613 ("backup-type", false, &BACKUP_TYPE_SCHEMA
),
614 ("dry-run", true, &BooleanSchema
::new(
615 "Just show what prune would do, but do not delete anything.")
619 ("store", false, &DATASTORE_SCHEMA
),
622 .returns(&API_RETURN_SCHEMA_PRUNE
)
623 .access(None
, &Permission
::Privilege(
624 &["datastore", "{store}"],
625 PRIV_DATASTORE_MODIFY
| PRIV_DATASTORE_PRUNE
,
632 rpcenv
: &mut dyn RpcEnvironment
,
633 ) -> Result
<Value
, Error
> {
635 let store
= tools
::required_string_param(¶m
, "store")?
;
636 let backup_type
= tools
::required_string_param(¶m
, "backup-type")?
;
637 let backup_id
= tools
::required_string_param(¶m
, "backup-id")?
;
639 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
640 let user_info
= CachedUserInfo
::new()?
;
641 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
643 let dry_run
= param
["dry-run"].as_bool().unwrap_or(false);
645 let group
= BackupGroup
::new(backup_type
, backup_id
);
647 let datastore
= DataStore
::lookup_datastore(&store
)?
;
649 let allowed
= (user_privs
& PRIV_DATASTORE_MODIFY
) != 0;
650 if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
652 let prune_options
= PruneOptions
{
653 keep_last
: param
["keep-last"].as_u64(),
654 keep_hourly
: param
["keep-hourly"].as_u64(),
655 keep_daily
: param
["keep-daily"].as_u64(),
656 keep_weekly
: param
["keep-weekly"].as_u64(),
657 keep_monthly
: param
["keep-monthly"].as_u64(),
658 keep_yearly
: param
["keep-yearly"].as_u64(),
661 let worker_id
= format
!("{}_{}_{}", store
, backup_type
, backup_id
);
663 let mut prune_result
= Vec
::new();
665 let list
= group
.list_backups(&datastore
.base_path())?
;
667 let mut prune_info
= compute_prune_info(list
, &prune_options
)?
;
669 prune_info
.reverse(); // delete older snapshots first
671 let keep_all
= !prune_options
.keeps_something();
674 for (info
, mut keep
) in prune_info
{
675 if keep_all { keep = true; }
677 let backup_time
= info
.backup_dir
.backup_time();
678 let group
= info
.backup_dir
.group();
680 prune_result
.push(json
!({
681 "backup-type": group
.backup_type(),
682 "backup-id": group
.backup_id(),
683 "backup-time": backup_time
,
687 return Ok(json
!(prune_result
));
691 // We use a WorkerTask just to have a task log, but run synchrounously
692 let worker
= WorkerTask
::new("prune", Some(worker_id
), Userid
::root_userid().clone(), true)?
;
695 worker
.log("No prune selection - keeping all files.");
697 worker
.log(format
!("retention options: {}", prune_options
.cli_options_string()));
698 worker
.log(format
!("Starting prune on store \"{}\" group \"{}/{}\"",
699 store
, backup_type
, backup_id
));
702 for (info
, mut keep
) in prune_info
{
703 if keep_all { keep = true; }
705 let backup_time
= info
.backup_dir
.backup_time();
706 let timestamp
= info
.backup_dir
.backup_time_string();
707 let group
= info
.backup_dir
.group();
715 if keep { "keep" }
else { "remove" }
,
720 prune_result
.push(json
!({
721 "backup-type": group
.backup_type(),
722 "backup-id": group
.backup_id(),
723 "backup-time": backup_time
,
727 if !(dry_run
|| keep
) {
728 if let Err(err
) = datastore
.remove_backup_dir(&info
.backup_dir
, false) {
731 "failed to remove dir {:?}: {}",
732 info
.backup_dir
.relative_path(), err
739 worker
.log_result(&Ok(()));
741 Ok(json
!(prune_result
))
748 schema
: DATASTORE_SCHEMA
,
756 permission
: &Permission
::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY
, false),
759 /// Start garbage collection.
760 fn start_garbage_collection(
763 rpcenv
: &mut dyn RpcEnvironment
,
764 ) -> Result
<Value
, Error
> {
766 let datastore
= DataStore
::lookup_datastore(&store
)?
;
768 println
!("Starting garbage collection on store {}", store
);
770 let to_stdout
= if rpcenv
.env_type() == RpcEnvironmentType
::CLI { true }
else { false }
;
772 let upid_str
= WorkerTask
::new_thread(
773 "garbage_collection",
775 Userid
::root_userid().clone(),
778 worker
.log(format
!("starting garbage collection on store {}", store
));
779 datastore
.garbage_collection(&*worker
, worker
.upid())
790 schema
: DATASTORE_SCHEMA
,
795 type: GarbageCollectionStatus
,
798 permission
: &Permission
::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT
, false),
801 /// Garbage collection status.
802 pub fn garbage_collection_status(
805 _rpcenv
: &mut dyn RpcEnvironment
,
806 ) -> Result
<GarbageCollectionStatus
, Error
> {
808 let datastore
= DataStore
::lookup_datastore(&store
)?
;
810 let status
= datastore
.last_gc_status();
817 description
: "List the accessible datastores.",
820 description
: "Datastore name and description.",
823 schema
: DATASTORE_SCHEMA
,
827 schema
: SINGLE_LINE_COMMENT_SCHEMA
,
833 permission
: &Permission
::Anybody
,
837 fn get_datastore_list(
840 rpcenv
: &mut dyn RpcEnvironment
,
841 ) -> Result
<Value
, Error
> {
843 let (config
, _digest
) = datastore
::config()?
;
845 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
846 let user_info
= CachedUserInfo
::new()?
;
848 let mut list
= Vec
::new();
850 for (store
, (_
, data
)) in &config
.sections
{
851 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
852 let allowed
= (user_privs
& (PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_BACKUP
)) != 0;
854 let mut entry
= json
!({ "store": store }
);
855 if let Some(comment
) = data
["comment"].as_str() {
856 entry
["comment"] = comment
.into();
866 pub const API_METHOD_DOWNLOAD_FILE
: ApiMethod
= ApiMethod
::new(
867 &ApiHandler
::AsyncHttp(&download_file
),
869 "Download single raw file from backup snapshot.",
871 ("store", false, &DATASTORE_SCHEMA
),
872 ("backup-type", false, &BACKUP_TYPE_SCHEMA
),
873 ("backup-id", false, &BACKUP_ID_SCHEMA
),
874 ("backup-time", false, &BACKUP_TIME_SCHEMA
),
875 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA
),
878 ).access(None
, &Permission
::Privilege(
879 &["datastore", "{store}"],
880 PRIV_DATASTORE_READ
| PRIV_DATASTORE_BACKUP
,
889 rpcenv
: Box
<dyn RpcEnvironment
>,
890 ) -> ApiResponseFuture
{
893 let store
= tools
::required_string_param(¶m
, "store")?
;
894 let datastore
= DataStore
::lookup_datastore(store
)?
;
896 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
897 let user_info
= CachedUserInfo
::new()?
;
898 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
900 let file_name
= tools
::required_string_param(¶m
, "file-name")?
.to_owned();
902 let backup_type
= tools
::required_string_param(¶m
, "backup-type")?
;
903 let backup_id
= tools
::required_string_param(¶m
, "backup-id")?
;
904 let backup_time
= tools
::required_integer_param(¶m
, "backup-time")?
;
906 let backup_dir
= BackupDir
::new(backup_type
, backup_id
, backup_time
)?
;
908 let allowed
= (user_privs
& PRIV_DATASTORE_READ
) != 0;
909 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
911 println
!("Download {} from {} ({}/{})", file_name
, store
, backup_dir
, file_name
);
913 let mut path
= datastore
.base_path();
914 path
.push(backup_dir
.relative_path());
915 path
.push(&file_name
);
917 let file
= tokio
::fs
::File
::open(&path
)
919 .map_err(|err
| http_err
!(BAD_REQUEST
, "File open failed: {}", err
))?
;
921 let payload
= tokio_util
::codec
::FramedRead
::new(file
, tokio_util
::codec
::BytesCodec
::new())
922 .map_ok(|bytes
| hyper
::body
::Bytes
::from(bytes
.freeze()))
923 .map_err(move |err
| {
924 eprintln
!("error during streaming of '{:?}' - {}", &path
, err
);
927 let body
= Body
::wrap_stream(payload
);
929 // fixme: set other headers ?
930 Ok(Response
::builder()
931 .status(StatusCode
::OK
)
932 .header(header
::CONTENT_TYPE
, "application/octet-stream")
939 pub const API_METHOD_DOWNLOAD_FILE_DECODED
: ApiMethod
= ApiMethod
::new(
940 &ApiHandler
::AsyncHttp(&download_file_decoded
),
942 "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
944 ("store", false, &DATASTORE_SCHEMA
),
945 ("backup-type", false, &BACKUP_TYPE_SCHEMA
),
946 ("backup-id", false, &BACKUP_ID_SCHEMA
),
947 ("backup-time", false, &BACKUP_TIME_SCHEMA
),
948 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA
),
951 ).access(None
, &Permission
::Privilege(
952 &["datastore", "{store}"],
953 PRIV_DATASTORE_READ
| PRIV_DATASTORE_BACKUP
,
957 fn download_file_decoded(
962 rpcenv
: Box
<dyn RpcEnvironment
>,
963 ) -> ApiResponseFuture
{
966 let store
= tools
::required_string_param(¶m
, "store")?
;
967 let datastore
= DataStore
::lookup_datastore(store
)?
;
969 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
970 let user_info
= CachedUserInfo
::new()?
;
971 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
973 let file_name
= tools
::required_string_param(¶m
, "file-name")?
.to_owned();
975 let backup_type
= tools
::required_string_param(¶m
, "backup-type")?
;
976 let backup_id
= tools
::required_string_param(¶m
, "backup-id")?
;
977 let backup_time
= tools
::required_integer_param(¶m
, "backup-time")?
;
979 let backup_dir
= BackupDir
::new(backup_type
, backup_id
, backup_time
)?
;
981 let allowed
= (user_privs
& PRIV_DATASTORE_READ
) != 0;
982 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
984 let (manifest
, files
) = read_backup_index(&datastore
, &backup_dir
)?
;
986 if file
.filename
== file_name
&& file
.crypt_mode
== Some(CryptMode
::Encrypt
) {
987 bail
!("cannot decode '{}' - is encrypted", file_name
);
991 println
!("Download {} from {} ({}/{})", file_name
, store
, backup_dir
, file_name
);
993 let mut path
= datastore
.base_path();
994 path
.push(backup_dir
.relative_path());
995 path
.push(&file_name
);
997 let extension
= file_name
.rsplitn(2, '
.'
).next().unwrap();
999 let body
= match extension
{
1001 let index
= DynamicIndexReader
::open(&path
)
1002 .map_err(|err
| format_err
!("unable to read dynamic index '{:?}' - {}", &path
, err
))?
;
1003 let (csum
, size
) = index
.compute_csum();
1004 manifest
.verify_file(&file_name
, &csum
, size
)?
;
1006 let chunk_reader
= LocalChunkReader
::new(datastore
, None
, CryptMode
::None
);
1007 let reader
= AsyncIndexReader
::new(index
, chunk_reader
);
1008 Body
::wrap_stream(AsyncReaderStream
::new(reader
)
1009 .map_err(move |err
| {
1010 eprintln
!("error during streaming of '{:?}' - {}", path
, err
);
1015 let index
= FixedIndexReader
::open(&path
)
1016 .map_err(|err
| format_err
!("unable to read fixed index '{:?}' - {}", &path
, err
))?
;
1018 let (csum
, size
) = index
.compute_csum();
1019 manifest
.verify_file(&file_name
, &csum
, size
)?
;
1021 let chunk_reader
= LocalChunkReader
::new(datastore
, None
, CryptMode
::None
);
1022 let reader
= AsyncIndexReader
::new(index
, chunk_reader
);
1023 Body
::wrap_stream(AsyncReaderStream
::with_buffer_size(reader
, 4*1024*1024)
1024 .map_err(move |err
| {
1025 eprintln
!("error during streaming of '{:?}' - {}", path
, err
);
1030 let file
= std
::fs
::File
::open(&path
)
1031 .map_err(|err
| http_err
!(BAD_REQUEST
, "File open failed: {}", err
))?
;
1033 // FIXME: load full blob to verify index checksum?
1036 WrappedReaderStream
::new(DataBlobReader
::new(file
, None
)?
)
1037 .map_err(move |err
| {
1038 eprintln
!("error during streaming of '{:?}' - {}", path
, err
);
1044 bail
!("cannot download '{}' files", extension
);
1048 // fixme: set other headers ?
1049 Ok(Response
::builder()
1050 .status(StatusCode
::OK
)
1051 .header(header
::CONTENT_TYPE
, "application/octet-stream")
1058 pub const API_METHOD_UPLOAD_BACKUP_LOG
: ApiMethod
= ApiMethod
::new(
1059 &ApiHandler
::AsyncHttp(&upload_backup_log
),
1061 "Upload the client backup log file into a backup snapshot ('client.log.blob').",
1063 ("store", false, &DATASTORE_SCHEMA
),
1064 ("backup-type", false, &BACKUP_TYPE_SCHEMA
),
1065 ("backup-id", false, &BACKUP_ID_SCHEMA
),
1066 ("backup-time", false, &BACKUP_TIME_SCHEMA
),
1070 Some("Only the backup creator/owner is allowed to do this."),
1071 &Permission
::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP
, false)
1074 fn upload_backup_log(
1079 rpcenv
: Box
<dyn RpcEnvironment
>,
1080 ) -> ApiResponseFuture
{
1083 let store
= tools
::required_string_param(¶m
, "store")?
;
1084 let datastore
= DataStore
::lookup_datastore(store
)?
;
1086 let file_name
= CLIENT_LOG_BLOB_NAME
;
1088 let backup_type
= tools
::required_string_param(¶m
, "backup-type")?
;
1089 let backup_id
= tools
::required_string_param(¶m
, "backup-id")?
;
1090 let backup_time
= tools
::required_integer_param(¶m
, "backup-time")?
;
1092 let backup_dir
= BackupDir
::new(backup_type
, backup_id
, backup_time
)?
;
1094 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
1095 check_backup_owner(&datastore
, backup_dir
.group(), &userid
)?
;
1097 let mut path
= datastore
.base_path();
1098 path
.push(backup_dir
.relative_path());
1099 path
.push(&file_name
);
1102 bail
!("backup already contains a log.");
1105 println
!("Upload backup log to {}/{}/{}/{}/{}", store
,
1106 backup_type
, backup_id
, backup_dir
.backup_time_string(), file_name
);
1109 .map_err(Error
::from
)
1110 .try_fold(Vec
::new(), |mut acc
, chunk
| {
1111 acc
.extend_from_slice(&*chunk
);
1112 future
::ok
::<_
, Error
>(acc
)
1116 // always verify blob/CRC at server side
1117 let blob
= DataBlob
::load_from_reader(&mut &data
[..])?
;
1119 replace_file(&path
, blob
.raw_data(), CreateOptions
::new())?
;
1121 // fixme: use correct formatter
1122 Ok(crate::server
::formatter
::json_response(Ok(Value
::Null
)))
1130 schema
: DATASTORE_SCHEMA
,
1133 schema
: BACKUP_TYPE_SCHEMA
,
1136 schema
: BACKUP_ID_SCHEMA
,
1139 schema
: BACKUP_TIME_SCHEMA
,
1142 description
: "Base64 encoded path.",
1148 permission
: &Permission
::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ
| PRIV_DATASTORE_BACKUP
, true),
1151 /// Get the entries of the given path of the catalog
1154 backup_type
: String
,
1160 rpcenv
: &mut dyn RpcEnvironment
,
1161 ) -> Result
<Value
, Error
> {
1162 let datastore
= DataStore
::lookup_datastore(&store
)?
;
1164 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
1165 let user_info
= CachedUserInfo
::new()?
;
1166 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
1168 let backup_dir
= BackupDir
::new(backup_type
, backup_id
, backup_time
)?
;
1170 let allowed
= (user_privs
& PRIV_DATASTORE_READ
) != 0;
1171 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1173 let file_name
= CATALOG_NAME
;
1175 let (manifest
, files
) = read_backup_index(&datastore
, &backup_dir
)?
;
1177 if file
.filename
== file_name
&& file
.crypt_mode
== Some(CryptMode
::Encrypt
) {
1178 bail
!("cannot decode '{}' - is encrypted", file_name
);
1182 let mut path
= datastore
.base_path();
1183 path
.push(backup_dir
.relative_path());
1184 path
.push(file_name
);
1186 let index
= DynamicIndexReader
::open(&path
)
1187 .map_err(|err
| format_err
!("unable to read dynamic index '{:?}' - {}", &path
, err
))?
;
1189 let (csum
, size
) = index
.compute_csum();
1190 manifest
.verify_file(&file_name
, &csum
, size
)?
;
1192 let chunk_reader
= LocalChunkReader
::new(datastore
, None
, CryptMode
::None
);
1193 let reader
= BufferedDynamicReader
::new(index
, chunk_reader
);
1195 let mut catalog_reader
= CatalogReader
::new(reader
);
1196 let mut current
= catalog_reader
.root()?
;
1197 let mut components
= vec
![];
1200 if filepath
!= "root" {
1201 components
= base64
::decode(filepath
)?
;
1202 if components
.len() > 0 && components
[0] == '
/'
as u8 {
1203 components
.remove(0);
1205 for component
in components
.split(|c
| *c
== '
/'
as u8) {
1206 if let Some(entry
) = catalog_reader
.lookup(¤t
, component
)?
{
1209 bail
!("path {:?} not found in catalog", &String
::from_utf8_lossy(&components
));
1214 let mut res
= Vec
::new();
1216 for direntry
in catalog_reader
.read_dir(¤t
)?
{
1217 let mut components
= components
.clone();
1218 components
.push('
/'
as u8);
1219 components
.extend(&direntry
.name
);
1220 let path
= base64
::encode(components
);
1221 let text
= String
::from_utf8_lossy(&direntry
.name
);
1222 let mut entry
= json
!({
1225 "type": CatalogEntryType
::from(&direntry
.attr
).to_string(),
1228 match direntry
.attr
{
1229 DirEntryAttribute
::Directory { start: _ }
=> {
1230 entry
["leaf"] = false.into();
1232 DirEntryAttribute
::File { size, mtime }
=> {
1233 entry
["size"] = size
.into();
1234 entry
["mtime"] = mtime
.into();
1245 pub const API_METHOD_PXAR_FILE_DOWNLOAD
: ApiMethod
= ApiMethod
::new(
1246 &ApiHandler
::AsyncHttp(&pxar_file_download
),
1248 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
1250 ("store", false, &DATASTORE_SCHEMA
),
1251 ("backup-type", false, &BACKUP_TYPE_SCHEMA
),
1252 ("backup-id", false, &BACKUP_ID_SCHEMA
),
1253 ("backup-time", false, &BACKUP_TIME_SCHEMA
),
1254 ("filepath", false, &StringSchema
::new("Base64 encoded path").schema()),
1257 ).access(None
, &Permission
::Privilege(
1258 &["datastore", "{store}"],
1259 PRIV_DATASTORE_READ
| PRIV_DATASTORE_BACKUP
,
1263 fn pxar_file_download(
1268 rpcenv
: Box
<dyn RpcEnvironment
>,
1269 ) -> ApiResponseFuture
{
1272 let store
= tools
::required_string_param(¶m
, "store")?
;
1273 let datastore
= DataStore
::lookup_datastore(&store
)?
;
1275 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
1276 let user_info
= CachedUserInfo
::new()?
;
1277 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
1279 let filepath
= tools
::required_string_param(¶m
, "filepath")?
.to_owned();
1281 let backup_type
= tools
::required_string_param(¶m
, "backup-type")?
;
1282 let backup_id
= tools
::required_string_param(¶m
, "backup-id")?
;
1283 let backup_time
= tools
::required_integer_param(¶m
, "backup-time")?
;
1285 let backup_dir
= BackupDir
::new(backup_type
, backup_id
, backup_time
)?
;
1287 let allowed
= (user_privs
& PRIV_DATASTORE_READ
) != 0;
1288 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1290 let mut components
= base64
::decode(&filepath
)?
;
1291 if components
.len() > 0 && components
[0] == '
/'
as u8 {
1292 components
.remove(0);
1295 let mut split
= components
.splitn(2, |c
| *c
== '
/'
as u8);
1296 let pxar_name
= std
::str::from_utf8(split
.next().unwrap())?
;
1297 let file_path
= split
.next().ok_or(format_err
!("filepath looks strange '{}'", filepath
))?
;
1298 let (manifest
, files
) = read_backup_index(&datastore
, &backup_dir
)?
;
1300 if file
.filename
== pxar_name
&& file
.crypt_mode
== Some(CryptMode
::Encrypt
) {
1301 bail
!("cannot decode '{}' - is encrypted", pxar_name
);
1305 let mut path
= datastore
.base_path();
1306 path
.push(backup_dir
.relative_path());
1307 path
.push(pxar_name
);
1309 let index
= DynamicIndexReader
::open(&path
)
1310 .map_err(|err
| format_err
!("unable to read dynamic index '{:?}' - {}", &path
, err
))?
;
1312 let (csum
, size
) = index
.compute_csum();
1313 manifest
.verify_file(&pxar_name
, &csum
, size
)?
;
1315 let chunk_reader
= LocalChunkReader
::new(datastore
, None
, CryptMode
::None
);
1316 let reader
= BufferedDynamicReader
::new(index
, chunk_reader
);
1317 let archive_size
= reader
.archive_size();
1318 let reader
= LocalDynamicReadAt
::new(reader
);
1320 let decoder
= Accessor
::new(reader
, archive_size
).await?
;
1321 let root
= decoder
.open_root().await?
;
1323 .lookup(OsStr
::from_bytes(file_path
)).await?
1324 .ok_or(format_err
!("error opening '{:?}'", file_path
))?
;
1326 let file
= match file
.kind() {
1327 EntryKind
::File { .. }
=> file
,
1328 EntryKind
::Hardlink(_
) => {
1329 decoder
.follow_hardlink(&file
).await?
1332 other
=> bail
!("cannot download file of type {:?}", other
),
1335 let body
= Body
::wrap_stream(
1336 AsyncReaderStream
::new(file
.contents().await?
)
1337 .map_err(move |err
| {
1338 eprintln
!("error during streaming of '{:?}' - {}", filepath
, err
);
1343 // fixme: set other headers ?
1344 Ok(Response
::builder()
1345 .status(StatusCode
::OK
)
1346 .header(header
::CONTENT_TYPE
, "application/octet-stream")
1356 schema
: DATASTORE_SCHEMA
,
1359 type: RRDTimeFrameResolution
,
1367 permission
: &Permission
::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_BACKUP
, true),
1370 /// Read datastore stats
1373 timeframe
: RRDTimeFrameResolution
,
1376 ) -> Result
<Value
, Error
> {
1378 create_value_from_rrd(
1379 &format
!("datastore/{}", store
),
1382 "read_ios", "read_bytes",
1383 "write_ios", "write_bytes",
1395 schema
: DATASTORE_SCHEMA
,
1398 schema
: BACKUP_TYPE_SCHEMA
,
1401 schema
: BACKUP_ID_SCHEMA
,
1404 schema
: BACKUP_TIME_SCHEMA
,
1409 permission
: &Permission
::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ
| PRIV_DATASTORE_BACKUP
, true),
1412 /// Get "notes" for a specific backup
1415 backup_type
: String
,
1418 rpcenv
: &mut dyn RpcEnvironment
,
1419 ) -> Result
<String
, Error
> {
1420 let datastore
= DataStore
::lookup_datastore(&store
)?
;
1422 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
1423 let user_info
= CachedUserInfo
::new()?
;
1424 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
1426 let backup_dir
= BackupDir
::new(backup_type
, backup_id
, backup_time
)?
;
1428 let allowed
= (user_privs
& PRIV_DATASTORE_READ
) != 0;
1429 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1431 let manifest
= datastore
.load_manifest_json(&backup_dir
)?
;
1433 let notes
= manifest
["unprotected"]["notes"]
1437 Ok(String
::from(notes
))
1444 schema
: DATASTORE_SCHEMA
,
1447 schema
: BACKUP_TYPE_SCHEMA
,
1450 schema
: BACKUP_ID_SCHEMA
,
1453 schema
: BACKUP_TIME_SCHEMA
,
1456 description
: "A multiline text.",
1461 permission
: &Permission
::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY
, true),
1464 /// Set "notes" for a specific backup
1467 backup_type
: String
,
1471 rpcenv
: &mut dyn RpcEnvironment
,
1472 ) -> Result
<(), Error
> {
1473 let datastore
= DataStore
::lookup_datastore(&store
)?
;
1475 let userid
: Userid
= rpcenv
.get_user().unwrap().parse()?
;
1476 let user_info
= CachedUserInfo
::new()?
;
1477 let user_privs
= user_info
.lookup_privs(&userid
, &["datastore", &store
]);
1479 let backup_dir
= BackupDir
::new(backup_type
, backup_id
, backup_time
)?
;
1481 let allowed
= (user_privs
& PRIV_DATASTORE_READ
) != 0;
1482 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1484 let mut manifest
= datastore
.load_manifest_json(&backup_dir
)?
;
1486 manifest
["unprotected"]["notes"] = notes
.into();
1488 datastore
.store_manifest(&backup_dir
, manifest
)?
;
1497 schema
: DATASTORE_SCHEMA
,
1500 schema
: BACKUP_TYPE_SCHEMA
,
1503 schema
: BACKUP_ID_SCHEMA
,
1511 permission
: &Permission
::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY
, true),
1514 /// Change owner of a backup group
1515 fn set_backup_owner(
1517 backup_type
: String
,
1520 _rpcenv
: &mut dyn RpcEnvironment
,
1521 ) -> Result
<(), Error
> {
1523 let datastore
= DataStore
::lookup_datastore(&store
)?
;
1525 let backup_group
= BackupGroup
::new(backup_type
, backup_id
);
1527 let user_info
= CachedUserInfo
::new()?
;
1529 if !user_info
.is_active_user(&new_owner
) {
1530 bail
!("user '{}' is inactive or non-existent", new_owner
);
1533 datastore
.set_owner(&backup_group
, &new_owner
, true)?
;
1539 const DATASTORE_INFO_SUBDIRS
: SubdirMap
= &[
1543 .get(&API_METHOD_CATALOG
)
1548 .post(&API_METHOD_SET_BACKUP_OWNER
)
1553 .download(&API_METHOD_DOWNLOAD_FILE
)
1558 .download(&API_METHOD_DOWNLOAD_FILE_DECODED
)
1563 .get(&API_METHOD_LIST_SNAPSHOT_FILES
)
1568 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS
)
1569 .post(&API_METHOD_START_GARBAGE_COLLECTION
)
1574 .get(&API_METHOD_LIST_GROUPS
)
1579 .get(&API_METHOD_GET_NOTES
)
1580 .put(&API_METHOD_SET_NOTES
)
1585 .post(&API_METHOD_PRUNE
)
1588 "pxar-file-download",
1590 .download(&API_METHOD_PXAR_FILE_DOWNLOAD
)
1595 .get(&API_METHOD_GET_RRD_STATS
)
1600 .get(&API_METHOD_LIST_SNAPSHOTS
)
1601 .delete(&API_METHOD_DELETE_SNAPSHOT
)
1606 .get(&API_METHOD_STATUS
)
1609 "upload-backup-log",
1611 .upload(&API_METHOD_UPLOAD_BACKUP_LOG
)
1616 .post(&API_METHOD_VERIFY
)
// Router for a single datastore (`/admin/datastore/{store}`): a GET on the
// node itself enumerates the available sub-endpoints via
// `list_subdirs_api_method!`, and every entry of DATASTORE_INFO_SUBDIRS
// (catalog, files, snapshots, notes, ...) is mounted as a subdirectory route.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
/// Entry point for the datastore admin API tree.
///
/// GET on the root lists the configured datastores
/// (`API_METHOD_GET_DATASTORE_LIST`); any further path segment is captured
/// into the `store` parameter by `match_all` and dispatched to
/// `DATASTORE_INFO_ROUTER` for the per-datastore endpoints.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);