]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
mark signed manifests as such
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 use std::collections::{HashSet, HashMap};
2 use std::ffi::OsStr;
3 use std::os::unix::ffi::OsStrExt;
4
5 use anyhow::{bail, format_err, Error};
6 use futures::*;
7 use hyper::http::request::Parts;
8 use hyper::{header, Body, Response, StatusCode};
9 use serde_json::{json, Value};
10
11 use proxmox::api::{
12 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
13 RpcEnvironment, RpcEnvironmentType, Permission
14 };
15 use proxmox::api::router::SubdirMap;
16 use proxmox::api::schema::*;
17 use proxmox::tools::fs::{replace_file, CreateOptions};
18 use proxmox::try_block;
19 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
20
21 use pxar::accessor::aio::Accessor;
22 use pxar::EntryKind;
23
24 use crate::api2::types::*;
25 use crate::api2::node::rrd::create_value_from_rrd;
26 use crate::backup::*;
27 use crate::config::datastore;
28 use crate::config::cached_user_info::CachedUserInfo;
29
30 use crate::server::WorkerTask;
31 use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
32 use crate::config::acl::{
33 PRIV_DATASTORE_AUDIT,
34 PRIV_DATASTORE_MODIFY,
35 PRIV_DATASTORE_READ,
36 PRIV_DATASTORE_PRUNE,
37 PRIV_DATASTORE_BACKUP,
38 };
39
40 fn check_backup_owner(
41 store: &DataStore,
42 group: &BackupGroup,
43 userid: &Userid,
44 ) -> Result<(), Error> {
45 let owner = store.get_owner(group)?;
46 if &owner != userid {
47 bail!("backup owner check failed ({} != {})", userid, owner);
48 }
49 Ok(())
50 }
51
52 fn read_backup_index(
53 store: &DataStore,
54 backup_dir: &BackupDir,
55 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
56
57 let (manifest, index_size) = store.load_manifest(backup_dir)?;
58
59 let mut result = Vec::new();
60 for item in manifest.files() {
61 result.push(BackupContent {
62 filename: item.filename.clone(),
63 crypt_mode: Some(item.crypt_mode),
64 size: Some(item.size),
65 });
66 }
67
68 result.push(BackupContent {
69 filename: MANIFEST_BLOB_NAME.to_string(),
70 crypt_mode: match manifest.signature {
71 Some(_) => Some(CryptMode::SignOnly),
72 None => Some(CryptMode::None),
73 },
74 size: Some(index_size),
75 });
76
77 Ok((manifest, result))
78 }
79
80 fn get_all_snapshot_files(
81 store: &DataStore,
82 info: &BackupInfo,
83 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
84
85 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
86
87 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
88 acc.insert(item.filename.clone());
89 acc
90 });
91
92 for file in &info.files {
93 if file_set.contains(file) { continue; }
94 files.push(BackupContent {
95 filename: file.to_string(),
96 size: None,
97 crypt_mode: None,
98 });
99 }
100
101 Ok((manifest, files))
102 }
103
104 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
105
106 let mut group_hash = HashMap::new();
107
108 for info in backup_list {
109 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
110 let time_list = group_hash.entry(group_id).or_insert(vec![]);
111 time_list.push(info);
112 }
113
114 group_hash
115 }
116
117 #[api(
118 input: {
119 properties: {
120 store: {
121 schema: DATASTORE_SCHEMA,
122 },
123 },
124 },
125 returns: {
126 type: Array,
127 description: "Returns the list of backup groups.",
128 items: {
129 type: GroupListItem,
130 }
131 },
132 access: {
133 permission: &Permission::Privilege(
134 &["datastore", "{store}"],
135 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
136 true),
137 },
138 )]
139 /// List backup groups.
140 fn list_groups(
141 store: String,
142 rpcenv: &mut dyn RpcEnvironment,
143 ) -> Result<Vec<GroupListItem>, Error> {
144
145 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
146 let user_info = CachedUserInfo::new()?;
147 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
148
149 let datastore = DataStore::lookup_datastore(&store)?;
150
151 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
152
153 let group_hash = group_backups(backup_list);
154
155 let mut groups = Vec::new();
156
157 for (_group_id, mut list) in group_hash {
158
159 BackupInfo::sort_list(&mut list, false);
160
161 let info = &list[0];
162
163 let group = info.backup_dir.group();
164
165 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
166 let owner = datastore.get_owner(group)?;
167 if !list_all {
168 if owner != userid { continue; }
169 }
170
171 let result_item = GroupListItem {
172 backup_type: group.backup_type().to_string(),
173 backup_id: group.backup_id().to_string(),
174 last_backup: info.backup_dir.backup_time().timestamp(),
175 backup_count: list.len() as u64,
176 files: info.files.clone(),
177 owner: Some(owner),
178 };
179 groups.push(result_item);
180 }
181
182 Ok(groups)
183 }
184
185 #[api(
186 input: {
187 properties: {
188 store: {
189 schema: DATASTORE_SCHEMA,
190 },
191 "backup-type": {
192 schema: BACKUP_TYPE_SCHEMA,
193 },
194 "backup-id": {
195 schema: BACKUP_ID_SCHEMA,
196 },
197 "backup-time": {
198 schema: BACKUP_TIME_SCHEMA,
199 },
200 },
201 },
202 returns: {
203 type: Array,
204 description: "Returns the list of archive files inside a backup snapshots.",
205 items: {
206 type: BackupContent,
207 }
208 },
209 access: {
210 permission: &Permission::Privilege(
211 &["datastore", "{store}"],
212 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
213 true),
214 },
215 )]
216 /// List snapshot files.
217 pub fn list_snapshot_files(
218 store: String,
219 backup_type: String,
220 backup_id: String,
221 backup_time: i64,
222 _info: &ApiMethod,
223 rpcenv: &mut dyn RpcEnvironment,
224 ) -> Result<Vec<BackupContent>, Error> {
225
226 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
227 let user_info = CachedUserInfo::new()?;
228 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
229
230 let datastore = DataStore::lookup_datastore(&store)?;
231
232 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
233
234 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
235 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
236
237 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
238
239 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
240
241 Ok(files)
242 }
243
244 #[api(
245 input: {
246 properties: {
247 store: {
248 schema: DATASTORE_SCHEMA,
249 },
250 "backup-type": {
251 schema: BACKUP_TYPE_SCHEMA,
252 },
253 "backup-id": {
254 schema: BACKUP_ID_SCHEMA,
255 },
256 "backup-time": {
257 schema: BACKUP_TIME_SCHEMA,
258 },
259 },
260 },
261 access: {
262 permission: &Permission::Privilege(
263 &["datastore", "{store}"],
264 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
265 true),
266 },
267 )]
268 /// Delete backup snapshot.
269 fn delete_snapshot(
270 store: String,
271 backup_type: String,
272 backup_id: String,
273 backup_time: i64,
274 _info: &ApiMethod,
275 rpcenv: &mut dyn RpcEnvironment,
276 ) -> Result<Value, Error> {
277
278 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
279 let user_info = CachedUserInfo::new()?;
280 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
281
282 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
283
284 let datastore = DataStore::lookup_datastore(&store)?;
285
286 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
287 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
288
289 datastore.remove_backup_dir(&snapshot, false)?;
290
291 Ok(Value::Null)
292 }
293
294 #[api(
295 input: {
296 properties: {
297 store: {
298 schema: DATASTORE_SCHEMA,
299 },
300 "backup-type": {
301 optional: true,
302 schema: BACKUP_TYPE_SCHEMA,
303 },
304 "backup-id": {
305 optional: true,
306 schema: BACKUP_ID_SCHEMA,
307 },
308 },
309 },
310 returns: {
311 type: Array,
312 description: "Returns the list of snapshots.",
313 items: {
314 type: SnapshotListItem,
315 }
316 },
317 access: {
318 permission: &Permission::Privilege(
319 &["datastore", "{store}"],
320 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
321 true),
322 },
323 )]
324 /// List backup snapshots.
325 pub fn list_snapshots (
326 store: String,
327 backup_type: Option<String>,
328 backup_id: Option<String>,
329 _param: Value,
330 _info: &ApiMethod,
331 rpcenv: &mut dyn RpcEnvironment,
332 ) -> Result<Vec<SnapshotListItem>, Error> {
333
334 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
335 let user_info = CachedUserInfo::new()?;
336 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
337
338 let datastore = DataStore::lookup_datastore(&store)?;
339
340 let base_path = datastore.base_path();
341
342 let backup_list = BackupInfo::list_backups(&base_path)?;
343
344 let mut snapshots = vec![];
345
346 for info in backup_list {
347 let group = info.backup_dir.group();
348 if let Some(ref backup_type) = backup_type {
349 if backup_type != group.backup_type() { continue; }
350 }
351 if let Some(ref backup_id) = backup_id {
352 if backup_id != group.backup_id() { continue; }
353 }
354
355 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
356 let owner = datastore.get_owner(group)?;
357
358 if !list_all {
359 if owner != userid { continue; }
360 }
361
362 let mut size = None;
363
364 let (comment, files) = match get_all_snapshot_files(&datastore, &info) {
365 Ok((manifest, files)) => {
366 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
367 // extract the first line from notes
368 let comment: Option<String> = manifest.unprotected["notes"]
369 .as_str()
370 .and_then(|notes| notes.lines().next())
371 .map(String::from);
372
373 (comment, files)
374 },
375 Err(err) => {
376 eprintln!("error during snapshot file listing: '{}'", err);
377 (
378 None,
379 info
380 .files
381 .iter()
382 .map(|x| BackupContent {
383 filename: x.to_string(),
384 size: None,
385 crypt_mode: None,
386 })
387 .collect()
388 )
389 },
390 };
391
392 let result_item = SnapshotListItem {
393 backup_type: group.backup_type().to_string(),
394 backup_id: group.backup_id().to_string(),
395 backup_time: info.backup_dir.backup_time().timestamp(),
396 comment,
397 files,
398 size,
399 owner: Some(owner),
400 };
401
402 snapshots.push(result_item);
403 }
404
405 Ok(snapshots)
406 }
407
408 #[api(
409 input: {
410 properties: {
411 store: {
412 schema: DATASTORE_SCHEMA,
413 },
414 },
415 },
416 returns: {
417 type: StorageStatus,
418 },
419 access: {
420 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
421 },
422 )]
423 /// Get datastore status.
424 pub fn status(
425 store: String,
426 _info: &ApiMethod,
427 _rpcenv: &mut dyn RpcEnvironment,
428 ) -> Result<StorageStatus, Error> {
429 let datastore = DataStore::lookup_datastore(&store)?;
430 crate::tools::disks::disk_usage(&datastore.base_path())
431 }
432
433 #[api(
434 input: {
435 properties: {
436 store: {
437 schema: DATASTORE_SCHEMA,
438 },
439 "backup-type": {
440 schema: BACKUP_TYPE_SCHEMA,
441 optional: true,
442 },
443 "backup-id": {
444 schema: BACKUP_ID_SCHEMA,
445 optional: true,
446 },
447 "backup-time": {
448 schema: BACKUP_TIME_SCHEMA,
449 optional: true,
450 },
451 },
452 },
453 returns: {
454 schema: UPID_SCHEMA,
455 },
456 access: {
457 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
458 },
459 )]
460 /// Verify backups.
461 ///
462 /// This function can verify a single backup snapshot, all backup from a backup group,
463 /// or all backups in the datastore.
464 pub fn verify(
465 store: String,
466 backup_type: Option<String>,
467 backup_id: Option<String>,
468 backup_time: Option<i64>,
469 rpcenv: &mut dyn RpcEnvironment,
470 ) -> Result<Value, Error> {
471 let datastore = DataStore::lookup_datastore(&store)?;
472
473 let worker_id;
474
475 let mut backup_dir = None;
476 let mut backup_group = None;
477
478 match (backup_type, backup_id, backup_time) {
479 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
480 worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
481 let dir = BackupDir::new(backup_type, backup_id, backup_time);
482 backup_dir = Some(dir);
483 }
484 (Some(backup_type), Some(backup_id), None) => {
485 worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
486 let group = BackupGroup::new(backup_type, backup_id);
487 backup_group = Some(group);
488 }
489 (None, None, None) => {
490 worker_id = store.clone();
491 }
492 _ => bail!("parameters do not spefify a backup group or snapshot"),
493 }
494
495 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
496 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
497
498 let upid_str = WorkerTask::new_thread(
499 "verify",
500 Some(worker_id.clone()),
501 userid,
502 to_stdout,
503 move |worker| {
504 let failed_dirs = if let Some(backup_dir) = backup_dir {
505 let mut verified_chunks = HashSet::with_capacity(1024*16);
506 let mut corrupt_chunks = HashSet::with_capacity(64);
507 let mut res = Vec::new();
508 if !verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)? {
509 res.push(backup_dir.to_string());
510 }
511 res
512 } else if let Some(backup_group) = backup_group {
513 verify_backup_group(&datastore, &backup_group, &worker)?
514 } else {
515 verify_all_backups(&datastore, &worker)?
516 };
517 if failed_dirs.len() > 0 {
518 worker.log("Failed to verify following snapshots:");
519 for dir in failed_dirs {
520 worker.log(format!("\t{}", dir));
521 }
522 bail!("verfication failed - please check the log for details");
523 }
524 Ok(())
525 },
526 )?;
527
528 Ok(json!(upid_str))
529 }
530
/// Build the array of `(name, optional, schema)` parameter tuples shared by
/// the prune-related API method definitions.
///
/// Accepts one or two bracketed lists: entries from the first list are placed
/// before the common `keep-*` parameters, entries from the second list after
/// them. NOTE(review): the combined list appears to be kept in alphabetical
/// order by hand (callers pass pre-sorted entries) — confirm this requirement
/// against `ObjectSchema` before reordering anything here.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // One-list form: delegate to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
573
/// Return schema of the `prune` API call: an array of `PruneListItem`s.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
578
/// API method definition for `prune`.
///
/// Defined manually (instead of via `#[api]`) because the parameter list is
/// assembled with the `add_common_prune_prameters!` macro.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
    );
600
/// Prune a backup group according to the given `keep-*` retention options.
///
/// With `dry-run` set, only reports what would be kept/removed. Otherwise the
/// prune runs synchronously inside a `WorkerTask` (used purely for its task
/// log) and deletes the snapshots not selected for keeping. Callers without
/// `PRIV_DATASTORE_MODIFY` may only prune groups they own.
fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    // Without the modify privilege, the caller must own the group.
    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, &group, &userid)?; }

    // All keep-* options are optional; absent ones stay `None`.
    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    // No retention option set at all means everything is kept.
    let keep_all = !prune_options.keeps_something();

    // Dry run: report the keep/remove decisions without touching anything.
    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchrounously
    let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;

    let result = try_block! {
        if keep_all {
            worker.log("No prune selection - keeping all files.");
        } else {
            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
            worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                store, backup_type, backup_id));
        }

        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let timestamp = BackupDir::backup_time_to_string(backup_time);
            let group = info.backup_dir.group();


            let msg = format!(
                "{}/{}/{} {}",
                group.backup_type(),
                group.backup_id(),
                timestamp,
                if keep { "keep" } else { "remove" },
            );

            worker.log(msg);

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));

            // Forced removal: prune decided this snapshot has to go.
            if !(dry_run || keep) {
                datastore.remove_backup_dir(&info.backup_dir, true)?;
            }
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    };

    Ok(json!(prune_result))
}
716
717 #[api(
718 input: {
719 properties: {
720 store: {
721 schema: DATASTORE_SCHEMA,
722 },
723 },
724 },
725 returns: {
726 schema: UPID_SCHEMA,
727 },
728 access: {
729 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
730 },
731 )]
732 /// Start garbage collection.
733 fn start_garbage_collection(
734 store: String,
735 _info: &ApiMethod,
736 rpcenv: &mut dyn RpcEnvironment,
737 ) -> Result<Value, Error> {
738
739 let datastore = DataStore::lookup_datastore(&store)?;
740
741 println!("Starting garbage collection on store {}", store);
742
743 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
744
745 let upid_str = WorkerTask::new_thread(
746 "garbage_collection",
747 Some(store.clone()),
748 Userid::root_userid().clone(),
749 to_stdout,
750 move |worker| {
751 worker.log(format!("starting garbage collection on store {}", store));
752 datastore.garbage_collection(&worker)
753 },
754 )?;
755
756 Ok(json!(upid_str))
757 }
758
759 #[api(
760 input: {
761 properties: {
762 store: {
763 schema: DATASTORE_SCHEMA,
764 },
765 },
766 },
767 returns: {
768 type: GarbageCollectionStatus,
769 },
770 access: {
771 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
772 },
773 )]
774 /// Garbage collection status.
775 pub fn garbage_collection_status(
776 store: String,
777 _info: &ApiMethod,
778 _rpcenv: &mut dyn RpcEnvironment,
779 ) -> Result<GarbageCollectionStatus, Error> {
780
781 let datastore = DataStore::lookup_datastore(&store)?;
782
783 let status = datastore.last_gc_status();
784
785 Ok(status)
786 }
787
788 #[api(
789 returns: {
790 description: "List the accessible datastores.",
791 type: Array,
792 items: {
793 description: "Datastore name and description.",
794 properties: {
795 store: {
796 schema: DATASTORE_SCHEMA,
797 },
798 comment: {
799 optional: true,
800 schema: SINGLE_LINE_COMMENT_SCHEMA,
801 },
802 },
803 },
804 },
805 access: {
806 permission: &Permission::Anybody,
807 },
808 )]
809 /// Datastore list
810 fn get_datastore_list(
811 _param: Value,
812 _info: &ApiMethod,
813 rpcenv: &mut dyn RpcEnvironment,
814 ) -> Result<Value, Error> {
815
816 let (config, _digest) = datastore::config()?;
817
818 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
819 let user_info = CachedUserInfo::new()?;
820
821 let mut list = Vec::new();
822
823 for (store, (_, data)) in &config.sections {
824 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
825 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
826 if allowed {
827 let mut entry = json!({ "store": store });
828 if let Some(comment) = data["comment"].as_str() {
829 entry["comment"] = comment.into();
830 }
831 list.push(entry);
832 }
833 }
834
835 Ok(list.into())
836 }
837
/// API method definition for `download_file`.
///
/// Defined manually (instead of via `#[api]`) because the handler returns a
/// raw HTTP byte stream.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
856
/// HTTP handler: stream a single raw (still encoded, possibly encrypted)
/// file out of a backup snapshot as `application/octet-stream`.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // Read privilege allows downloading from any snapshot; otherwise the
        // caller must own the backup group.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        // Build the on-disk path: <base>/<snapshot dir>/<file>.
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // Stream the file chunk-wise; log (but still propagate) stream errors.
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
910
/// API method definition for `download_file_decoded`.
///
/// Defined manually (instead of via `#[api]`) because the handler returns a
/// raw HTTP byte stream.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
929
/// HTTP handler: stream a single file from a backup snapshot in *decoded*
/// form (index files are resolved to their full content, blobs are unpacked).
///
/// Refuses files whose manifest crypt mode is `Encrypt`, since the server
/// cannot decrypt them.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // Read privilege allows downloading from any snapshot; otherwise the
        // caller must own the backup group.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        // Reject encrypted files up front — they cannot be decoded server-side.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Decode strategy depends on the file extension.
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // Verify the index against the manifest before serving it.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                // Verify the index against the manifest before serving it.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // Larger buffer for fixed indexes (typically VM images).
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1029
/// API method definition for `upload_backup_log`.
///
/// Defined manually (instead of via `#[api]`) because the handler consumes a
/// raw HTTP request body.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
1046
/// HTTP handler: store the client's log blob inside an existing backup
/// snapshot. Fails if the snapshot already contains a log.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // Only the owner may attach a log — no privilege-based override here.
        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // A log may only be uploaded once per snapshot.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

        // Collect the full request body into one buffer.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1098
1099 #[api(
1100 input: {
1101 properties: {
1102 store: {
1103 schema: DATASTORE_SCHEMA,
1104 },
1105 "backup-type": {
1106 schema: BACKUP_TYPE_SCHEMA,
1107 },
1108 "backup-id": {
1109 schema: BACKUP_ID_SCHEMA,
1110 },
1111 "backup-time": {
1112 schema: BACKUP_TIME_SCHEMA,
1113 },
1114 "filepath": {
1115 description: "Base64 encoded path.",
1116 type: String,
1117 }
1118 },
1119 },
1120 access: {
1121 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1122 },
1123 )]
1124 /// Get the entries of the given path of the catalog
1125 fn catalog(
1126 store: String,
1127 backup_type: String,
1128 backup_id: String,
1129 backup_time: i64,
1130 filepath: String,
1131 _param: Value,
1132 _info: &ApiMethod,
1133 rpcenv: &mut dyn RpcEnvironment,
1134 ) -> Result<Value, Error> {
1135 let datastore = DataStore::lookup_datastore(&store)?;
1136
1137 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1138 let user_info = CachedUserInfo::new()?;
1139 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1140
1141 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1142
1143 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1144 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1145
1146 let file_name = CATALOG_NAME;
1147
1148 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1149 for file in files {
1150 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1151 bail!("cannot decode '{}' - is encrypted", file_name);
1152 }
1153 }
1154
1155 let mut path = datastore.base_path();
1156 path.push(backup_dir.relative_path());
1157 path.push(file_name);
1158
1159 let index = DynamicIndexReader::open(&path)
1160 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1161
1162 let (csum, size) = index.compute_csum();
1163 manifest.verify_file(&file_name, &csum, size)?;
1164
1165 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1166 let reader = BufferedDynamicReader::new(index, chunk_reader);
1167
1168 let mut catalog_reader = CatalogReader::new(reader);
1169 let mut current = catalog_reader.root()?;
1170 let mut components = vec![];
1171
1172
1173 if filepath != "root" {
1174 components = base64::decode(filepath)?;
1175 if components.len() > 0 && components[0] == '/' as u8 {
1176 components.remove(0);
1177 }
1178 for component in components.split(|c| *c == '/' as u8) {
1179 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1180 current = entry;
1181 } else {
1182 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1183 }
1184 }
1185 }
1186
1187 let mut res = Vec::new();
1188
1189 for direntry in catalog_reader.read_dir(&current)? {
1190 let mut components = components.clone();
1191 components.push('/' as u8);
1192 components.extend(&direntry.name);
1193 let path = base64::encode(components);
1194 let text = String::from_utf8_lossy(&direntry.name);
1195 let mut entry = json!({
1196 "filepath": path,
1197 "text": text,
1198 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1199 "leaf": true,
1200 });
1201 match direntry.attr {
1202 DirEntryAttribute::Directory { start: _ } => {
1203 entry["leaf"] = false.into();
1204 },
1205 DirEntryAttribute::File { size, mtime } => {
1206 entry["size"] = size.into();
1207 entry["mtime"] = mtime.into();
1208 },
1209 _ => {},
1210 }
1211 res.push(entry);
1212 }
1213
1214 Ok(res.into())
1215 }
1216
1217 #[sortable]
1218 pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1219 &ApiHandler::AsyncHttp(&pxar_file_download),
1220 &ObjectSchema::new(
1221 "Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
1222 &sorted!([
1223 ("store", false, &DATASTORE_SCHEMA),
1224 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1225 ("backup-id", false, &BACKUP_ID_SCHEMA),
1226 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1227 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
1228 ]),
1229 )
1230 ).access(None, &Permission::Privilege(
1231 &["datastore", "{store}"],
1232 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1233 true)
1234 );
1235
1236 fn pxar_file_download(
1237 _parts: Parts,
1238 _req_body: Body,
1239 param: Value,
1240 _info: &ApiMethod,
1241 rpcenv: Box<dyn RpcEnvironment>,
1242 ) -> ApiResponseFuture {
1243
1244 async move {
1245 let store = tools::required_string_param(&param, "store")?;
1246 let datastore = DataStore::lookup_datastore(&store)?;
1247
1248 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1249 let user_info = CachedUserInfo::new()?;
1250 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1251
1252 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1253
1254 let backup_type = tools::required_string_param(&param, "backup-type")?;
1255 let backup_id = tools::required_string_param(&param, "backup-id")?;
1256 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1257
1258 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1259
1260 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1261 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1262
1263 let mut components = base64::decode(&filepath)?;
1264 if components.len() > 0 && components[0] == '/' as u8 {
1265 components.remove(0);
1266 }
1267
1268 let mut split = components.splitn(2, |c| *c == '/' as u8);
1269 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
1270 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
1271 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1272 for file in files {
1273 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1274 bail!("cannot decode '{}' - is encrypted", pxar_name);
1275 }
1276 }
1277
1278 let mut path = datastore.base_path();
1279 path.push(backup_dir.relative_path());
1280 path.push(pxar_name);
1281
1282 let index = DynamicIndexReader::open(&path)
1283 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1284
1285 let (csum, size) = index.compute_csum();
1286 manifest.verify_file(&pxar_name, &csum, size)?;
1287
1288 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1289 let reader = BufferedDynamicReader::new(index, chunk_reader);
1290 let archive_size = reader.archive_size();
1291 let reader = LocalDynamicReadAt::new(reader);
1292
1293 let decoder = Accessor::new(reader, archive_size).await?;
1294 let root = decoder.open_root().await?;
1295 let file = root
1296 .lookup(OsStr::from_bytes(file_path)).await?
1297 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1298
1299 let file = match file.kind() {
1300 EntryKind::File { .. } => file,
1301 EntryKind::Hardlink(_) => {
1302 decoder.follow_hardlink(&file).await?
1303 },
1304 // TODO symlink
1305 other => bail!("cannot download file of type {:?}", other),
1306 };
1307
1308 let body = Body::wrap_stream(
1309 AsyncReaderStream::new(file.contents().await?)
1310 .map_err(move |err| {
1311 eprintln!("error during streaming of '{:?}' - {}", filepath, err);
1312 err
1313 })
1314 );
1315
1316 // fixme: set other headers ?
1317 Ok(Response::builder()
1318 .status(StatusCode::OK)
1319 .header(header::CONTENT_TYPE, "application/octet-stream")
1320 .body(body)
1321 .unwrap())
1322 }.boxed()
1323 }
1324
1325 #[api(
1326 input: {
1327 properties: {
1328 store: {
1329 schema: DATASTORE_SCHEMA,
1330 },
1331 timeframe: {
1332 type: RRDTimeFrameResolution,
1333 },
1334 cf: {
1335 type: RRDMode,
1336 },
1337 },
1338 },
1339 access: {
1340 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1341 },
1342 )]
1343 /// Read datastore stats
1344 fn get_rrd_stats(
1345 store: String,
1346 timeframe: RRDTimeFrameResolution,
1347 cf: RRDMode,
1348 _param: Value,
1349 ) -> Result<Value, Error> {
1350
1351 create_value_from_rrd(
1352 &format!("datastore/{}", store),
1353 &[
1354 "total", "used",
1355 "read_ios", "read_bytes",
1356 "write_ios", "write_bytes",
1357 "io_ticks",
1358 ],
1359 timeframe,
1360 cf,
1361 )
1362 }
1363
1364 #[api(
1365 input: {
1366 properties: {
1367 store: {
1368 schema: DATASTORE_SCHEMA,
1369 },
1370 "backup-type": {
1371 schema: BACKUP_TYPE_SCHEMA,
1372 },
1373 "backup-id": {
1374 schema: BACKUP_ID_SCHEMA,
1375 },
1376 "backup-time": {
1377 schema: BACKUP_TIME_SCHEMA,
1378 },
1379 },
1380 },
1381 access: {
1382 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1383 },
1384 )]
1385 /// Get "notes" for a specific backup
1386 fn get_notes(
1387 store: String,
1388 backup_type: String,
1389 backup_id: String,
1390 backup_time: i64,
1391 rpcenv: &mut dyn RpcEnvironment,
1392 ) -> Result<String, Error> {
1393 let datastore = DataStore::lookup_datastore(&store)?;
1394
1395 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1396 let user_info = CachedUserInfo::new()?;
1397 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1398
1399 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1400
1401 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1402 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1403
1404 let manifest = datastore.load_manifest_json(&backup_dir)?;
1405
1406 let notes = manifest["unprotected"]["notes"]
1407 .as_str()
1408 .unwrap_or("");
1409
1410 Ok(String::from(notes))
1411 }
1412
1413 #[api(
1414 input: {
1415 properties: {
1416 store: {
1417 schema: DATASTORE_SCHEMA,
1418 },
1419 "backup-type": {
1420 schema: BACKUP_TYPE_SCHEMA,
1421 },
1422 "backup-id": {
1423 schema: BACKUP_ID_SCHEMA,
1424 },
1425 "backup-time": {
1426 schema: BACKUP_TIME_SCHEMA,
1427 },
1428 notes: {
1429 description: "A multiline text.",
1430 },
1431 },
1432 },
1433 access: {
1434 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1435 },
1436 )]
1437 /// Set "notes" for a specific backup
1438 fn set_notes(
1439 store: String,
1440 backup_type: String,
1441 backup_id: String,
1442 backup_time: i64,
1443 notes: String,
1444 rpcenv: &mut dyn RpcEnvironment,
1445 ) -> Result<(), Error> {
1446 let datastore = DataStore::lookup_datastore(&store)?;
1447
1448 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1449 let user_info = CachedUserInfo::new()?;
1450 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1451
1452 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1453
1454 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1455 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1456
1457 let mut manifest = datastore.load_manifest_json(&backup_dir)?;
1458
1459 manifest["unprotected"]["notes"] = notes.into();
1460
1461 datastore.store_manifest(&backup_dir, manifest)?;
1462
1463 Ok(())
1464 }
1465
// Sub-routes available below /admin/datastore/{store}. Entries are listed in
// alphabetical order by path segment — presumably required by the subdir
// lookup; keep it sorted when adding entries (TODO confirm).
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1542
// Router for a single datastore: GET lists the subdirs, everything else is
// dispatched into DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


/// Top-level /admin/datastore router: GET lists all datastores, and the
/// "store" path parameter selects one datastore for the per-store routes.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);