]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
src/backup/verify.rs: use global hashes (instead of per group)
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 use std::collections::{HashSet, HashMap};
2 use std::ffi::OsStr;
3 use std::os::unix::ffi::OsStrExt;
4 use std::sync::{Arc, Mutex};
5
6 use anyhow::{bail, format_err, Error};
7 use futures::*;
8 use hyper::http::request::Parts;
9 use hyper::{header, Body, Response, StatusCode};
10 use serde_json::{json, Value};
11
12 use proxmox::api::{
13 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
14 RpcEnvironment, RpcEnvironmentType, Permission
15 };
16 use proxmox::api::router::SubdirMap;
17 use proxmox::api::schema::*;
18 use proxmox::tools::fs::{replace_file, CreateOptions};
19 use proxmox::try_block;
20 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
21
22 use pxar::accessor::aio::Accessor;
23 use pxar::EntryKind;
24
25 use crate::api2::types::*;
26 use crate::api2::node::rrd::create_value_from_rrd;
27 use crate::backup::*;
28 use crate::config::datastore;
29 use crate::config::cached_user_info::CachedUserInfo;
30
31 use crate::server::WorkerTask;
32 use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
33 use crate::config::acl::{
34 PRIV_DATASTORE_AUDIT,
35 PRIV_DATASTORE_MODIFY,
36 PRIV_DATASTORE_READ,
37 PRIV_DATASTORE_PRUNE,
38 PRIV_DATASTORE_BACKUP,
39 };
40
41 fn check_backup_owner(
42 store: &DataStore,
43 group: &BackupGroup,
44 userid: &Userid,
45 ) -> Result<(), Error> {
46 let owner = store.get_owner(group)?;
47 if &owner != userid {
48 bail!("backup owner check failed ({} != {})", userid, owner);
49 }
50 Ok(())
51 }
52
53 fn read_backup_index(
54 store: &DataStore,
55 backup_dir: &BackupDir,
56 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
57
58 let (manifest, index_size) = store.load_manifest(backup_dir)?;
59
60 let mut result = Vec::new();
61 for item in manifest.files() {
62 result.push(BackupContent {
63 filename: item.filename.clone(),
64 crypt_mode: Some(item.crypt_mode),
65 size: Some(item.size),
66 });
67 }
68
69 result.push(BackupContent {
70 filename: MANIFEST_BLOB_NAME.to_string(),
71 crypt_mode: match manifest.signature {
72 Some(_) => Some(CryptMode::SignOnly),
73 None => Some(CryptMode::None),
74 },
75 size: Some(index_size),
76 });
77
78 Ok((manifest, result))
79 }
80
81 fn get_all_snapshot_files(
82 store: &DataStore,
83 info: &BackupInfo,
84 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
85
86 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
87
88 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
89 acc.insert(item.filename.clone());
90 acc
91 });
92
93 for file in &info.files {
94 if file_set.contains(file) { continue; }
95 files.push(BackupContent {
96 filename: file.to_string(),
97 size: None,
98 crypt_mode: None,
99 });
100 }
101
102 Ok((manifest, files))
103 }
104
105 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
106
107 let mut group_hash = HashMap::new();
108
109 for info in backup_list {
110 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
111 let time_list = group_hash.entry(group_id).or_insert(vec![]);
112 time_list.push(info);
113 }
114
115 group_hash
116 }
117
118 #[api(
119 input: {
120 properties: {
121 store: {
122 schema: DATASTORE_SCHEMA,
123 },
124 },
125 },
126 returns: {
127 type: Array,
128 description: "Returns the list of backup groups.",
129 items: {
130 type: GroupListItem,
131 }
132 },
133 access: {
134 permission: &Permission::Privilege(
135 &["datastore", "{store}"],
136 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
137 true),
138 },
139 )]
140 /// List backup groups.
141 fn list_groups(
142 store: String,
143 rpcenv: &mut dyn RpcEnvironment,
144 ) -> Result<Vec<GroupListItem>, Error> {
145
146 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
147 let user_info = CachedUserInfo::new()?;
148 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
149
150 let datastore = DataStore::lookup_datastore(&store)?;
151
152 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
153
154 let group_hash = group_backups(backup_list);
155
156 let mut groups = Vec::new();
157
158 for (_group_id, mut list) in group_hash {
159
160 BackupInfo::sort_list(&mut list, false);
161
162 let info = &list[0];
163
164 let group = info.backup_dir.group();
165
166 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
167 let owner = datastore.get_owner(group)?;
168 if !list_all {
169 if owner != userid { continue; }
170 }
171
172 let result_item = GroupListItem {
173 backup_type: group.backup_type().to_string(),
174 backup_id: group.backup_id().to_string(),
175 last_backup: info.backup_dir.backup_time().timestamp(),
176 backup_count: list.len() as u64,
177 files: info.files.clone(),
178 owner: Some(owner),
179 };
180 groups.push(result_item);
181 }
182
183 Ok(groups)
184 }
185
186 #[api(
187 input: {
188 properties: {
189 store: {
190 schema: DATASTORE_SCHEMA,
191 },
192 "backup-type": {
193 schema: BACKUP_TYPE_SCHEMA,
194 },
195 "backup-id": {
196 schema: BACKUP_ID_SCHEMA,
197 },
198 "backup-time": {
199 schema: BACKUP_TIME_SCHEMA,
200 },
201 },
202 },
203 returns: {
204 type: Array,
205 description: "Returns the list of archive files inside a backup snapshots.",
206 items: {
207 type: BackupContent,
208 }
209 },
210 access: {
211 permission: &Permission::Privilege(
212 &["datastore", "{store}"],
213 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
214 true),
215 },
216 )]
217 /// List snapshot files.
218 pub fn list_snapshot_files(
219 store: String,
220 backup_type: String,
221 backup_id: String,
222 backup_time: i64,
223 _info: &ApiMethod,
224 rpcenv: &mut dyn RpcEnvironment,
225 ) -> Result<Vec<BackupContent>, Error> {
226
227 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
228 let user_info = CachedUserInfo::new()?;
229 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
230
231 let datastore = DataStore::lookup_datastore(&store)?;
232
233 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
234
235 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
236 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
237
238 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
239
240 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
241
242 Ok(files)
243 }
244
245 #[api(
246 input: {
247 properties: {
248 store: {
249 schema: DATASTORE_SCHEMA,
250 },
251 "backup-type": {
252 schema: BACKUP_TYPE_SCHEMA,
253 },
254 "backup-id": {
255 schema: BACKUP_ID_SCHEMA,
256 },
257 "backup-time": {
258 schema: BACKUP_TIME_SCHEMA,
259 },
260 },
261 },
262 access: {
263 permission: &Permission::Privilege(
264 &["datastore", "{store}"],
265 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
266 true),
267 },
268 )]
269 /// Delete backup snapshot.
270 fn delete_snapshot(
271 store: String,
272 backup_type: String,
273 backup_id: String,
274 backup_time: i64,
275 _info: &ApiMethod,
276 rpcenv: &mut dyn RpcEnvironment,
277 ) -> Result<Value, Error> {
278
279 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
280 let user_info = CachedUserInfo::new()?;
281 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
282
283 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
284
285 let datastore = DataStore::lookup_datastore(&store)?;
286
287 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
288 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
289
290 datastore.remove_backup_dir(&snapshot, false)?;
291
292 Ok(Value::Null)
293 }
294
295 #[api(
296 input: {
297 properties: {
298 store: {
299 schema: DATASTORE_SCHEMA,
300 },
301 "backup-type": {
302 optional: true,
303 schema: BACKUP_TYPE_SCHEMA,
304 },
305 "backup-id": {
306 optional: true,
307 schema: BACKUP_ID_SCHEMA,
308 },
309 },
310 },
311 returns: {
312 type: Array,
313 description: "Returns the list of snapshots.",
314 items: {
315 type: SnapshotListItem,
316 }
317 },
318 access: {
319 permission: &Permission::Privilege(
320 &["datastore", "{store}"],
321 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
322 true),
323 },
324 )]
325 /// List backup snapshots.
326 pub fn list_snapshots (
327 store: String,
328 backup_type: Option<String>,
329 backup_id: Option<String>,
330 _param: Value,
331 _info: &ApiMethod,
332 rpcenv: &mut dyn RpcEnvironment,
333 ) -> Result<Vec<SnapshotListItem>, Error> {
334
335 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
336 let user_info = CachedUserInfo::new()?;
337 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
338
339 let datastore = DataStore::lookup_datastore(&store)?;
340
341 let base_path = datastore.base_path();
342
343 let backup_list = BackupInfo::list_backups(&base_path)?;
344
345 let mut snapshots = vec![];
346
347 for info in backup_list {
348 let group = info.backup_dir.group();
349 if let Some(ref backup_type) = backup_type {
350 if backup_type != group.backup_type() { continue; }
351 }
352 if let Some(ref backup_id) = backup_id {
353 if backup_id != group.backup_id() { continue; }
354 }
355
356 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
357 let owner = datastore.get_owner(group)?;
358
359 if !list_all {
360 if owner != userid { continue; }
361 }
362
363 let mut size = None;
364
365 let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
366 Ok((manifest, files)) => {
367 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
368 // extract the first line from notes
369 let comment: Option<String> = manifest.unprotected["notes"]
370 .as_str()
371 .and_then(|notes| notes.lines().next())
372 .map(String::from);
373
374 let verify = manifest.unprotected["verify_state"].clone();
375 let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
376 Ok(verify) => verify,
377 Err(err) => {
378 eprintln!("error parsing verification state : '{}'", err);
379 None
380 }
381 };
382
383 (comment, verify, files)
384 },
385 Err(err) => {
386 eprintln!("error during snapshot file listing: '{}'", err);
387 (
388 None,
389 None,
390 info
391 .files
392 .iter()
393 .map(|x| BackupContent {
394 filename: x.to_string(),
395 size: None,
396 crypt_mode: None,
397 })
398 .collect()
399 )
400 },
401 };
402
403 let result_item = SnapshotListItem {
404 backup_type: group.backup_type().to_string(),
405 backup_id: group.backup_id().to_string(),
406 backup_time: info.backup_dir.backup_time().timestamp(),
407 comment,
408 verification,
409 files,
410 size,
411 owner: Some(owner),
412 };
413
414 snapshots.push(result_item);
415 }
416
417 Ok(snapshots)
418 }
419
420 #[api(
421 input: {
422 properties: {
423 store: {
424 schema: DATASTORE_SCHEMA,
425 },
426 },
427 },
428 returns: {
429 type: StorageStatus,
430 },
431 access: {
432 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
433 },
434 )]
435 /// Get datastore status.
436 pub fn status(
437 store: String,
438 _info: &ApiMethod,
439 _rpcenv: &mut dyn RpcEnvironment,
440 ) -> Result<StorageStatus, Error> {
441 let datastore = DataStore::lookup_datastore(&store)?;
442 crate::tools::disks::disk_usage(&datastore.base_path())
443 }
444
445 #[api(
446 input: {
447 properties: {
448 store: {
449 schema: DATASTORE_SCHEMA,
450 },
451 "backup-type": {
452 schema: BACKUP_TYPE_SCHEMA,
453 optional: true,
454 },
455 "backup-id": {
456 schema: BACKUP_ID_SCHEMA,
457 optional: true,
458 },
459 "backup-time": {
460 schema: BACKUP_TIME_SCHEMA,
461 optional: true,
462 },
463 },
464 },
465 returns: {
466 schema: UPID_SCHEMA,
467 },
468 access: {
469 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
470 },
471 )]
472 /// Verify backups.
473 ///
474 /// This function can verify a single backup snapshot, all backup from a backup group,
475 /// or all backups in the datastore.
476 pub fn verify(
477 store: String,
478 backup_type: Option<String>,
479 backup_id: Option<String>,
480 backup_time: Option<i64>,
481 rpcenv: &mut dyn RpcEnvironment,
482 ) -> Result<Value, Error> {
483 let datastore = DataStore::lookup_datastore(&store)?;
484
485 let worker_id;
486
487 let mut backup_dir = None;
488 let mut backup_group = None;
489
490 match (backup_type, backup_id, backup_time) {
491 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
492 worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
493 let dir = BackupDir::new(backup_type, backup_id, backup_time);
494 backup_dir = Some(dir);
495 }
496 (Some(backup_type), Some(backup_id), None) => {
497 worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
498 let group = BackupGroup::new(backup_type, backup_id);
499 backup_group = Some(group);
500 }
501 (None, None, None) => {
502 worker_id = store.clone();
503 }
504 _ => bail!("parameters do not specify a backup group or snapshot"),
505 }
506
507 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
508 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
509
510 let upid_str = WorkerTask::new_thread(
511 "verify",
512 Some(worker_id.clone()),
513 userid,
514 to_stdout,
515 move |worker| {
516 let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
517 let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
518
519 let failed_dirs = if let Some(backup_dir) = backup_dir {
520 let mut res = Vec::new();
521 if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
522 res.push(backup_dir.to_string());
523 }
524 res
525 } else if let Some(backup_group) = backup_group {
526 verify_backup_group(datastore, &backup_group, verified_chunks, corrupt_chunks, worker.clone())?
527 } else {
528 verify_all_backups(datastore, worker.clone())?
529 };
530 if failed_dirs.len() > 0 {
531 worker.log("Failed to verify following snapshots:");
532 for dir in failed_dirs {
533 worker.log(format!("\t{}", dir));
534 }
535 bail!("verification failed - please check the log for details");
536 }
537 Ok(())
538 },
539 )?;
540
541 Ok(json!(upid_str))
542 }
543
/// Expand a parameter-list literal with the common `keep-*` prune options
/// (`keep-last`, `keep-hourly`, `keep-daily`, `keep-weekly`, `keep-monthly`,
/// `keep-yearly`), all optional. Used below to build `API_METHOD_PRUNE`.
///
/// NOTE(review): "prameters" is a typo for "parameters", but the macro is
/// `#[macro_export]`ed and referenced by this name, so renaming would be a
/// breaking change — kept as-is.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // Single-list form: delegates to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    // Two-list form: $list1 entries come first, then the keep-* options
    // (in alphabetical order), then $list2 entries.
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
586
/// Return schema of the `prune` API call: an array of `PruneListItem`s,
/// each carrying a flag telling whether the snapshot is kept or removed.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
591
/// API method descriptor for `prune`, written by hand (instead of via
/// `#[api]`) so the shared `keep-*` parameter set from
/// `add_common_prune_prameters!` can be reused.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
    );
613
614 fn prune(
615 param: Value,
616 _info: &ApiMethod,
617 rpcenv: &mut dyn RpcEnvironment,
618 ) -> Result<Value, Error> {
619
620 let store = tools::required_string_param(&param, "store")?;
621 let backup_type = tools::required_string_param(&param, "backup-type")?;
622 let backup_id = tools::required_string_param(&param, "backup-id")?;
623
624 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
625 let user_info = CachedUserInfo::new()?;
626 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
627
628 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
629
630 let group = BackupGroup::new(backup_type, backup_id);
631
632 let datastore = DataStore::lookup_datastore(&store)?;
633
634 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
635 if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
636
637 let prune_options = PruneOptions {
638 keep_last: param["keep-last"].as_u64(),
639 keep_hourly: param["keep-hourly"].as_u64(),
640 keep_daily: param["keep-daily"].as_u64(),
641 keep_weekly: param["keep-weekly"].as_u64(),
642 keep_monthly: param["keep-monthly"].as_u64(),
643 keep_yearly: param["keep-yearly"].as_u64(),
644 };
645
646 let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
647
648 let mut prune_result = Vec::new();
649
650 let list = group.list_backups(&datastore.base_path())?;
651
652 let mut prune_info = compute_prune_info(list, &prune_options)?;
653
654 prune_info.reverse(); // delete older snapshots first
655
656 let keep_all = !prune_options.keeps_something();
657
658 if dry_run {
659 for (info, mut keep) in prune_info {
660 if keep_all { keep = true; }
661
662 let backup_time = info.backup_dir.backup_time();
663 let group = info.backup_dir.group();
664
665 prune_result.push(json!({
666 "backup-type": group.backup_type(),
667 "backup-id": group.backup_id(),
668 "backup-time": backup_time.timestamp(),
669 "keep": keep,
670 }));
671 }
672 return Ok(json!(prune_result));
673 }
674
675
676 // We use a WorkerTask just to have a task log, but run synchrounously
677 let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
678
679 let result = try_block! {
680 if keep_all {
681 worker.log("No prune selection - keeping all files.");
682 } else {
683 worker.log(format!("retention options: {}", prune_options.cli_options_string()));
684 worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
685 store, backup_type, backup_id));
686 }
687
688 for (info, mut keep) in prune_info {
689 if keep_all { keep = true; }
690
691 let backup_time = info.backup_dir.backup_time();
692 let timestamp = BackupDir::backup_time_to_string(backup_time);
693 let group = info.backup_dir.group();
694
695
696 let msg = format!(
697 "{}/{}/{} {}",
698 group.backup_type(),
699 group.backup_id(),
700 timestamp,
701 if keep { "keep" } else { "remove" },
702 );
703
704 worker.log(msg);
705
706 prune_result.push(json!({
707 "backup-type": group.backup_type(),
708 "backup-id": group.backup_id(),
709 "backup-time": backup_time.timestamp(),
710 "keep": keep,
711 }));
712
713 if !(dry_run || keep) {
714 datastore.remove_backup_dir(&info.backup_dir, true)?;
715 }
716 }
717
718 Ok(())
719 };
720
721 worker.log_result(&result);
722
723 if let Err(err) = result {
724 bail!("prune failed - {}", err);
725 };
726
727 Ok(json!(prune_result))
728 }
729
730 #[api(
731 input: {
732 properties: {
733 store: {
734 schema: DATASTORE_SCHEMA,
735 },
736 },
737 },
738 returns: {
739 schema: UPID_SCHEMA,
740 },
741 access: {
742 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
743 },
744 )]
745 /// Start garbage collection.
746 fn start_garbage_collection(
747 store: String,
748 _info: &ApiMethod,
749 rpcenv: &mut dyn RpcEnvironment,
750 ) -> Result<Value, Error> {
751
752 let datastore = DataStore::lookup_datastore(&store)?;
753
754 println!("Starting garbage collection on store {}", store);
755
756 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
757
758 let upid_str = WorkerTask::new_thread(
759 "garbage_collection",
760 Some(store.clone()),
761 Userid::root_userid().clone(),
762 to_stdout,
763 move |worker| {
764 worker.log(format!("starting garbage collection on store {}", store));
765 datastore.garbage_collection(&worker)
766 },
767 )?;
768
769 Ok(json!(upid_str))
770 }
771
772 #[api(
773 input: {
774 properties: {
775 store: {
776 schema: DATASTORE_SCHEMA,
777 },
778 },
779 },
780 returns: {
781 type: GarbageCollectionStatus,
782 },
783 access: {
784 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
785 },
786 )]
787 /// Garbage collection status.
788 pub fn garbage_collection_status(
789 store: String,
790 _info: &ApiMethod,
791 _rpcenv: &mut dyn RpcEnvironment,
792 ) -> Result<GarbageCollectionStatus, Error> {
793
794 let datastore = DataStore::lookup_datastore(&store)?;
795
796 let status = datastore.last_gc_status();
797
798 Ok(status)
799 }
800
801 #[api(
802 returns: {
803 description: "List the accessible datastores.",
804 type: Array,
805 items: {
806 description: "Datastore name and description.",
807 properties: {
808 store: {
809 schema: DATASTORE_SCHEMA,
810 },
811 comment: {
812 optional: true,
813 schema: SINGLE_LINE_COMMENT_SCHEMA,
814 },
815 },
816 },
817 },
818 access: {
819 permission: &Permission::Anybody,
820 },
821 )]
822 /// Datastore list
823 fn get_datastore_list(
824 _param: Value,
825 _info: &ApiMethod,
826 rpcenv: &mut dyn RpcEnvironment,
827 ) -> Result<Value, Error> {
828
829 let (config, _digest) = datastore::config()?;
830
831 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
832 let user_info = CachedUserInfo::new()?;
833
834 let mut list = Vec::new();
835
836 for (store, (_, data)) in &config.sections {
837 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
838 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
839 if allowed {
840 let mut entry = json!({ "store": store });
841 if let Some(comment) = data["comment"].as_str() {
842 entry["comment"] = comment.into();
843 }
844 list.push(entry);
845 }
846 }
847
848 Ok(list.into())
849 }
850
/// API method descriptor for `download_file`, written by hand because it
/// is an async HTTP handler returning a raw response stream rather than
/// a JSON value.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
869
/// Stream a single raw (still encoded, possibly encrypted) file of a
/// backup snapshot as an `application/octet-stream` HTTP response.
///
/// Without `PRIV_DATASTORE_READ`, only the backup owner may download.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // Owner check for users lacking the READ privilege.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        // Resolve <base>/<snapshot dir>/<file-name> inside the datastore.
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // Stream the file in chunks instead of buffering it in memory;
        // streaming errors are logged but still propagated to the client.
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
923
/// API method descriptor for `download_file_decoded`, written by hand
/// because it is an async HTTP handler returning a raw response stream.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
942
/// Stream a single *decoded* file of a backup snapshot: index files
/// (`.didx`/`.fidx`) are resolved through their chunks, blobs are
/// unwrapped. Encrypted files are rejected.
///
/// Without `PRIV_DATASTORE_READ`, only the backup owner may download.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // Owner check for users lacking the READ privilege.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        // Refuse encrypted files — they cannot be decoded server-side.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Dispatch on the file extension (last dot-separated component).
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // Verify the index checksum against the manifest before serving.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                // Verify the index checksum against the manifest before serving.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // Larger buffer for fixed-index (image) streams.
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1042
/// API method descriptor for `upload_backup_log`, written by hand because
/// it is an async HTTP handler consuming the raw request body.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
1059
/// Store the uploaded client backup log (`client.log.blob`) inside an
/// existing backup snapshot directory.
///
/// Only the snapshot owner may upload, and only once — an existing log
/// is never overwritten. The uploaded data must be a valid `DataBlob`.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // Strict owner check — privileges alone are not sufficient here.
        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Never overwrite an already uploaded log.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

        // Collect the whole request body into memory.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1111
1112 #[api(
1113 input: {
1114 properties: {
1115 store: {
1116 schema: DATASTORE_SCHEMA,
1117 },
1118 "backup-type": {
1119 schema: BACKUP_TYPE_SCHEMA,
1120 },
1121 "backup-id": {
1122 schema: BACKUP_ID_SCHEMA,
1123 },
1124 "backup-time": {
1125 schema: BACKUP_TIME_SCHEMA,
1126 },
1127 "filepath": {
1128 description: "Base64 encoded path.",
1129 type: String,
1130 }
1131 },
1132 },
1133 access: {
1134 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1135 },
1136 )]
1137 /// Get the entries of the given path of the catalog
1138 fn catalog(
1139 store: String,
1140 backup_type: String,
1141 backup_id: String,
1142 backup_time: i64,
1143 filepath: String,
1144 _param: Value,
1145 _info: &ApiMethod,
1146 rpcenv: &mut dyn RpcEnvironment,
1147 ) -> Result<Value, Error> {
1148 let datastore = DataStore::lookup_datastore(&store)?;
1149
1150 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1151 let user_info = CachedUserInfo::new()?;
1152 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1153
1154 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1155
1156 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1157 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1158
1159 let file_name = CATALOG_NAME;
1160
1161 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1162 for file in files {
1163 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1164 bail!("cannot decode '{}' - is encrypted", file_name);
1165 }
1166 }
1167
1168 let mut path = datastore.base_path();
1169 path.push(backup_dir.relative_path());
1170 path.push(file_name);
1171
1172 let index = DynamicIndexReader::open(&path)
1173 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1174
1175 let (csum, size) = index.compute_csum();
1176 manifest.verify_file(&file_name, &csum, size)?;
1177
1178 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1179 let reader = BufferedDynamicReader::new(index, chunk_reader);
1180
1181 let mut catalog_reader = CatalogReader::new(reader);
1182 let mut current = catalog_reader.root()?;
1183 let mut components = vec![];
1184
1185
1186 if filepath != "root" {
1187 components = base64::decode(filepath)?;
1188 if components.len() > 0 && components[0] == '/' as u8 {
1189 components.remove(0);
1190 }
1191 for component in components.split(|c| *c == '/' as u8) {
1192 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1193 current = entry;
1194 } else {
1195 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1196 }
1197 }
1198 }
1199
1200 let mut res = Vec::new();
1201
1202 for direntry in catalog_reader.read_dir(&current)? {
1203 let mut components = components.clone();
1204 components.push('/' as u8);
1205 components.extend(&direntry.name);
1206 let path = base64::encode(components);
1207 let text = String::from_utf8_lossy(&direntry.name);
1208 let mut entry = json!({
1209 "filepath": path,
1210 "text": text,
1211 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1212 "leaf": true,
1213 });
1214 match direntry.attr {
1215 DirEntryAttribute::Directory { start: _ } => {
1216 entry["leaf"] = false.into();
1217 },
1218 DirEntryAttribute::File { size, mtime } => {
1219 entry["size"] = size.into();
1220 entry["mtime"] = mtime.into();
1221 },
1222 _ => {},
1223 }
1224 res.push(entry);
1225 }
1226
1227 Ok(res.into())
1228 }
1229
#[sortable]
// Hand-written method descriptor (instead of the #[api] macro) because this
// endpoint streams a raw HTTP body via an AsyncHttp handler rather than
// returning a JSON value.
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            // base64 so arbitrary byte sequences in paths can be transported
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
// Accessible with datastore READ privilege, or with BACKUP privilege (the
// handler additionally enforces the backup-owner check in the latter case).
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1248
1249 fn pxar_file_download(
1250 _parts: Parts,
1251 _req_body: Body,
1252 param: Value,
1253 _info: &ApiMethod,
1254 rpcenv: Box<dyn RpcEnvironment>,
1255 ) -> ApiResponseFuture {
1256
1257 async move {
1258 let store = tools::required_string_param(&param, "store")?;
1259 let datastore = DataStore::lookup_datastore(&store)?;
1260
1261 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1262 let user_info = CachedUserInfo::new()?;
1263 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1264
1265 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1266
1267 let backup_type = tools::required_string_param(&param, "backup-type")?;
1268 let backup_id = tools::required_string_param(&param, "backup-id")?;
1269 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1270
1271 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1272
1273 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1274 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1275
1276 let mut components = base64::decode(&filepath)?;
1277 if components.len() > 0 && components[0] == '/' as u8 {
1278 components.remove(0);
1279 }
1280
1281 let mut split = components.splitn(2, |c| *c == '/' as u8);
1282 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
1283 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
1284 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1285 for file in files {
1286 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1287 bail!("cannot decode '{}' - is encrypted", pxar_name);
1288 }
1289 }
1290
1291 let mut path = datastore.base_path();
1292 path.push(backup_dir.relative_path());
1293 path.push(pxar_name);
1294
1295 let index = DynamicIndexReader::open(&path)
1296 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1297
1298 let (csum, size) = index.compute_csum();
1299 manifest.verify_file(&pxar_name, &csum, size)?;
1300
1301 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1302 let reader = BufferedDynamicReader::new(index, chunk_reader);
1303 let archive_size = reader.archive_size();
1304 let reader = LocalDynamicReadAt::new(reader);
1305
1306 let decoder = Accessor::new(reader, archive_size).await?;
1307 let root = decoder.open_root().await?;
1308 let file = root
1309 .lookup(OsStr::from_bytes(file_path)).await?
1310 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1311
1312 let file = match file.kind() {
1313 EntryKind::File { .. } => file,
1314 EntryKind::Hardlink(_) => {
1315 decoder.follow_hardlink(&file).await?
1316 },
1317 // TODO symlink
1318 other => bail!("cannot download file of type {:?}", other),
1319 };
1320
1321 let body = Body::wrap_stream(
1322 AsyncReaderStream::new(file.contents().await?)
1323 .map_err(move |err| {
1324 eprintln!("error during streaming of '{:?}' - {}", filepath, err);
1325 err
1326 })
1327 );
1328
1329 // fixme: set other headers ?
1330 Ok(Response::builder()
1331 .status(StatusCode::OK)
1332 .header(header::CONTENT_TYPE, "application/octet-stream")
1333 .body(body)
1334 .unwrap())
1335 }.boxed()
1336 }
1337
1338 #[api(
1339 input: {
1340 properties: {
1341 store: {
1342 schema: DATASTORE_SCHEMA,
1343 },
1344 timeframe: {
1345 type: RRDTimeFrameResolution,
1346 },
1347 cf: {
1348 type: RRDMode,
1349 },
1350 },
1351 },
1352 access: {
1353 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1354 },
1355 )]
1356 /// Read datastore stats
1357 fn get_rrd_stats(
1358 store: String,
1359 timeframe: RRDTimeFrameResolution,
1360 cf: RRDMode,
1361 _param: Value,
1362 ) -> Result<Value, Error> {
1363
1364 create_value_from_rrd(
1365 &format!("datastore/{}", store),
1366 &[
1367 "total", "used",
1368 "read_ios", "read_bytes",
1369 "write_ios", "write_bytes",
1370 "io_ticks",
1371 ],
1372 timeframe,
1373 cf,
1374 )
1375 }
1376
1377 #[api(
1378 input: {
1379 properties: {
1380 store: {
1381 schema: DATASTORE_SCHEMA,
1382 },
1383 "backup-type": {
1384 schema: BACKUP_TYPE_SCHEMA,
1385 },
1386 "backup-id": {
1387 schema: BACKUP_ID_SCHEMA,
1388 },
1389 "backup-time": {
1390 schema: BACKUP_TIME_SCHEMA,
1391 },
1392 },
1393 },
1394 access: {
1395 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1396 },
1397 )]
1398 /// Get "notes" for a specific backup
1399 fn get_notes(
1400 store: String,
1401 backup_type: String,
1402 backup_id: String,
1403 backup_time: i64,
1404 rpcenv: &mut dyn RpcEnvironment,
1405 ) -> Result<String, Error> {
1406 let datastore = DataStore::lookup_datastore(&store)?;
1407
1408 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1409 let user_info = CachedUserInfo::new()?;
1410 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1411
1412 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1413
1414 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1415 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1416
1417 let manifest = datastore.load_manifest_json(&backup_dir)?;
1418
1419 let notes = manifest["unprotected"]["notes"]
1420 .as_str()
1421 .unwrap_or("");
1422
1423 Ok(String::from(notes))
1424 }
1425
1426 #[api(
1427 input: {
1428 properties: {
1429 store: {
1430 schema: DATASTORE_SCHEMA,
1431 },
1432 "backup-type": {
1433 schema: BACKUP_TYPE_SCHEMA,
1434 },
1435 "backup-id": {
1436 schema: BACKUP_ID_SCHEMA,
1437 },
1438 "backup-time": {
1439 schema: BACKUP_TIME_SCHEMA,
1440 },
1441 notes: {
1442 description: "A multiline text.",
1443 },
1444 },
1445 },
1446 access: {
1447 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1448 },
1449 )]
1450 /// Set "notes" for a specific backup
1451 fn set_notes(
1452 store: String,
1453 backup_type: String,
1454 backup_id: String,
1455 backup_time: i64,
1456 notes: String,
1457 rpcenv: &mut dyn RpcEnvironment,
1458 ) -> Result<(), Error> {
1459 let datastore = DataStore::lookup_datastore(&store)?;
1460
1461 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1462 let user_info = CachedUserInfo::new()?;
1463 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1464
1465 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1466
1467 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1468 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1469
1470 let mut manifest = datastore.load_manifest_json(&backup_dir)?;
1471
1472 manifest["unprotected"]["notes"] = notes.into();
1473
1474 datastore.store_manifest(&backup_dir, manifest)?;
1475
1476 Ok(())
1477 }
1478
#[sortable]
// API subdirectories available below a single datastore
// (`/admin/datastore/{store}/...`).
// NOTE(review): entries are kept in alphabetical order by path — presumably
// required for subdir lookup; keep them sorted when adding new entries.
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1555
// Router for a single datastore: GET lists the subdirectories above, each
// subdirectory dispatches to its own methods.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


// Top-level datastore admin router: GET lists the configured datastores;
// everything below `{store}` is handled by DATASTORE_INFO_ROUTER.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);