]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
src/backup/verify.rs: use separate thread to load data
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 use std::collections::{HashSet, HashMap};
2 use std::ffi::OsStr;
3 use std::os::unix::ffi::OsStrExt;
4 use std::sync::{Arc, Mutex};
5
6 use anyhow::{bail, format_err, Error};
7 use futures::*;
8 use hyper::http::request::Parts;
9 use hyper::{header, Body, Response, StatusCode};
10 use serde_json::{json, Value};
11
12 use proxmox::api::{
13 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
14 RpcEnvironment, RpcEnvironmentType, Permission
15 };
16 use proxmox::api::router::SubdirMap;
17 use proxmox::api::schema::*;
18 use proxmox::tools::fs::{replace_file, CreateOptions};
19 use proxmox::try_block;
20 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
21
22 use pxar::accessor::aio::Accessor;
23 use pxar::EntryKind;
24
25 use crate::api2::types::*;
26 use crate::api2::node::rrd::create_value_from_rrd;
27 use crate::backup::*;
28 use crate::config::datastore;
29 use crate::config::cached_user_info::CachedUserInfo;
30
31 use crate::server::WorkerTask;
32 use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
33 use crate::config::acl::{
34 PRIV_DATASTORE_AUDIT,
35 PRIV_DATASTORE_MODIFY,
36 PRIV_DATASTORE_READ,
37 PRIV_DATASTORE_PRUNE,
38 PRIV_DATASTORE_BACKUP,
39 };
40
41 fn check_backup_owner(
42 store: &DataStore,
43 group: &BackupGroup,
44 userid: &Userid,
45 ) -> Result<(), Error> {
46 let owner = store.get_owner(group)?;
47 if &owner != userid {
48 bail!("backup owner check failed ({} != {})", userid, owner);
49 }
50 Ok(())
51 }
52
53 fn read_backup_index(
54 store: &DataStore,
55 backup_dir: &BackupDir,
56 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
57
58 let (manifest, index_size) = store.load_manifest(backup_dir)?;
59
60 let mut result = Vec::new();
61 for item in manifest.files() {
62 result.push(BackupContent {
63 filename: item.filename.clone(),
64 crypt_mode: Some(item.crypt_mode),
65 size: Some(item.size),
66 });
67 }
68
69 result.push(BackupContent {
70 filename: MANIFEST_BLOB_NAME.to_string(),
71 crypt_mode: match manifest.signature {
72 Some(_) => Some(CryptMode::SignOnly),
73 None => Some(CryptMode::None),
74 },
75 size: Some(index_size),
76 });
77
78 Ok((manifest, result))
79 }
80
81 fn get_all_snapshot_files(
82 store: &DataStore,
83 info: &BackupInfo,
84 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
85
86 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
87
88 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
89 acc.insert(item.filename.clone());
90 acc
91 });
92
93 for file in &info.files {
94 if file_set.contains(file) { continue; }
95 files.push(BackupContent {
96 filename: file.to_string(),
97 size: None,
98 crypt_mode: None,
99 });
100 }
101
102 Ok((manifest, files))
103 }
104
105 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
106
107 let mut group_hash = HashMap::new();
108
109 for info in backup_list {
110 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
111 let time_list = group_hash.entry(group_id).or_insert(vec![]);
112 time_list.push(info);
113 }
114
115 group_hash
116 }
117
118 #[api(
119 input: {
120 properties: {
121 store: {
122 schema: DATASTORE_SCHEMA,
123 },
124 },
125 },
126 returns: {
127 type: Array,
128 description: "Returns the list of backup groups.",
129 items: {
130 type: GroupListItem,
131 }
132 },
133 access: {
134 permission: &Permission::Privilege(
135 &["datastore", "{store}"],
136 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
137 true),
138 },
139 )]
140 /// List backup groups.
141 fn list_groups(
142 store: String,
143 rpcenv: &mut dyn RpcEnvironment,
144 ) -> Result<Vec<GroupListItem>, Error> {
145
146 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
147 let user_info = CachedUserInfo::new()?;
148 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
149
150 let datastore = DataStore::lookup_datastore(&store)?;
151
152 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
153
154 let group_hash = group_backups(backup_list);
155
156 let mut groups = Vec::new();
157
158 for (_group_id, mut list) in group_hash {
159
160 BackupInfo::sort_list(&mut list, false);
161
162 let info = &list[0];
163
164 let group = info.backup_dir.group();
165
166 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
167 let owner = datastore.get_owner(group)?;
168 if !list_all {
169 if owner != userid { continue; }
170 }
171
172 let result_item = GroupListItem {
173 backup_type: group.backup_type().to_string(),
174 backup_id: group.backup_id().to_string(),
175 last_backup: info.backup_dir.backup_time().timestamp(),
176 backup_count: list.len() as u64,
177 files: info.files.clone(),
178 owner: Some(owner),
179 };
180 groups.push(result_item);
181 }
182
183 Ok(groups)
184 }
185
186 #[api(
187 input: {
188 properties: {
189 store: {
190 schema: DATASTORE_SCHEMA,
191 },
192 "backup-type": {
193 schema: BACKUP_TYPE_SCHEMA,
194 },
195 "backup-id": {
196 schema: BACKUP_ID_SCHEMA,
197 },
198 "backup-time": {
199 schema: BACKUP_TIME_SCHEMA,
200 },
201 },
202 },
203 returns: {
204 type: Array,
205 description: "Returns the list of archive files inside a backup snapshots.",
206 items: {
207 type: BackupContent,
208 }
209 },
210 access: {
211 permission: &Permission::Privilege(
212 &["datastore", "{store}"],
213 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
214 true),
215 },
216 )]
217 /// List snapshot files.
218 pub fn list_snapshot_files(
219 store: String,
220 backup_type: String,
221 backup_id: String,
222 backup_time: i64,
223 _info: &ApiMethod,
224 rpcenv: &mut dyn RpcEnvironment,
225 ) -> Result<Vec<BackupContent>, Error> {
226
227 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
228 let user_info = CachedUserInfo::new()?;
229 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
230
231 let datastore = DataStore::lookup_datastore(&store)?;
232
233 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
234
235 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
236 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
237
238 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
239
240 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
241
242 Ok(files)
243 }
244
245 #[api(
246 input: {
247 properties: {
248 store: {
249 schema: DATASTORE_SCHEMA,
250 },
251 "backup-type": {
252 schema: BACKUP_TYPE_SCHEMA,
253 },
254 "backup-id": {
255 schema: BACKUP_ID_SCHEMA,
256 },
257 "backup-time": {
258 schema: BACKUP_TIME_SCHEMA,
259 },
260 },
261 },
262 access: {
263 permission: &Permission::Privilege(
264 &["datastore", "{store}"],
265 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
266 true),
267 },
268 )]
269 /// Delete backup snapshot.
270 fn delete_snapshot(
271 store: String,
272 backup_type: String,
273 backup_id: String,
274 backup_time: i64,
275 _info: &ApiMethod,
276 rpcenv: &mut dyn RpcEnvironment,
277 ) -> Result<Value, Error> {
278
279 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
280 let user_info = CachedUserInfo::new()?;
281 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
282
283 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
284
285 let datastore = DataStore::lookup_datastore(&store)?;
286
287 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
288 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
289
290 datastore.remove_backup_dir(&snapshot, false)?;
291
292 Ok(Value::Null)
293 }
294
295 #[api(
296 input: {
297 properties: {
298 store: {
299 schema: DATASTORE_SCHEMA,
300 },
301 "backup-type": {
302 optional: true,
303 schema: BACKUP_TYPE_SCHEMA,
304 },
305 "backup-id": {
306 optional: true,
307 schema: BACKUP_ID_SCHEMA,
308 },
309 },
310 },
311 returns: {
312 type: Array,
313 description: "Returns the list of snapshots.",
314 items: {
315 type: SnapshotListItem,
316 }
317 },
318 access: {
319 permission: &Permission::Privilege(
320 &["datastore", "{store}"],
321 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
322 true),
323 },
324 )]
325 /// List backup snapshots.
326 pub fn list_snapshots (
327 store: String,
328 backup_type: Option<String>,
329 backup_id: Option<String>,
330 _param: Value,
331 _info: &ApiMethod,
332 rpcenv: &mut dyn RpcEnvironment,
333 ) -> Result<Vec<SnapshotListItem>, Error> {
334
335 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
336 let user_info = CachedUserInfo::new()?;
337 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
338
339 let datastore = DataStore::lookup_datastore(&store)?;
340
341 let base_path = datastore.base_path();
342
343 let backup_list = BackupInfo::list_backups(&base_path)?;
344
345 let mut snapshots = vec![];
346
347 for info in backup_list {
348 let group = info.backup_dir.group();
349 if let Some(ref backup_type) = backup_type {
350 if backup_type != group.backup_type() { continue; }
351 }
352 if let Some(ref backup_id) = backup_id {
353 if backup_id != group.backup_id() { continue; }
354 }
355
356 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
357 let owner = datastore.get_owner(group)?;
358
359 if !list_all {
360 if owner != userid { continue; }
361 }
362
363 let mut size = None;
364
365 let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
366 Ok((manifest, files)) => {
367 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
368 // extract the first line from notes
369 let comment: Option<String> = manifest.unprotected["notes"]
370 .as_str()
371 .and_then(|notes| notes.lines().next())
372 .map(String::from);
373
374 let verify = manifest.unprotected["verify_state"].clone();
375 let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
376 Ok(verify) => verify,
377 Err(err) => {
378 eprintln!("error parsing verification state : '{}'", err);
379 None
380 }
381 };
382
383 (comment, verify, files)
384 },
385 Err(err) => {
386 eprintln!("error during snapshot file listing: '{}'", err);
387 (
388 None,
389 None,
390 info
391 .files
392 .iter()
393 .map(|x| BackupContent {
394 filename: x.to_string(),
395 size: None,
396 crypt_mode: None,
397 })
398 .collect()
399 )
400 },
401 };
402
403 let result_item = SnapshotListItem {
404 backup_type: group.backup_type().to_string(),
405 backup_id: group.backup_id().to_string(),
406 backup_time: info.backup_dir.backup_time().timestamp(),
407 comment,
408 verification,
409 files,
410 size,
411 owner: Some(owner),
412 };
413
414 snapshots.push(result_item);
415 }
416
417 Ok(snapshots)
418 }
419
420 #[api(
421 input: {
422 properties: {
423 store: {
424 schema: DATASTORE_SCHEMA,
425 },
426 },
427 },
428 returns: {
429 type: StorageStatus,
430 },
431 access: {
432 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
433 },
434 )]
435 /// Get datastore status.
436 pub fn status(
437 store: String,
438 _info: &ApiMethod,
439 _rpcenv: &mut dyn RpcEnvironment,
440 ) -> Result<StorageStatus, Error> {
441 let datastore = DataStore::lookup_datastore(&store)?;
442 crate::tools::disks::disk_usage(&datastore.base_path())
443 }
444
445 #[api(
446 input: {
447 properties: {
448 store: {
449 schema: DATASTORE_SCHEMA,
450 },
451 "backup-type": {
452 schema: BACKUP_TYPE_SCHEMA,
453 optional: true,
454 },
455 "backup-id": {
456 schema: BACKUP_ID_SCHEMA,
457 optional: true,
458 },
459 "backup-time": {
460 schema: BACKUP_TIME_SCHEMA,
461 optional: true,
462 },
463 },
464 },
465 returns: {
466 schema: UPID_SCHEMA,
467 },
468 access: {
469 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
470 },
471 )]
472 /// Verify backups.
473 ///
474 /// This function can verify a single backup snapshot, all backup from a backup group,
475 /// or all backups in the datastore.
476 pub fn verify(
477 store: String,
478 backup_type: Option<String>,
479 backup_id: Option<String>,
480 backup_time: Option<i64>,
481 rpcenv: &mut dyn RpcEnvironment,
482 ) -> Result<Value, Error> {
483 let datastore = DataStore::lookup_datastore(&store)?;
484
485 let worker_id;
486
487 let mut backup_dir = None;
488 let mut backup_group = None;
489
490 match (backup_type, backup_id, backup_time) {
491 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
492 worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
493 let dir = BackupDir::new(backup_type, backup_id, backup_time);
494 backup_dir = Some(dir);
495 }
496 (Some(backup_type), Some(backup_id), None) => {
497 worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
498 let group = BackupGroup::new(backup_type, backup_id);
499 backup_group = Some(group);
500 }
501 (None, None, None) => {
502 worker_id = store.clone();
503 }
504 _ => bail!("parameters do not specify a backup group or snapshot"),
505 }
506
507 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
508 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
509
510 let upid_str = WorkerTask::new_thread(
511 "verify",
512 Some(worker_id.clone()),
513 userid,
514 to_stdout,
515 move |worker| {
516 let failed_dirs = if let Some(backup_dir) = backup_dir {
517 let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
518 let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
519 let mut res = Vec::new();
520 if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
521 res.push(backup_dir.to_string());
522 }
523 res
524 } else if let Some(backup_group) = backup_group {
525 verify_backup_group(datastore, &backup_group, worker.clone())?
526 } else {
527 verify_all_backups(datastore, worker.clone())?
528 };
529 if failed_dirs.len() > 0 {
530 worker.log("Failed to verify following snapshots:");
531 for dir in failed_dirs {
532 worker.log(format!("\t{}", dir));
533 }
534 bail!("verification failed - please check the log for details");
535 }
536 Ok(())
537 },
538 )?;
539
540 Ok(json!(upid_str))
541 }
542
/// Expand to a schema property list containing the common `keep-*` prune
/// options, with caller-supplied entries spliced in before (`$list1`) and
/// after (`$list2`) the common block. Entries are kept in alphabetical
/// order as the surrounding `ObjectSchema` expects.
///
/// NOTE(review): the name keeps the historic "prameters" spelling; renaming
/// this exported macro would break external users.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // Single-list form delegates to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
585
/// Return schema of the `prune` API call: one entry per snapshot together
/// with a flag telling whether it is kept or removed.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
590
// Hand-written method definition (instead of #[api]) because the parameter
// list is assembled with the add_common_prune_prameters! macro.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
    );
612
613 fn prune(
614 param: Value,
615 _info: &ApiMethod,
616 rpcenv: &mut dyn RpcEnvironment,
617 ) -> Result<Value, Error> {
618
619 let store = tools::required_string_param(&param, "store")?;
620 let backup_type = tools::required_string_param(&param, "backup-type")?;
621 let backup_id = tools::required_string_param(&param, "backup-id")?;
622
623 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
624 let user_info = CachedUserInfo::new()?;
625 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
626
627 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
628
629 let group = BackupGroup::new(backup_type, backup_id);
630
631 let datastore = DataStore::lookup_datastore(&store)?;
632
633 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
634 if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
635
636 let prune_options = PruneOptions {
637 keep_last: param["keep-last"].as_u64(),
638 keep_hourly: param["keep-hourly"].as_u64(),
639 keep_daily: param["keep-daily"].as_u64(),
640 keep_weekly: param["keep-weekly"].as_u64(),
641 keep_monthly: param["keep-monthly"].as_u64(),
642 keep_yearly: param["keep-yearly"].as_u64(),
643 };
644
645 let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
646
647 let mut prune_result = Vec::new();
648
649 let list = group.list_backups(&datastore.base_path())?;
650
651 let mut prune_info = compute_prune_info(list, &prune_options)?;
652
653 prune_info.reverse(); // delete older snapshots first
654
655 let keep_all = !prune_options.keeps_something();
656
657 if dry_run {
658 for (info, mut keep) in prune_info {
659 if keep_all { keep = true; }
660
661 let backup_time = info.backup_dir.backup_time();
662 let group = info.backup_dir.group();
663
664 prune_result.push(json!({
665 "backup-type": group.backup_type(),
666 "backup-id": group.backup_id(),
667 "backup-time": backup_time.timestamp(),
668 "keep": keep,
669 }));
670 }
671 return Ok(json!(prune_result));
672 }
673
674
675 // We use a WorkerTask just to have a task log, but run synchrounously
676 let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
677
678 let result = try_block! {
679 if keep_all {
680 worker.log("No prune selection - keeping all files.");
681 } else {
682 worker.log(format!("retention options: {}", prune_options.cli_options_string()));
683 worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
684 store, backup_type, backup_id));
685 }
686
687 for (info, mut keep) in prune_info {
688 if keep_all { keep = true; }
689
690 let backup_time = info.backup_dir.backup_time();
691 let timestamp = BackupDir::backup_time_to_string(backup_time);
692 let group = info.backup_dir.group();
693
694
695 let msg = format!(
696 "{}/{}/{} {}",
697 group.backup_type(),
698 group.backup_id(),
699 timestamp,
700 if keep { "keep" } else { "remove" },
701 );
702
703 worker.log(msg);
704
705 prune_result.push(json!({
706 "backup-type": group.backup_type(),
707 "backup-id": group.backup_id(),
708 "backup-time": backup_time.timestamp(),
709 "keep": keep,
710 }));
711
712 if !(dry_run || keep) {
713 datastore.remove_backup_dir(&info.backup_dir, true)?;
714 }
715 }
716
717 Ok(())
718 };
719
720 worker.log_result(&result);
721
722 if let Err(err) = result {
723 bail!("prune failed - {}", err);
724 };
725
726 Ok(json!(prune_result))
727 }
728
729 #[api(
730 input: {
731 properties: {
732 store: {
733 schema: DATASTORE_SCHEMA,
734 },
735 },
736 },
737 returns: {
738 schema: UPID_SCHEMA,
739 },
740 access: {
741 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
742 },
743 )]
744 /// Start garbage collection.
745 fn start_garbage_collection(
746 store: String,
747 _info: &ApiMethod,
748 rpcenv: &mut dyn RpcEnvironment,
749 ) -> Result<Value, Error> {
750
751 let datastore = DataStore::lookup_datastore(&store)?;
752
753 println!("Starting garbage collection on store {}", store);
754
755 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
756
757 let upid_str = WorkerTask::new_thread(
758 "garbage_collection",
759 Some(store.clone()),
760 Userid::root_userid().clone(),
761 to_stdout,
762 move |worker| {
763 worker.log(format!("starting garbage collection on store {}", store));
764 datastore.garbage_collection(&worker)
765 },
766 )?;
767
768 Ok(json!(upid_str))
769 }
770
771 #[api(
772 input: {
773 properties: {
774 store: {
775 schema: DATASTORE_SCHEMA,
776 },
777 },
778 },
779 returns: {
780 type: GarbageCollectionStatus,
781 },
782 access: {
783 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
784 },
785 )]
786 /// Garbage collection status.
787 pub fn garbage_collection_status(
788 store: String,
789 _info: &ApiMethod,
790 _rpcenv: &mut dyn RpcEnvironment,
791 ) -> Result<GarbageCollectionStatus, Error> {
792
793 let datastore = DataStore::lookup_datastore(&store)?;
794
795 let status = datastore.last_gc_status();
796
797 Ok(status)
798 }
799
800 #[api(
801 returns: {
802 description: "List the accessible datastores.",
803 type: Array,
804 items: {
805 description: "Datastore name and description.",
806 properties: {
807 store: {
808 schema: DATASTORE_SCHEMA,
809 },
810 comment: {
811 optional: true,
812 schema: SINGLE_LINE_COMMENT_SCHEMA,
813 },
814 },
815 },
816 },
817 access: {
818 permission: &Permission::Anybody,
819 },
820 )]
821 /// Datastore list
822 fn get_datastore_list(
823 _param: Value,
824 _info: &ApiMethod,
825 rpcenv: &mut dyn RpcEnvironment,
826 ) -> Result<Value, Error> {
827
828 let (config, _digest) = datastore::config()?;
829
830 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
831 let user_info = CachedUserInfo::new()?;
832
833 let mut list = Vec::new();
834
835 for (store, (_, data)) in &config.sections {
836 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
837 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
838 if allowed {
839 let mut entry = json!({ "store": store });
840 if let Some(comment) = data["comment"].as_str() {
841 entry["comment"] = comment.into();
842 }
843 list.push(entry);
844 }
845 }
846
847 Ok(list.into())
848 }
849
// Hand-written method definition: AsyncHttp streaming endpoints cannot be
// declared with the #[api] macro.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
868
869 fn download_file(
870 _parts: Parts,
871 _req_body: Body,
872 param: Value,
873 _info: &ApiMethod,
874 rpcenv: Box<dyn RpcEnvironment>,
875 ) -> ApiResponseFuture {
876
877 async move {
878 let store = tools::required_string_param(&param, "store")?;
879 let datastore = DataStore::lookup_datastore(store)?;
880
881 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
882 let user_info = CachedUserInfo::new()?;
883 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
884
885 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
886
887 let backup_type = tools::required_string_param(&param, "backup-type")?;
888 let backup_id = tools::required_string_param(&param, "backup-id")?;
889 let backup_time = tools::required_integer_param(&param, "backup-time")?;
890
891 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
892
893 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
894 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
895
896 println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
897
898 let mut path = datastore.base_path();
899 path.push(backup_dir.relative_path());
900 path.push(&file_name);
901
902 let file = tokio::fs::File::open(&path)
903 .await
904 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
905
906 let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
907 .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
908 .map_err(move |err| {
909 eprintln!("error during streaming of '{:?}' - {}", &path, err);
910 err
911 });
912 let body = Body::wrap_stream(payload);
913
914 // fixme: set other headers ?
915 Ok(Response::builder()
916 .status(StatusCode::OK)
917 .header(header::CONTENT_TYPE, "application/octet-stream")
918 .body(body)
919 .unwrap())
920 }.boxed()
921 }
922
// Hand-written method definition: AsyncHttp streaming endpoints cannot be
// declared with the #[api] macro.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
941
/// Stream a single file from a snapshot, decoded on the server side.
///
/// Refuses files marked as encrypted in the manifest, since they cannot be
/// decoded without the client key. The decoder is chosen by file extension
/// (didx / fidx / blob).
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // Without the READ privilege, only the backup owner may download.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        // Reject encrypted files up front - they cannot be decoded here.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // File extension selects the decoder below.
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // Verify the index checksum against the manifest before streaming.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                // Verify the index checksum against the manifest before streaming.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // Larger buffer for fixed indexes (block image archives).
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1041
// Hand-written method definition: AsyncHttp endpoints (raw request body)
// cannot be declared with the #[api] macro.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
1058
1059 fn upload_backup_log(
1060 _parts: Parts,
1061 req_body: Body,
1062 param: Value,
1063 _info: &ApiMethod,
1064 rpcenv: Box<dyn RpcEnvironment>,
1065 ) -> ApiResponseFuture {
1066
1067 async move {
1068 let store = tools::required_string_param(&param, "store")?;
1069 let datastore = DataStore::lookup_datastore(store)?;
1070
1071 let file_name = CLIENT_LOG_BLOB_NAME;
1072
1073 let backup_type = tools::required_string_param(&param, "backup-type")?;
1074 let backup_id = tools::required_string_param(&param, "backup-id")?;
1075 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1076
1077 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1078
1079 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1080 check_backup_owner(&datastore, backup_dir.group(), &userid)?;
1081
1082 let mut path = datastore.base_path();
1083 path.push(backup_dir.relative_path());
1084 path.push(&file_name);
1085
1086 if path.exists() {
1087 bail!("backup already contains a log.");
1088 }
1089
1090 println!("Upload backup log to {}/{}/{}/{}/{}", store,
1091 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);
1092
1093 let data = req_body
1094 .map_err(Error::from)
1095 .try_fold(Vec::new(), |mut acc, chunk| {
1096 acc.extend_from_slice(&*chunk);
1097 future::ok::<_, Error>(acc)
1098 })
1099 .await?;
1100
1101 // always verify blob/CRC at server side
1102 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1103
1104 replace_file(&path, blob.raw_data(), CreateOptions::new())?;
1105
1106 // fixme: use correct formatter
1107 Ok(crate::server::formatter::json_response(Ok(Value::Null)))
1108 }.boxed()
1109 }
1110
1111 #[api(
1112 input: {
1113 properties: {
1114 store: {
1115 schema: DATASTORE_SCHEMA,
1116 },
1117 "backup-type": {
1118 schema: BACKUP_TYPE_SCHEMA,
1119 },
1120 "backup-id": {
1121 schema: BACKUP_ID_SCHEMA,
1122 },
1123 "backup-time": {
1124 schema: BACKUP_TIME_SCHEMA,
1125 },
1126 "filepath": {
1127 description: "Base64 encoded path.",
1128 type: String,
1129 }
1130 },
1131 },
1132 access: {
1133 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1134 },
1135 )]
1136 /// Get the entries of the given path of the catalog
1137 fn catalog(
1138 store: String,
1139 backup_type: String,
1140 backup_id: String,
1141 backup_time: i64,
1142 filepath: String,
1143 _param: Value,
1144 _info: &ApiMethod,
1145 rpcenv: &mut dyn RpcEnvironment,
1146 ) -> Result<Value, Error> {
1147 let datastore = DataStore::lookup_datastore(&store)?;
1148
1149 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1150 let user_info = CachedUserInfo::new()?;
1151 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1152
1153 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1154
1155 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1156 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1157
1158 let file_name = CATALOG_NAME;
1159
1160 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1161 for file in files {
1162 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1163 bail!("cannot decode '{}' - is encrypted", file_name);
1164 }
1165 }
1166
1167 let mut path = datastore.base_path();
1168 path.push(backup_dir.relative_path());
1169 path.push(file_name);
1170
1171 let index = DynamicIndexReader::open(&path)
1172 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1173
1174 let (csum, size) = index.compute_csum();
1175 manifest.verify_file(&file_name, &csum, size)?;
1176
1177 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1178 let reader = BufferedDynamicReader::new(index, chunk_reader);
1179
1180 let mut catalog_reader = CatalogReader::new(reader);
1181 let mut current = catalog_reader.root()?;
1182 let mut components = vec![];
1183
1184
1185 if filepath != "root" {
1186 components = base64::decode(filepath)?;
1187 if components.len() > 0 && components[0] == '/' as u8 {
1188 components.remove(0);
1189 }
1190 for component in components.split(|c| *c == '/' as u8) {
1191 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1192 current = entry;
1193 } else {
1194 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1195 }
1196 }
1197 }
1198
1199 let mut res = Vec::new();
1200
1201 for direntry in catalog_reader.read_dir(&current)? {
1202 let mut components = components.clone();
1203 components.push('/' as u8);
1204 components.extend(&direntry.name);
1205 let path = base64::encode(components);
1206 let text = String::from_utf8_lossy(&direntry.name);
1207 let mut entry = json!({
1208 "filepath": path,
1209 "text": text,
1210 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1211 "leaf": true,
1212 });
1213 match direntry.attr {
1214 DirEntryAttribute::Directory { start: _ } => {
1215 entry["leaf"] = false.into();
1216 },
1217 DirEntryAttribute::File { size, mtime } => {
1218 entry["size"] = size.into();
1219 entry["mtime"] = mtime.into();
1220 },
1221 _ => {},
1222 }
1223 res.push(entry);
1224 }
1225
1226 Ok(res.into())
1227 }
1228
// API method descriptor for `pxar_file_download` below. Declared by hand
// (instead of via #[api]) because the handler is an AsyncHttp endpoint that
// streams the response body itself.
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
// PRIV_DATASTORE_BACKUP alone is sufficient to reach the handler; the handler
// then restricts such users to their own backups (see pxar_file_download).
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1247
1248 fn pxar_file_download(
1249 _parts: Parts,
1250 _req_body: Body,
1251 param: Value,
1252 _info: &ApiMethod,
1253 rpcenv: Box<dyn RpcEnvironment>,
1254 ) -> ApiResponseFuture {
1255
1256 async move {
1257 let store = tools::required_string_param(&param, "store")?;
1258 let datastore = DataStore::lookup_datastore(&store)?;
1259
1260 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1261 let user_info = CachedUserInfo::new()?;
1262 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1263
1264 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1265
1266 let backup_type = tools::required_string_param(&param, "backup-type")?;
1267 let backup_id = tools::required_string_param(&param, "backup-id")?;
1268 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1269
1270 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1271
1272 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1273 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1274
1275 let mut components = base64::decode(&filepath)?;
1276 if components.len() > 0 && components[0] == '/' as u8 {
1277 components.remove(0);
1278 }
1279
1280 let mut split = components.splitn(2, |c| *c == '/' as u8);
1281 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
1282 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
1283 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1284 for file in files {
1285 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1286 bail!("cannot decode '{}' - is encrypted", pxar_name);
1287 }
1288 }
1289
1290 let mut path = datastore.base_path();
1291 path.push(backup_dir.relative_path());
1292 path.push(pxar_name);
1293
1294 let index = DynamicIndexReader::open(&path)
1295 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1296
1297 let (csum, size) = index.compute_csum();
1298 manifest.verify_file(&pxar_name, &csum, size)?;
1299
1300 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1301 let reader = BufferedDynamicReader::new(index, chunk_reader);
1302 let archive_size = reader.archive_size();
1303 let reader = LocalDynamicReadAt::new(reader);
1304
1305 let decoder = Accessor::new(reader, archive_size).await?;
1306 let root = decoder.open_root().await?;
1307 let file = root
1308 .lookup(OsStr::from_bytes(file_path)).await?
1309 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1310
1311 let file = match file.kind() {
1312 EntryKind::File { .. } => file,
1313 EntryKind::Hardlink(_) => {
1314 decoder.follow_hardlink(&file).await?
1315 },
1316 // TODO symlink
1317 other => bail!("cannot download file of type {:?}", other),
1318 };
1319
1320 let body = Body::wrap_stream(
1321 AsyncReaderStream::new(file.contents().await?)
1322 .map_err(move |err| {
1323 eprintln!("error during streaming of '{:?}' - {}", filepath, err);
1324 err
1325 })
1326 );
1327
1328 // fixme: set other headers ?
1329 Ok(Response::builder()
1330 .status(StatusCode::OK)
1331 .header(header::CONTENT_TYPE, "application/octet-stream")
1332 .body(body)
1333 .unwrap())
1334 }.boxed()
1335 }
1336
1337 #[api(
1338 input: {
1339 properties: {
1340 store: {
1341 schema: DATASTORE_SCHEMA,
1342 },
1343 timeframe: {
1344 type: RRDTimeFrameResolution,
1345 },
1346 cf: {
1347 type: RRDMode,
1348 },
1349 },
1350 },
1351 access: {
1352 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1353 },
1354 )]
1355 /// Read datastore stats
1356 fn get_rrd_stats(
1357 store: String,
1358 timeframe: RRDTimeFrameResolution,
1359 cf: RRDMode,
1360 _param: Value,
1361 ) -> Result<Value, Error> {
1362
1363 create_value_from_rrd(
1364 &format!("datastore/{}", store),
1365 &[
1366 "total", "used",
1367 "read_ios", "read_bytes",
1368 "write_ios", "write_bytes",
1369 "io_ticks",
1370 ],
1371 timeframe,
1372 cf,
1373 )
1374 }
1375
1376 #[api(
1377 input: {
1378 properties: {
1379 store: {
1380 schema: DATASTORE_SCHEMA,
1381 },
1382 "backup-type": {
1383 schema: BACKUP_TYPE_SCHEMA,
1384 },
1385 "backup-id": {
1386 schema: BACKUP_ID_SCHEMA,
1387 },
1388 "backup-time": {
1389 schema: BACKUP_TIME_SCHEMA,
1390 },
1391 },
1392 },
1393 access: {
1394 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1395 },
1396 )]
1397 /// Get "notes" for a specific backup
1398 fn get_notes(
1399 store: String,
1400 backup_type: String,
1401 backup_id: String,
1402 backup_time: i64,
1403 rpcenv: &mut dyn RpcEnvironment,
1404 ) -> Result<String, Error> {
1405 let datastore = DataStore::lookup_datastore(&store)?;
1406
1407 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1408 let user_info = CachedUserInfo::new()?;
1409 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1410
1411 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1412
1413 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1414 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1415
1416 let manifest = datastore.load_manifest_json(&backup_dir)?;
1417
1418 let notes = manifest["unprotected"]["notes"]
1419 .as_str()
1420 .unwrap_or("");
1421
1422 Ok(String::from(notes))
1423 }
1424
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
    },
)]
/// Set "notes" for a specific backup
///
/// Stores the text in the unprotected (unsigned) part of the snapshot
/// manifest, so it can be changed without re-signing the manifest.
fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

    // NOTE(review): this gate tests PRIV_DATASTORE_READ although the endpoint
    // requires PRIV_DATASTORE_MODIFY (see access spec above) — it looks
    // copy-pasted from get_notes. Confirm whether MODIFY was intended here.
    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    let mut manifest = datastore.load_manifest_json(&backup_dir)?;

    manifest["unprotected"]["notes"] = notes.into();

    // Rewrite the manifest file with the updated notes.
    datastore.store_manifest(&backup_dir, manifest)?;

    Ok(())
}
1477
// Per-datastore sub-routes ("/admin/datastore/{store}/...").
// Entries are kept in alphabetical order — apparently required for the
// lookup done by list_subdirs_api_method!; keep it sorted when adding routes.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1554
// Router for a single datastore: GET lists the available sub-routes,
// everything else dispatches into DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
1558
1559
// Top-level router: GET lists all datastores, "{store}" captures the
// datastore name and forwards to the per-datastore router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);