]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
156ebf835abb229adea3e92dcb19d79829f55bd3
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 use std::collections::{HashSet, HashMap};
2 use std::ffi::OsStr;
3 use std::os::unix::ffi::OsStrExt;
4
5 use anyhow::{bail, format_err, Error};
6 use futures::*;
7 use hyper::http::request::Parts;
8 use hyper::{header, Body, Response, StatusCode};
9 use serde_json::{json, Value};
10
11 use proxmox::api::{
12 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
13 RpcEnvironment, RpcEnvironmentType, Permission
14 };
15 use proxmox::api::router::SubdirMap;
16 use proxmox::api::schema::*;
17 use proxmox::tools::fs::{replace_file, CreateOptions};
18 use proxmox::try_block;
19 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
20
21 use pxar::accessor::aio::Accessor;
22 use pxar::EntryKind;
23
24 use crate::api2::types::*;
25 use crate::api2::node::rrd::create_value_from_rrd;
26 use crate::backup::*;
27 use crate::config::datastore;
28 use crate::config::cached_user_info::CachedUserInfo;
29
30 use crate::server::WorkerTask;
31 use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
32 use crate::config::acl::{
33 PRIV_DATASTORE_AUDIT,
34 PRIV_DATASTORE_MODIFY,
35 PRIV_DATASTORE_READ,
36 PRIV_DATASTORE_PRUNE,
37 PRIV_DATASTORE_BACKUP,
38 };
39
40 fn check_backup_owner(
41 store: &DataStore,
42 group: &BackupGroup,
43 userid: &Userid,
44 ) -> Result<(), Error> {
45 let owner = store.get_owner(group)?;
46 if &owner != userid {
47 bail!("backup owner check failed ({} != {})", userid, owner);
48 }
49 Ok(())
50 }
51
52 fn read_backup_index(
53 store: &DataStore,
54 backup_dir: &BackupDir,
55 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
56
57 let (manifest, index_size) = store.load_manifest(backup_dir)?;
58
59 let mut result = Vec::new();
60 for item in manifest.files() {
61 result.push(BackupContent {
62 filename: item.filename.clone(),
63 crypt_mode: Some(item.crypt_mode),
64 size: Some(item.size),
65 });
66 }
67
68 result.push(BackupContent {
69 filename: MANIFEST_BLOB_NAME.to_string(),
70 crypt_mode: match manifest.signature {
71 Some(_) => Some(CryptMode::SignOnly),
72 None => Some(CryptMode::None),
73 },
74 size: Some(index_size),
75 });
76
77 Ok((manifest, result))
78 }
79
80 fn get_all_snapshot_files(
81 store: &DataStore,
82 info: &BackupInfo,
83 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
84
85 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
86
87 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
88 acc.insert(item.filename.clone());
89 acc
90 });
91
92 for file in &info.files {
93 if file_set.contains(file) { continue; }
94 files.push(BackupContent {
95 filename: file.to_string(),
96 size: None,
97 crypt_mode: None,
98 });
99 }
100
101 Ok((manifest, files))
102 }
103
104 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
105
106 let mut group_hash = HashMap::new();
107
108 for info in backup_list {
109 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
110 let time_list = group_hash.entry(group_id).or_insert(vec![]);
111 time_list.push(info);
112 }
113
114 group_hash
115 }
116
117 #[api(
118 input: {
119 properties: {
120 store: {
121 schema: DATASTORE_SCHEMA,
122 },
123 },
124 },
125 returns: {
126 type: Array,
127 description: "Returns the list of backup groups.",
128 items: {
129 type: GroupListItem,
130 }
131 },
132 access: {
133 permission: &Permission::Privilege(
134 &["datastore", "{store}"],
135 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
136 true),
137 },
138 )]
139 /// List backup groups.
140 fn list_groups(
141 store: String,
142 rpcenv: &mut dyn RpcEnvironment,
143 ) -> Result<Vec<GroupListItem>, Error> {
144
145 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
146 let user_info = CachedUserInfo::new()?;
147 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
148
149 let datastore = DataStore::lookup_datastore(&store)?;
150
151 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
152
153 let group_hash = group_backups(backup_list);
154
155 let mut groups = Vec::new();
156
157 for (_group_id, mut list) in group_hash {
158
159 BackupInfo::sort_list(&mut list, false);
160
161 let info = &list[0];
162
163 let group = info.backup_dir.group();
164
165 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
166 let owner = datastore.get_owner(group)?;
167 if !list_all {
168 if owner != userid { continue; }
169 }
170
171 let result_item = GroupListItem {
172 backup_type: group.backup_type().to_string(),
173 backup_id: group.backup_id().to_string(),
174 last_backup: info.backup_dir.backup_time().timestamp(),
175 backup_count: list.len() as u64,
176 files: info.files.clone(),
177 owner: Some(owner),
178 };
179 groups.push(result_item);
180 }
181
182 Ok(groups)
183 }
184
185 #[api(
186 input: {
187 properties: {
188 store: {
189 schema: DATASTORE_SCHEMA,
190 },
191 "backup-type": {
192 schema: BACKUP_TYPE_SCHEMA,
193 },
194 "backup-id": {
195 schema: BACKUP_ID_SCHEMA,
196 },
197 "backup-time": {
198 schema: BACKUP_TIME_SCHEMA,
199 },
200 },
201 },
202 returns: {
203 type: Array,
204 description: "Returns the list of archive files inside a backup snapshots.",
205 items: {
206 type: BackupContent,
207 }
208 },
209 access: {
210 permission: &Permission::Privilege(
211 &["datastore", "{store}"],
212 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
213 true),
214 },
215 )]
216 /// List snapshot files.
217 pub fn list_snapshot_files(
218 store: String,
219 backup_type: String,
220 backup_id: String,
221 backup_time: i64,
222 _info: &ApiMethod,
223 rpcenv: &mut dyn RpcEnvironment,
224 ) -> Result<Vec<BackupContent>, Error> {
225
226 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
227 let user_info = CachedUserInfo::new()?;
228 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
229
230 let datastore = DataStore::lookup_datastore(&store)?;
231
232 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
233
234 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
235 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
236
237 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
238
239 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
240
241 Ok(files)
242 }
243
244 #[api(
245 input: {
246 properties: {
247 store: {
248 schema: DATASTORE_SCHEMA,
249 },
250 "backup-type": {
251 schema: BACKUP_TYPE_SCHEMA,
252 },
253 "backup-id": {
254 schema: BACKUP_ID_SCHEMA,
255 },
256 "backup-time": {
257 schema: BACKUP_TIME_SCHEMA,
258 },
259 },
260 },
261 access: {
262 permission: &Permission::Privilege(
263 &["datastore", "{store}"],
264 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
265 true),
266 },
267 )]
268 /// Delete backup snapshot.
269 fn delete_snapshot(
270 store: String,
271 backup_type: String,
272 backup_id: String,
273 backup_time: i64,
274 _info: &ApiMethod,
275 rpcenv: &mut dyn RpcEnvironment,
276 ) -> Result<Value, Error> {
277
278 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
279 let user_info = CachedUserInfo::new()?;
280 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
281
282 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
283
284 let datastore = DataStore::lookup_datastore(&store)?;
285
286 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
287 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
288
289 datastore.remove_backup_dir(&snapshot, false)?;
290
291 Ok(Value::Null)
292 }
293
294 #[api(
295 input: {
296 properties: {
297 store: {
298 schema: DATASTORE_SCHEMA,
299 },
300 "backup-type": {
301 optional: true,
302 schema: BACKUP_TYPE_SCHEMA,
303 },
304 "backup-id": {
305 optional: true,
306 schema: BACKUP_ID_SCHEMA,
307 },
308 },
309 },
310 returns: {
311 type: Array,
312 description: "Returns the list of snapshots.",
313 items: {
314 type: SnapshotListItem,
315 }
316 },
317 access: {
318 permission: &Permission::Privilege(
319 &["datastore", "{store}"],
320 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
321 true),
322 },
323 )]
324 /// List backup snapshots.
325 pub fn list_snapshots (
326 store: String,
327 backup_type: Option<String>,
328 backup_id: Option<String>,
329 _param: Value,
330 _info: &ApiMethod,
331 rpcenv: &mut dyn RpcEnvironment,
332 ) -> Result<Vec<SnapshotListItem>, Error> {
333
334 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
335 let user_info = CachedUserInfo::new()?;
336 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
337
338 let datastore = DataStore::lookup_datastore(&store)?;
339
340 let base_path = datastore.base_path();
341
342 let backup_list = BackupInfo::list_backups(&base_path)?;
343
344 let mut snapshots = vec![];
345
346 for info in backup_list {
347 let group = info.backup_dir.group();
348 if let Some(ref backup_type) = backup_type {
349 if backup_type != group.backup_type() { continue; }
350 }
351 if let Some(ref backup_id) = backup_id {
352 if backup_id != group.backup_id() { continue; }
353 }
354
355 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
356 let owner = datastore.get_owner(group)?;
357
358 if !list_all {
359 if owner != userid { continue; }
360 }
361
362 let mut size = None;
363
364 let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
365 Ok((manifest, files)) => {
366 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
367 // extract the first line from notes
368 let comment: Option<String> = manifest.unprotected["notes"]
369 .as_str()
370 .and_then(|notes| notes.lines().next())
371 .map(String::from);
372
373 let verify = manifest.unprotected["verify_state"].clone();
374 let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
375 Ok(verify) => verify,
376 Err(err) => {
377 eprintln!("error parsing verification state : '{}'", err);
378 None
379 }
380 };
381
382 (comment, verify, files)
383 },
384 Err(err) => {
385 eprintln!("error during snapshot file listing: '{}'", err);
386 (
387 None,
388 None,
389 info
390 .files
391 .iter()
392 .map(|x| BackupContent {
393 filename: x.to_string(),
394 size: None,
395 crypt_mode: None,
396 })
397 .collect()
398 )
399 },
400 };
401
402 let result_item = SnapshotListItem {
403 backup_type: group.backup_type().to_string(),
404 backup_id: group.backup_id().to_string(),
405 backup_time: info.backup_dir.backup_time().timestamp(),
406 comment,
407 verification,
408 files,
409 size,
410 owner: Some(owner),
411 };
412
413 snapshots.push(result_item);
414 }
415
416 Ok(snapshots)
417 }
418
419 #[api(
420 input: {
421 properties: {
422 store: {
423 schema: DATASTORE_SCHEMA,
424 },
425 },
426 },
427 returns: {
428 type: StorageStatus,
429 },
430 access: {
431 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
432 },
433 )]
434 /// Get datastore status.
435 pub fn status(
436 store: String,
437 _info: &ApiMethod,
438 _rpcenv: &mut dyn RpcEnvironment,
439 ) -> Result<StorageStatus, Error> {
440 let datastore = DataStore::lookup_datastore(&store)?;
441 crate::tools::disks::disk_usage(&datastore.base_path())
442 }
443
444 #[api(
445 input: {
446 properties: {
447 store: {
448 schema: DATASTORE_SCHEMA,
449 },
450 "backup-type": {
451 schema: BACKUP_TYPE_SCHEMA,
452 optional: true,
453 },
454 "backup-id": {
455 schema: BACKUP_ID_SCHEMA,
456 optional: true,
457 },
458 "backup-time": {
459 schema: BACKUP_TIME_SCHEMA,
460 optional: true,
461 },
462 },
463 },
464 returns: {
465 schema: UPID_SCHEMA,
466 },
467 access: {
468 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
469 },
470 )]
471 /// Verify backups.
472 ///
473 /// This function can verify a single backup snapshot, all backup from a backup group,
474 /// or all backups in the datastore.
475 pub fn verify(
476 store: String,
477 backup_type: Option<String>,
478 backup_id: Option<String>,
479 backup_time: Option<i64>,
480 rpcenv: &mut dyn RpcEnvironment,
481 ) -> Result<Value, Error> {
482 let datastore = DataStore::lookup_datastore(&store)?;
483
484 let worker_id;
485
486 let mut backup_dir = None;
487 let mut backup_group = None;
488
489 match (backup_type, backup_id, backup_time) {
490 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
491 worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
492 let dir = BackupDir::new(backup_type, backup_id, backup_time);
493 backup_dir = Some(dir);
494 }
495 (Some(backup_type), Some(backup_id), None) => {
496 worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
497 let group = BackupGroup::new(backup_type, backup_id);
498 backup_group = Some(group);
499 }
500 (None, None, None) => {
501 worker_id = store.clone();
502 }
503 _ => bail!("parameters do not specify a backup group or snapshot"),
504 }
505
506 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
507 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
508
509 let upid_str = WorkerTask::new_thread(
510 "verify",
511 Some(worker_id.clone()),
512 userid,
513 to_stdout,
514 move |worker| {
515 let failed_dirs = if let Some(backup_dir) = backup_dir {
516 let mut verified_chunks = HashSet::with_capacity(1024*16);
517 let mut corrupt_chunks = HashSet::with_capacity(64);
518 let mut res = Vec::new();
519 if !verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)? {
520 res.push(backup_dir.to_string());
521 }
522 res
523 } else if let Some(backup_group) = backup_group {
524 verify_backup_group(&datastore, &backup_group, &worker)?
525 } else {
526 verify_all_backups(&datastore, &worker)?
527 };
528 if failed_dirs.len() > 0 {
529 worker.log("Failed to verify following snapshots:");
530 for dir in failed_dirs {
531 worker.log(format!("\t{}", dir));
532 }
533 bail!("verification failed - please check the log for details");
534 }
535 Ok(())
536 },
537 )?;
538
539 Ok(json!(upid_str))
540 }
541
/// Expand into an array of API parameter tuples `(name, optional, schema)`
/// containing the common `keep-*` prune options, with optional extra
/// entries prepended (`$list1`) and appended (`$list2`).
///
/// NOTE(review): the macro name carries a historic "prameters" typo; it is
/// `#[macro_export]`ed, so renaming would break external users.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // Single-list form: delegate to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
584
/// Return schema of the `prune` API call: an array of `PruneListItem`s.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
589
// Hand-written ApiMethod (instead of the #[api] proc macro) because the
// parameter list is assembled via the `add_common_prune_prameters!` macro.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);
611
/// Prune a backup group.
///
/// Applies the `keep-*` retention options to the snapshots of one backup
/// group and removes every snapshot not selected for keeping. With
/// `dry-run` set, nothing is deleted and only the would-be selection is
/// returned. Callers without `PRIV_DATASTORE_MODIFY` may only prune groups
/// they own.
fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    // MODIFY privilege allows pruning any group, otherwise only own groups.
    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, &group, &userid)?; }

    // Optional keep-* parameters; absent entries mean "no limit of this kind".
    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    // Pair every snapshot with its keep/remove decision.
    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    // If no keep-* option is set at all, every snapshot is kept.
    let keep_all = !prune_options.keeps_something();

    if dry_run {
        // Report the selection without touching anything.
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchrounously
    let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;

    let result = try_block! {
        if keep_all {
            worker.log("No prune selection - keeping all files.");
        } else {
            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
            worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                store, backup_type, backup_id));
        }

        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let timestamp = BackupDir::backup_time_to_string(backup_time);
            let group = info.backup_dir.group();


            // One log line per snapshot stating the decision.
            let msg = format!(
                "{}/{}/{} {}",
                group.backup_type(),
                group.backup_id(),
                timestamp,
                if keep { "keep" } else { "remove" },
            );

            worker.log(msg);

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));

            // NOTE(review): `dry_run` is always false here (the dry-run
            // branch returned above); the check is kept defensively.
            if !(dry_run || keep) {
                datastore.remove_backup_dir(&info.backup_dir, true)?;
            }
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    };

    Ok(json!(prune_result))
}
727
728 #[api(
729 input: {
730 properties: {
731 store: {
732 schema: DATASTORE_SCHEMA,
733 },
734 },
735 },
736 returns: {
737 schema: UPID_SCHEMA,
738 },
739 access: {
740 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
741 },
742 )]
743 /// Start garbage collection.
744 fn start_garbage_collection(
745 store: String,
746 _info: &ApiMethod,
747 rpcenv: &mut dyn RpcEnvironment,
748 ) -> Result<Value, Error> {
749
750 let datastore = DataStore::lookup_datastore(&store)?;
751
752 println!("Starting garbage collection on store {}", store);
753
754 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
755
756 let upid_str = WorkerTask::new_thread(
757 "garbage_collection",
758 Some(store.clone()),
759 Userid::root_userid().clone(),
760 to_stdout,
761 move |worker| {
762 worker.log(format!("starting garbage collection on store {}", store));
763 datastore.garbage_collection(&worker)
764 },
765 )?;
766
767 Ok(json!(upid_str))
768 }
769
770 #[api(
771 input: {
772 properties: {
773 store: {
774 schema: DATASTORE_SCHEMA,
775 },
776 },
777 },
778 returns: {
779 type: GarbageCollectionStatus,
780 },
781 access: {
782 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
783 },
784 )]
785 /// Garbage collection status.
786 pub fn garbage_collection_status(
787 store: String,
788 _info: &ApiMethod,
789 _rpcenv: &mut dyn RpcEnvironment,
790 ) -> Result<GarbageCollectionStatus, Error> {
791
792 let datastore = DataStore::lookup_datastore(&store)?;
793
794 let status = datastore.last_gc_status();
795
796 Ok(status)
797 }
798
799 #[api(
800 returns: {
801 description: "List the accessible datastores.",
802 type: Array,
803 items: {
804 description: "Datastore name and description.",
805 properties: {
806 store: {
807 schema: DATASTORE_SCHEMA,
808 },
809 comment: {
810 optional: true,
811 schema: SINGLE_LINE_COMMENT_SCHEMA,
812 },
813 },
814 },
815 },
816 access: {
817 permission: &Permission::Anybody,
818 },
819 )]
820 /// Datastore list
821 fn get_datastore_list(
822 _param: Value,
823 _info: &ApiMethod,
824 rpcenv: &mut dyn RpcEnvironment,
825 ) -> Result<Value, Error> {
826
827 let (config, _digest) = datastore::config()?;
828
829 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
830 let user_info = CachedUserInfo::new()?;
831
832 let mut list = Vec::new();
833
834 for (store, (_, data)) in &config.sections {
835 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
836 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
837 if allowed {
838 let mut entry = json!({ "store": store });
839 if let Some(comment) = data["comment"].as_str() {
840 entry["comment"] = comment.into();
841 }
842 list.push(entry);
843 }
844 }
845
846 Ok(list.into())
847 }
848
// Hand-written ApiMethod: AsyncHttp handlers stream a raw response body,
// which the #[api] proc macro does not express.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
867
868 fn download_file(
869 _parts: Parts,
870 _req_body: Body,
871 param: Value,
872 _info: &ApiMethod,
873 rpcenv: Box<dyn RpcEnvironment>,
874 ) -> ApiResponseFuture {
875
876 async move {
877 let store = tools::required_string_param(&param, "store")?;
878 let datastore = DataStore::lookup_datastore(store)?;
879
880 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
881 let user_info = CachedUserInfo::new()?;
882 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
883
884 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
885
886 let backup_type = tools::required_string_param(&param, "backup-type")?;
887 let backup_id = tools::required_string_param(&param, "backup-id")?;
888 let backup_time = tools::required_integer_param(&param, "backup-time")?;
889
890 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
891
892 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
893 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
894
895 println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
896
897 let mut path = datastore.base_path();
898 path.push(backup_dir.relative_path());
899 path.push(&file_name);
900
901 let file = tokio::fs::File::open(&path)
902 .await
903 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
904
905 let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
906 .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
907 .map_err(move |err| {
908 eprintln!("error during streaming of '{:?}' - {}", &path, err);
909 err
910 });
911 let body = Body::wrap_stream(payload);
912
913 // fixme: set other headers ?
914 Ok(Response::builder()
915 .status(StatusCode::OK)
916 .header(header::CONTENT_TYPE, "application/octet-stream")
917 .body(body)
918 .unwrap())
919 }.boxed()
920 }
921
// Hand-written ApiMethod: AsyncHttp handlers stream a raw response body,
// which the #[api] proc macro does not express.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
940
// Handler for API_METHOD_DOWNLOAD_FILE_DECODED: streams the decoded
// content of a single archive file. Refuses encrypted archives, since the
// server cannot decrypt them.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // READ privilege grants access to any snapshot; otherwise the
        // requester must own the backup group.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        // Reject encrypted archives up front.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // The last dot-separated component selects the decoding strategy.
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                // Dynamic (variable-sized chunk) index: verify the index
                // checksum against the manifest, then stream the chunks.
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                // Fixed-size chunk index: same verification, streamed with
                // a 4 MiB buffer.
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                // Single blob: stream through DataBlobReader (handles the
                // blob format).
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1040
// Hand-written ApiMethod: AsyncHttp handler consuming the raw request body.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
1057
// Handler for API_METHOD_UPLOAD_BACKUP_LOG: stores the request body as the
// snapshot's client log blob. Only the owner may upload, and only once.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // Only the owner of the backup group may upload its log.
        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // The log may be uploaded at most once per snapshot.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
            backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

        // Collect the whole request body into one buffer.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1109
1110 #[api(
1111 input: {
1112 properties: {
1113 store: {
1114 schema: DATASTORE_SCHEMA,
1115 },
1116 "backup-type": {
1117 schema: BACKUP_TYPE_SCHEMA,
1118 },
1119 "backup-id": {
1120 schema: BACKUP_ID_SCHEMA,
1121 },
1122 "backup-time": {
1123 schema: BACKUP_TIME_SCHEMA,
1124 },
1125 "filepath": {
1126 description: "Base64 encoded path.",
1127 type: String,
1128 }
1129 },
1130 },
1131 access: {
1132 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1133 },
1134 )]
1135 /// Get the entries of the given path of the catalog
1136 fn catalog(
1137 store: String,
1138 backup_type: String,
1139 backup_id: String,
1140 backup_time: i64,
1141 filepath: String,
1142 _param: Value,
1143 _info: &ApiMethod,
1144 rpcenv: &mut dyn RpcEnvironment,
1145 ) -> Result<Value, Error> {
1146 let datastore = DataStore::lookup_datastore(&store)?;
1147
1148 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1149 let user_info = CachedUserInfo::new()?;
1150 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1151
1152 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1153
1154 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1155 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1156
1157 let file_name = CATALOG_NAME;
1158
1159 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1160 for file in files {
1161 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1162 bail!("cannot decode '{}' - is encrypted", file_name);
1163 }
1164 }
1165
1166 let mut path = datastore.base_path();
1167 path.push(backup_dir.relative_path());
1168 path.push(file_name);
1169
1170 let index = DynamicIndexReader::open(&path)
1171 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1172
1173 let (csum, size) = index.compute_csum();
1174 manifest.verify_file(&file_name, &csum, size)?;
1175
1176 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1177 let reader = BufferedDynamicReader::new(index, chunk_reader);
1178
1179 let mut catalog_reader = CatalogReader::new(reader);
1180 let mut current = catalog_reader.root()?;
1181 let mut components = vec![];
1182
1183
1184 if filepath != "root" {
1185 components = base64::decode(filepath)?;
1186 if components.len() > 0 && components[0] == '/' as u8 {
1187 components.remove(0);
1188 }
1189 for component in components.split(|c| *c == '/' as u8) {
1190 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1191 current = entry;
1192 } else {
1193 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1194 }
1195 }
1196 }
1197
1198 let mut res = Vec::new();
1199
1200 for direntry in catalog_reader.read_dir(&current)? {
1201 let mut components = components.clone();
1202 components.push('/' as u8);
1203 components.extend(&direntry.name);
1204 let path = base64::encode(components);
1205 let text = String::from_utf8_lossy(&direntry.name);
1206 let mut entry = json!({
1207 "filepath": path,
1208 "text": text,
1209 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1210 "leaf": true,
1211 });
1212 match direntry.attr {
1213 DirEntryAttribute::Directory { start: _ } => {
1214 entry["leaf"] = false.into();
1215 },
1216 DirEntryAttribute::File { size, mtime } => {
1217 entry["size"] = size.into();
1218 entry["mtime"] = mtime.into();
1219 },
1220 _ => {},
1221 }
1222 res.push(entry);
1223 }
1224
1225 Ok(res.into())
1226 }
1227
/// Handcrafted API method definition for [`pxar_file_download`].
///
/// Declared manually (instead of via `#[api]`) because the handler streams
/// an HTTP response body and therefore uses `ApiHandler::AsyncHttp`.
/// The permission here is the coarse gate; the handler additionally enforces
/// a backup-owner check for users that only have `Datastore.Backup`.
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1246
1247 fn pxar_file_download(
1248 _parts: Parts,
1249 _req_body: Body,
1250 param: Value,
1251 _info: &ApiMethod,
1252 rpcenv: Box<dyn RpcEnvironment>,
1253 ) -> ApiResponseFuture {
1254
1255 async move {
1256 let store = tools::required_string_param(&param, "store")?;
1257 let datastore = DataStore::lookup_datastore(&store)?;
1258
1259 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1260 let user_info = CachedUserInfo::new()?;
1261 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1262
1263 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1264
1265 let backup_type = tools::required_string_param(&param, "backup-type")?;
1266 let backup_id = tools::required_string_param(&param, "backup-id")?;
1267 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1268
1269 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1270
1271 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1272 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1273
1274 let mut components = base64::decode(&filepath)?;
1275 if components.len() > 0 && components[0] == '/' as u8 {
1276 components.remove(0);
1277 }
1278
1279 let mut split = components.splitn(2, |c| *c == '/' as u8);
1280 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
1281 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
1282 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1283 for file in files {
1284 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1285 bail!("cannot decode '{}' - is encrypted", pxar_name);
1286 }
1287 }
1288
1289 let mut path = datastore.base_path();
1290 path.push(backup_dir.relative_path());
1291 path.push(pxar_name);
1292
1293 let index = DynamicIndexReader::open(&path)
1294 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1295
1296 let (csum, size) = index.compute_csum();
1297 manifest.verify_file(&pxar_name, &csum, size)?;
1298
1299 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1300 let reader = BufferedDynamicReader::new(index, chunk_reader);
1301 let archive_size = reader.archive_size();
1302 let reader = LocalDynamicReadAt::new(reader);
1303
1304 let decoder = Accessor::new(reader, archive_size).await?;
1305 let root = decoder.open_root().await?;
1306 let file = root
1307 .lookup(OsStr::from_bytes(file_path)).await?
1308 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1309
1310 let file = match file.kind() {
1311 EntryKind::File { .. } => file,
1312 EntryKind::Hardlink(_) => {
1313 decoder.follow_hardlink(&file).await?
1314 },
1315 // TODO symlink
1316 other => bail!("cannot download file of type {:?}", other),
1317 };
1318
1319 let body = Body::wrap_stream(
1320 AsyncReaderStream::new(file.contents().await?)
1321 .map_err(move |err| {
1322 eprintln!("error during streaming of '{:?}' - {}", filepath, err);
1323 err
1324 })
1325 );
1326
1327 // fixme: set other headers ?
1328 Ok(Response::builder()
1329 .status(StatusCode::OK)
1330 .header(header::CONTENT_TYPE, "application/octet-stream")
1331 .body(body)
1332 .unwrap())
1333 }.boxed()
1334 }
1335
1336 #[api(
1337 input: {
1338 properties: {
1339 store: {
1340 schema: DATASTORE_SCHEMA,
1341 },
1342 timeframe: {
1343 type: RRDTimeFrameResolution,
1344 },
1345 cf: {
1346 type: RRDMode,
1347 },
1348 },
1349 },
1350 access: {
1351 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1352 },
1353 )]
1354 /// Read datastore stats
1355 fn get_rrd_stats(
1356 store: String,
1357 timeframe: RRDTimeFrameResolution,
1358 cf: RRDMode,
1359 _param: Value,
1360 ) -> Result<Value, Error> {
1361
1362 create_value_from_rrd(
1363 &format!("datastore/{}", store),
1364 &[
1365 "total", "used",
1366 "read_ios", "read_bytes",
1367 "write_ios", "write_bytes",
1368 "io_ticks",
1369 ],
1370 timeframe,
1371 cf,
1372 )
1373 }
1374
1375 #[api(
1376 input: {
1377 properties: {
1378 store: {
1379 schema: DATASTORE_SCHEMA,
1380 },
1381 "backup-type": {
1382 schema: BACKUP_TYPE_SCHEMA,
1383 },
1384 "backup-id": {
1385 schema: BACKUP_ID_SCHEMA,
1386 },
1387 "backup-time": {
1388 schema: BACKUP_TIME_SCHEMA,
1389 },
1390 },
1391 },
1392 access: {
1393 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1394 },
1395 )]
1396 /// Get "notes" for a specific backup
1397 fn get_notes(
1398 store: String,
1399 backup_type: String,
1400 backup_id: String,
1401 backup_time: i64,
1402 rpcenv: &mut dyn RpcEnvironment,
1403 ) -> Result<String, Error> {
1404 let datastore = DataStore::lookup_datastore(&store)?;
1405
1406 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1407 let user_info = CachedUserInfo::new()?;
1408 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1409
1410 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1411
1412 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1413 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1414
1415 let manifest = datastore.load_manifest_json(&backup_dir)?;
1416
1417 let notes = manifest["unprotected"]["notes"]
1418 .as_str()
1419 .unwrap_or("");
1420
1421 Ok(String::from(notes))
1422 }
1423
1424 #[api(
1425 input: {
1426 properties: {
1427 store: {
1428 schema: DATASTORE_SCHEMA,
1429 },
1430 "backup-type": {
1431 schema: BACKUP_TYPE_SCHEMA,
1432 },
1433 "backup-id": {
1434 schema: BACKUP_ID_SCHEMA,
1435 },
1436 "backup-time": {
1437 schema: BACKUP_TIME_SCHEMA,
1438 },
1439 notes: {
1440 description: "A multiline text.",
1441 },
1442 },
1443 },
1444 access: {
1445 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1446 },
1447 )]
1448 /// Set "notes" for a specific backup
1449 fn set_notes(
1450 store: String,
1451 backup_type: String,
1452 backup_id: String,
1453 backup_time: i64,
1454 notes: String,
1455 rpcenv: &mut dyn RpcEnvironment,
1456 ) -> Result<(), Error> {
1457 let datastore = DataStore::lookup_datastore(&store)?;
1458
1459 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1460 let user_info = CachedUserInfo::new()?;
1461 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1462
1463 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1464
1465 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1466 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1467
1468 let mut manifest = datastore.load_manifest_json(&backup_dir)?;
1469
1470 manifest["unprotected"]["notes"] = notes.into();
1471
1472 datastore.store_manifest(&backup_dir, manifest)?;
1473
1474 Ok(())
1475 }
1476
/// Sub-directory routes available below `/admin/datastore/{store}/`.
///
/// NOTE(review): entries are listed in alphabetical order by subdir name;
/// the router's subdir lookup presumably relies on this ordering — confirm
/// before inserting a new entry out of order.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1553
/// Router for a single datastore: GET lists the available sub-directories,
/// everything else is dispatched via `DATASTORE_INFO_SUBDIRS`.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
1557
1558
/// Top-level admin/datastore router: GET lists all datastores, while
/// `/{store}/...` requests are handled by `DATASTORE_INFO_ROUTER`.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);