]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
c260b62da7721b002fc97fd125edb06bd5e16410
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 use std::collections::{HashSet, HashMap};
2 use std::ffi::OsStr;
3 use std::os::unix::ffi::OsStrExt;
4 use std::sync::{Arc, Mutex};
5
6 use anyhow::{bail, format_err, Error};
7 use futures::*;
8 use hyper::http::request::Parts;
9 use hyper::{header, Body, Response, StatusCode};
10 use serde_json::{json, Value};
11
12 use proxmox::api::{
13 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
14 RpcEnvironment, RpcEnvironmentType, Permission
15 };
16 use proxmox::api::router::SubdirMap;
17 use proxmox::api::schema::*;
18 use proxmox::tools::fs::{replace_file, CreateOptions};
19 use proxmox::try_block;
20 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
21
22 use pxar::accessor::aio::Accessor;
23 use pxar::EntryKind;
24
25 use crate::api2::types::*;
26 use crate::api2::node::rrd::create_value_from_rrd;
27 use crate::backup::*;
28 use crate::config::datastore;
29 use crate::config::cached_user_info::CachedUserInfo;
30
31 use crate::server::WorkerTask;
32 use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
33 use crate::config::acl::{
34 PRIV_DATASTORE_AUDIT,
35 PRIV_DATASTORE_MODIFY,
36 PRIV_DATASTORE_READ,
37 PRIV_DATASTORE_PRUNE,
38 PRIV_DATASTORE_BACKUP,
39 };
40
41 fn check_backup_owner(
42 store: &DataStore,
43 group: &BackupGroup,
44 userid: &Userid,
45 ) -> Result<(), Error> {
46 let owner = store.get_owner(group)?;
47 if &owner != userid {
48 bail!("backup owner check failed ({} != {})", userid, owner);
49 }
50 Ok(())
51 }
52
53 fn read_backup_index(
54 store: &DataStore,
55 backup_dir: &BackupDir,
56 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
57
58 let (manifest, index_size) = store.load_manifest(backup_dir)?;
59
60 let mut result = Vec::new();
61 for item in manifest.files() {
62 result.push(BackupContent {
63 filename: item.filename.clone(),
64 crypt_mode: Some(item.crypt_mode),
65 size: Some(item.size),
66 });
67 }
68
69 result.push(BackupContent {
70 filename: MANIFEST_BLOB_NAME.to_string(),
71 crypt_mode: match manifest.signature {
72 Some(_) => Some(CryptMode::SignOnly),
73 None => Some(CryptMode::None),
74 },
75 size: Some(index_size),
76 });
77
78 Ok((manifest, result))
79 }
80
81 fn get_all_snapshot_files(
82 store: &DataStore,
83 info: &BackupInfo,
84 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
85
86 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
87
88 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
89 acc.insert(item.filename.clone());
90 acc
91 });
92
93 for file in &info.files {
94 if file_set.contains(file) { continue; }
95 files.push(BackupContent {
96 filename: file.to_string(),
97 size: None,
98 crypt_mode: None,
99 });
100 }
101
102 Ok((manifest, files))
103 }
104
105 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
106
107 let mut group_hash = HashMap::new();
108
109 for info in backup_list {
110 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
111 let time_list = group_hash.entry(group_id).or_insert(vec![]);
112 time_list.push(info);
113 }
114
115 group_hash
116 }
117
118 #[api(
119 input: {
120 properties: {
121 store: {
122 schema: DATASTORE_SCHEMA,
123 },
124 },
125 },
126 returns: {
127 type: Array,
128 description: "Returns the list of backup groups.",
129 items: {
130 type: GroupListItem,
131 }
132 },
133 access: {
134 permission: &Permission::Privilege(
135 &["datastore", "{store}"],
136 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
137 true),
138 },
139 )]
140 /// List backup groups.
141 fn list_groups(
142 store: String,
143 rpcenv: &mut dyn RpcEnvironment,
144 ) -> Result<Vec<GroupListItem>, Error> {
145
146 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
147 let user_info = CachedUserInfo::new()?;
148 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
149
150 let datastore = DataStore::lookup_datastore(&store)?;
151
152 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
153
154 let group_hash = group_backups(backup_list);
155
156 let mut groups = Vec::new();
157
158 for (_group_id, mut list) in group_hash {
159
160 BackupInfo::sort_list(&mut list, false);
161
162 let info = &list[0];
163
164 let group = info.backup_dir.group();
165
166 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
167 let owner = datastore.get_owner(group)?;
168 if !list_all {
169 if owner != userid { continue; }
170 }
171
172 let result_item = GroupListItem {
173 backup_type: group.backup_type().to_string(),
174 backup_id: group.backup_id().to_string(),
175 last_backup: info.backup_dir.backup_time(),
176 backup_count: list.len() as u64,
177 files: info.files.clone(),
178 owner: Some(owner),
179 };
180 groups.push(result_item);
181 }
182
183 Ok(groups)
184 }
185
186 #[api(
187 input: {
188 properties: {
189 store: {
190 schema: DATASTORE_SCHEMA,
191 },
192 "backup-type": {
193 schema: BACKUP_TYPE_SCHEMA,
194 },
195 "backup-id": {
196 schema: BACKUP_ID_SCHEMA,
197 },
198 "backup-time": {
199 schema: BACKUP_TIME_SCHEMA,
200 },
201 },
202 },
203 returns: {
204 type: Array,
205 description: "Returns the list of archive files inside a backup snapshots.",
206 items: {
207 type: BackupContent,
208 }
209 },
210 access: {
211 permission: &Permission::Privilege(
212 &["datastore", "{store}"],
213 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
214 true),
215 },
216 )]
217 /// List snapshot files.
218 pub fn list_snapshot_files(
219 store: String,
220 backup_type: String,
221 backup_id: String,
222 backup_time: i64,
223 _info: &ApiMethod,
224 rpcenv: &mut dyn RpcEnvironment,
225 ) -> Result<Vec<BackupContent>, Error> {
226
227 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
228 let user_info = CachedUserInfo::new()?;
229 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
230
231 let datastore = DataStore::lookup_datastore(&store)?;
232
233 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
234
235 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
236 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
237
238 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
239
240 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
241
242 Ok(files)
243 }
244
245 #[api(
246 input: {
247 properties: {
248 store: {
249 schema: DATASTORE_SCHEMA,
250 },
251 "backup-type": {
252 schema: BACKUP_TYPE_SCHEMA,
253 },
254 "backup-id": {
255 schema: BACKUP_ID_SCHEMA,
256 },
257 "backup-time": {
258 schema: BACKUP_TIME_SCHEMA,
259 },
260 },
261 },
262 access: {
263 permission: &Permission::Privilege(
264 &["datastore", "{store}"],
265 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
266 true),
267 },
268 )]
269 /// Delete backup snapshot.
270 fn delete_snapshot(
271 store: String,
272 backup_type: String,
273 backup_id: String,
274 backup_time: i64,
275 _info: &ApiMethod,
276 rpcenv: &mut dyn RpcEnvironment,
277 ) -> Result<Value, Error> {
278
279 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
280 let user_info = CachedUserInfo::new()?;
281 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
282
283 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
284
285 let datastore = DataStore::lookup_datastore(&store)?;
286
287 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
288 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
289
290 datastore.remove_backup_dir(&snapshot, false)?;
291
292 Ok(Value::Null)
293 }
294
295 #[api(
296 input: {
297 properties: {
298 store: {
299 schema: DATASTORE_SCHEMA,
300 },
301 "backup-type": {
302 optional: true,
303 schema: BACKUP_TYPE_SCHEMA,
304 },
305 "backup-id": {
306 optional: true,
307 schema: BACKUP_ID_SCHEMA,
308 },
309 },
310 },
311 returns: {
312 type: Array,
313 description: "Returns the list of snapshots.",
314 items: {
315 type: SnapshotListItem,
316 }
317 },
318 access: {
319 permission: &Permission::Privilege(
320 &["datastore", "{store}"],
321 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
322 true),
323 },
324 )]
325 /// List backup snapshots.
326 pub fn list_snapshots (
327 store: String,
328 backup_type: Option<String>,
329 backup_id: Option<String>,
330 _param: Value,
331 _info: &ApiMethod,
332 rpcenv: &mut dyn RpcEnvironment,
333 ) -> Result<Vec<SnapshotListItem>, Error> {
334
335 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
336 let user_info = CachedUserInfo::new()?;
337 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
338
339 let datastore = DataStore::lookup_datastore(&store)?;
340
341 let base_path = datastore.base_path();
342
343 let backup_list = BackupInfo::list_backups(&base_path)?;
344
345 let mut snapshots = vec![];
346
347 for info in backup_list {
348 let group = info.backup_dir.group();
349 if let Some(ref backup_type) = backup_type {
350 if backup_type != group.backup_type() { continue; }
351 }
352 if let Some(ref backup_id) = backup_id {
353 if backup_id != group.backup_id() { continue; }
354 }
355
356 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
357 let owner = datastore.get_owner(group)?;
358
359 if !list_all {
360 if owner != userid { continue; }
361 }
362
363 let mut size = None;
364
365 let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
366 Ok((manifest, files)) => {
367 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
368 // extract the first line from notes
369 let comment: Option<String> = manifest.unprotected["notes"]
370 .as_str()
371 .and_then(|notes| notes.lines().next())
372 .map(String::from);
373
374 let verify = manifest.unprotected["verify_state"].clone();
375 let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
376 Ok(verify) => verify,
377 Err(err) => {
378 eprintln!("error parsing verification state : '{}'", err);
379 None
380 }
381 };
382
383 (comment, verify, files)
384 },
385 Err(err) => {
386 eprintln!("error during snapshot file listing: '{}'", err);
387 (
388 None,
389 None,
390 info
391 .files
392 .iter()
393 .map(|x| BackupContent {
394 filename: x.to_string(),
395 size: None,
396 crypt_mode: None,
397 })
398 .collect()
399 )
400 },
401 };
402
403 let result_item = SnapshotListItem {
404 backup_type: group.backup_type().to_string(),
405 backup_id: group.backup_id().to_string(),
406 backup_time: info.backup_dir.backup_time(),
407 comment,
408 verification,
409 files,
410 size,
411 owner: Some(owner),
412 };
413
414 snapshots.push(result_item);
415 }
416
417 Ok(snapshots)
418 }
419
420 #[api(
421 input: {
422 properties: {
423 store: {
424 schema: DATASTORE_SCHEMA,
425 },
426 },
427 },
428 returns: {
429 type: StorageStatus,
430 },
431 access: {
432 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
433 },
434 )]
435 /// Get datastore status.
436 pub fn status(
437 store: String,
438 _info: &ApiMethod,
439 _rpcenv: &mut dyn RpcEnvironment,
440 ) -> Result<StorageStatus, Error> {
441 let datastore = DataStore::lookup_datastore(&store)?;
442 crate::tools::disks::disk_usage(&datastore.base_path())
443 }
444
445 #[api(
446 input: {
447 properties: {
448 store: {
449 schema: DATASTORE_SCHEMA,
450 },
451 "backup-type": {
452 schema: BACKUP_TYPE_SCHEMA,
453 optional: true,
454 },
455 "backup-id": {
456 schema: BACKUP_ID_SCHEMA,
457 optional: true,
458 },
459 "backup-time": {
460 schema: BACKUP_TIME_SCHEMA,
461 optional: true,
462 },
463 },
464 },
465 returns: {
466 schema: UPID_SCHEMA,
467 },
468 access: {
469 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
470 },
471 )]
472 /// Verify backups.
473 ///
474 /// This function can verify a single backup snapshot, all backup from a backup group,
475 /// or all backups in the datastore.
476 pub fn verify(
477 store: String,
478 backup_type: Option<String>,
479 backup_id: Option<String>,
480 backup_time: Option<i64>,
481 rpcenv: &mut dyn RpcEnvironment,
482 ) -> Result<Value, Error> {
483 let datastore = DataStore::lookup_datastore(&store)?;
484
485 let worker_id;
486
487 let mut backup_dir = None;
488 let mut backup_group = None;
489
490 match (backup_type, backup_id, backup_time) {
491 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
492 worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
493 let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
494 backup_dir = Some(dir);
495 }
496 (Some(backup_type), Some(backup_id), None) => {
497 worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
498 let group = BackupGroup::new(backup_type, backup_id);
499 backup_group = Some(group);
500 }
501 (None, None, None) => {
502 worker_id = store.clone();
503 }
504 _ => bail!("parameters do not specify a backup group or snapshot"),
505 }
506
507 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
508 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
509
510 let upid_str = WorkerTask::new_thread(
511 "verify",
512 Some(worker_id.clone()),
513 userid,
514 to_stdout,
515 move |worker| {
516 let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
517 let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
518
519 let failed_dirs = if let Some(backup_dir) = backup_dir {
520 let mut res = Vec::new();
521 if !verify_backup_dir(
522 datastore,
523 &backup_dir,
524 verified_chunks,
525 corrupt_chunks,
526 worker.clone(),
527 worker.upid().clone(),
528 )? {
529 res.push(backup_dir.to_string());
530 }
531 res
532 } else if let Some(backup_group) = backup_group {
533 let (_count, failed_dirs) = verify_backup_group(
534 datastore,
535 &backup_group,
536 verified_chunks,
537 corrupt_chunks,
538 None,
539 worker.clone(),
540 worker.upid(),
541 )?;
542 failed_dirs
543 } else {
544 verify_all_backups(datastore, worker.clone(), worker.upid())?
545 };
546 if failed_dirs.len() > 0 {
547 worker.log("Failed to verify following snapshots:");
548 for dir in failed_dirs {
549 worker.log(format!("\t{}", dir));
550 }
551 bail!("verification failed - please check the log for details");
552 }
553 Ok(())
554 },
555 )?;
556
557 Ok(json!(upid_str))
558 }
559
/// Splice the common `keep-*` prune option schema entries into an API
/// parameter list.
///
/// Invoked with one or two bracketed lists: entries from the first list are
/// placed before the generated `keep-*` tuples, entries from the second list
/// after them. Each tuple has the shape `(name, optional, schema)`.
///
/// NOTE(review): "prameters" is a typo, but the macro is exported under this
/// name via `#[macro_export]`, so renaming it would break external users.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // One-list form delegates to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
602
/// Return schema of the `prune` API call: an array of `PruneListItem`s.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
607
/// API method descriptor for `prune`. Built manually (instead of via the
/// `#[api]` macro) so the shared `keep-*` options can be spliced in through
/// `add_common_prune_prameters!`.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);
629
630 fn prune(
631 param: Value,
632 _info: &ApiMethod,
633 rpcenv: &mut dyn RpcEnvironment,
634 ) -> Result<Value, Error> {
635
636 let store = tools::required_string_param(&param, "store")?;
637 let backup_type = tools::required_string_param(&param, "backup-type")?;
638 let backup_id = tools::required_string_param(&param, "backup-id")?;
639
640 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
641 let user_info = CachedUserInfo::new()?;
642 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
643
644 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
645
646 let group = BackupGroup::new(backup_type, backup_id);
647
648 let datastore = DataStore::lookup_datastore(&store)?;
649
650 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
651 if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
652
653 let prune_options = PruneOptions {
654 keep_last: param["keep-last"].as_u64(),
655 keep_hourly: param["keep-hourly"].as_u64(),
656 keep_daily: param["keep-daily"].as_u64(),
657 keep_weekly: param["keep-weekly"].as_u64(),
658 keep_monthly: param["keep-monthly"].as_u64(),
659 keep_yearly: param["keep-yearly"].as_u64(),
660 };
661
662 let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
663
664 let mut prune_result = Vec::new();
665
666 let list = group.list_backups(&datastore.base_path())?;
667
668 let mut prune_info = compute_prune_info(list, &prune_options)?;
669
670 prune_info.reverse(); // delete older snapshots first
671
672 let keep_all = !prune_options.keeps_something();
673
674 if dry_run {
675 for (info, mut keep) in prune_info {
676 if keep_all { keep = true; }
677
678 let backup_time = info.backup_dir.backup_time();
679 let group = info.backup_dir.group();
680
681 prune_result.push(json!({
682 "backup-type": group.backup_type(),
683 "backup-id": group.backup_id(),
684 "backup-time": backup_time,
685 "keep": keep,
686 }));
687 }
688 return Ok(json!(prune_result));
689 }
690
691
692 // We use a WorkerTask just to have a task log, but run synchrounously
693 let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
694
695 let result = try_block! {
696 if keep_all {
697 worker.log("No prune selection - keeping all files.");
698 } else {
699 worker.log(format!("retention options: {}", prune_options.cli_options_string()));
700 worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
701 store, backup_type, backup_id));
702 }
703
704 for (info, mut keep) in prune_info {
705 if keep_all { keep = true; }
706
707 let backup_time = info.backup_dir.backup_time();
708 let timestamp = info.backup_dir.backup_time_string();
709 let group = info.backup_dir.group();
710
711
712 let msg = format!(
713 "{}/{}/{} {}",
714 group.backup_type(),
715 group.backup_id(),
716 timestamp,
717 if keep { "keep" } else { "remove" },
718 );
719
720 worker.log(msg);
721
722 prune_result.push(json!({
723 "backup-type": group.backup_type(),
724 "backup-id": group.backup_id(),
725 "backup-time": backup_time,
726 "keep": keep,
727 }));
728
729 if !(dry_run || keep) {
730 datastore.remove_backup_dir(&info.backup_dir, true)?;
731 }
732 }
733
734 Ok(())
735 };
736
737 worker.log_result(&result);
738
739 if let Err(err) = result {
740 bail!("prune failed - {}", err);
741 };
742
743 Ok(json!(prune_result))
744 }
745
746 #[api(
747 input: {
748 properties: {
749 store: {
750 schema: DATASTORE_SCHEMA,
751 },
752 },
753 },
754 returns: {
755 schema: UPID_SCHEMA,
756 },
757 access: {
758 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
759 },
760 )]
761 /// Start garbage collection.
762 fn start_garbage_collection(
763 store: String,
764 _info: &ApiMethod,
765 rpcenv: &mut dyn RpcEnvironment,
766 ) -> Result<Value, Error> {
767
768 let datastore = DataStore::lookup_datastore(&store)?;
769
770 println!("Starting garbage collection on store {}", store);
771
772 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
773
774 let upid_str = WorkerTask::new_thread(
775 "garbage_collection",
776 Some(store.clone()),
777 Userid::root_userid().clone(),
778 to_stdout,
779 move |worker| {
780 worker.log(format!("starting garbage collection on store {}", store));
781 datastore.garbage_collection(&*worker, worker.upid())
782 },
783 )?;
784
785 Ok(json!(upid_str))
786 }
787
788 #[api(
789 input: {
790 properties: {
791 store: {
792 schema: DATASTORE_SCHEMA,
793 },
794 },
795 },
796 returns: {
797 type: GarbageCollectionStatus,
798 },
799 access: {
800 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
801 },
802 )]
803 /// Garbage collection status.
804 pub fn garbage_collection_status(
805 store: String,
806 _info: &ApiMethod,
807 _rpcenv: &mut dyn RpcEnvironment,
808 ) -> Result<GarbageCollectionStatus, Error> {
809
810 let datastore = DataStore::lookup_datastore(&store)?;
811
812 let status = datastore.last_gc_status();
813
814 Ok(status)
815 }
816
817 #[api(
818 returns: {
819 description: "List the accessible datastores.",
820 type: Array,
821 items: {
822 description: "Datastore name and description.",
823 properties: {
824 store: {
825 schema: DATASTORE_SCHEMA,
826 },
827 comment: {
828 optional: true,
829 schema: SINGLE_LINE_COMMENT_SCHEMA,
830 },
831 },
832 },
833 },
834 access: {
835 permission: &Permission::Anybody,
836 },
837 )]
838 /// Datastore list
839 fn get_datastore_list(
840 _param: Value,
841 _info: &ApiMethod,
842 rpcenv: &mut dyn RpcEnvironment,
843 ) -> Result<Value, Error> {
844
845 let (config, _digest) = datastore::config()?;
846
847 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
848 let user_info = CachedUserInfo::new()?;
849
850 let mut list = Vec::new();
851
852 for (store, (_, data)) in &config.sections {
853 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
854 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
855 if allowed {
856 let mut entry = json!({ "store": store });
857 if let Some(comment) = data["comment"].as_str() {
858 entry["comment"] = comment.into();
859 }
860 list.push(entry);
861 }
862 }
863
864 Ok(list.into())
865 }
866
/// API method descriptor for `download_file`. Declared manually because the
/// handler is an `AsyncHttp` endpoint streaming a raw response body.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
885
886 fn download_file(
887 _parts: Parts,
888 _req_body: Body,
889 param: Value,
890 _info: &ApiMethod,
891 rpcenv: Box<dyn RpcEnvironment>,
892 ) -> ApiResponseFuture {
893
894 async move {
895 let store = tools::required_string_param(&param, "store")?;
896 let datastore = DataStore::lookup_datastore(store)?;
897
898 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
899 let user_info = CachedUserInfo::new()?;
900 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
901
902 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
903
904 let backup_type = tools::required_string_param(&param, "backup-type")?;
905 let backup_id = tools::required_string_param(&param, "backup-id")?;
906 let backup_time = tools::required_integer_param(&param, "backup-time")?;
907
908 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
909
910 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
911 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
912
913 println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
914
915 let mut path = datastore.base_path();
916 path.push(backup_dir.relative_path());
917 path.push(&file_name);
918
919 let file = tokio::fs::File::open(&path)
920 .await
921 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
922
923 let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
924 .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
925 .map_err(move |err| {
926 eprintln!("error during streaming of '{:?}' - {}", &path, err);
927 err
928 });
929 let body = Body::wrap_stream(payload);
930
931 // fixme: set other headers ?
932 Ok(Response::builder()
933 .status(StatusCode::OK)
934 .header(header::CONTENT_TYPE, "application/octet-stream")
935 .body(body)
936 .unwrap())
937 }.boxed()
938 }
939
/// API method descriptor for `download_file_decoded`. Like
/// `API_METHOD_DOWNLOAD_FILE`, but the handler decodes index/blob formats
/// before streaming (only works for unencrypted files).
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
958
/// Stream a single file of a backup snapshot in decoded form.
///
/// The decoding strategy is chosen by file extension (didx/fidx/blob);
/// encrypted files are rejected. Callers without `PRIV_DATASTORE_READ`
/// must own the backup group.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // READ grants access to any snapshot; otherwise only the group owner.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        // Refuse to decode encrypted archives — we cannot decrypt server-side.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Last dot-separated component decides the decoder below.
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                // Dynamic (chunked, variable-size) index: verify the index
                // checksum against the manifest, then stream decoded chunks.
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                // Fixed-size index: same checksum verification, but streamed
                // with a larger (4 MiB) buffer.
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                // Plain blob: streamed through DataBlobReader without a
                // manifest checksum verification (see FIXME below).
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1058
/// API method descriptor for `upload_backup_log`. Only the backup owner may
/// upload; the handler enforces the ownership check itself.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
1075
/// Store an uploaded client log blob as `client.log.blob` inside a snapshot.
///
/// The request body is the raw blob; it is CRC-verified server-side before
/// being written. Fails when the snapshot already has a log. Only the backup
/// owner may upload (checked below).
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        // Fixed target name for client logs.
        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Unconditional ownership check — no privilege bypass for log upload.
        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Logs are write-once per snapshot.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        // Collect the whole request body into memory before validation.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1127
1128 #[api(
1129 input: {
1130 properties: {
1131 store: {
1132 schema: DATASTORE_SCHEMA,
1133 },
1134 "backup-type": {
1135 schema: BACKUP_TYPE_SCHEMA,
1136 },
1137 "backup-id": {
1138 schema: BACKUP_ID_SCHEMA,
1139 },
1140 "backup-time": {
1141 schema: BACKUP_TIME_SCHEMA,
1142 },
1143 "filepath": {
1144 description: "Base64 encoded path.",
1145 type: String,
1146 }
1147 },
1148 },
1149 access: {
1150 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1151 },
1152 )]
1153 /// Get the entries of the given path of the catalog
1154 fn catalog(
1155 store: String,
1156 backup_type: String,
1157 backup_id: String,
1158 backup_time: i64,
1159 filepath: String,
1160 _param: Value,
1161 _info: &ApiMethod,
1162 rpcenv: &mut dyn RpcEnvironment,
1163 ) -> Result<Value, Error> {
1164 let datastore = DataStore::lookup_datastore(&store)?;
1165
1166 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1167 let user_info = CachedUserInfo::new()?;
1168 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1169
1170 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1171
1172 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1173 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1174
1175 let file_name = CATALOG_NAME;
1176
1177 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1178 for file in files {
1179 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1180 bail!("cannot decode '{}' - is encrypted", file_name);
1181 }
1182 }
1183
1184 let mut path = datastore.base_path();
1185 path.push(backup_dir.relative_path());
1186 path.push(file_name);
1187
1188 let index = DynamicIndexReader::open(&path)
1189 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1190
1191 let (csum, size) = index.compute_csum();
1192 manifest.verify_file(&file_name, &csum, size)?;
1193
1194 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1195 let reader = BufferedDynamicReader::new(index, chunk_reader);
1196
1197 let mut catalog_reader = CatalogReader::new(reader);
1198 let mut current = catalog_reader.root()?;
1199 let mut components = vec![];
1200
1201
1202 if filepath != "root" {
1203 components = base64::decode(filepath)?;
1204 if components.len() > 0 && components[0] == '/' as u8 {
1205 components.remove(0);
1206 }
1207 for component in components.split(|c| *c == '/' as u8) {
1208 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1209 current = entry;
1210 } else {
1211 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1212 }
1213 }
1214 }
1215
1216 let mut res = Vec::new();
1217
1218 for direntry in catalog_reader.read_dir(&current)? {
1219 let mut components = components.clone();
1220 components.push('/' as u8);
1221 components.extend(&direntry.name);
1222 let path = base64::encode(components);
1223 let text = String::from_utf8_lossy(&direntry.name);
1224 let mut entry = json!({
1225 "filepath": path,
1226 "text": text,
1227 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1228 "leaf": true,
1229 });
1230 match direntry.attr {
1231 DirEntryAttribute::Directory { start: _ } => {
1232 entry["leaf"] = false.into();
1233 },
1234 DirEntryAttribute::File { size, mtime } => {
1235 entry["size"] = size.into();
1236 entry["mtime"] = mtime.into();
1237 },
1238 _ => {},
1239 }
1240 res.push(entry);
1241 }
1242
1243 Ok(res.into())
1244 }
1245
// Method descriptor for the single-file download endpoint. Declared via
// `ApiMethod::new` directly (instead of the `#[api]` proc-macro) because the
// handler is an `ApiHandler::AsyncHttp` that streams a raw HTTP body.
// `#[sortable]`/`sorted!` sort the parameter list at compile time, as the
// schema requires ordered properties.
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
// Accessible with datastore read privilege, or backup privilege combined with
// the ownership check performed inside the handler.
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1264
1265 fn pxar_file_download(
1266 _parts: Parts,
1267 _req_body: Body,
1268 param: Value,
1269 _info: &ApiMethod,
1270 rpcenv: Box<dyn RpcEnvironment>,
1271 ) -> ApiResponseFuture {
1272
1273 async move {
1274 let store = tools::required_string_param(&param, "store")?;
1275 let datastore = DataStore::lookup_datastore(&store)?;
1276
1277 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1278 let user_info = CachedUserInfo::new()?;
1279 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1280
1281 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1282
1283 let backup_type = tools::required_string_param(&param, "backup-type")?;
1284 let backup_id = tools::required_string_param(&param, "backup-id")?;
1285 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1286
1287 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1288
1289 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1290 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1291
1292 let mut components = base64::decode(&filepath)?;
1293 if components.len() > 0 && components[0] == '/' as u8 {
1294 components.remove(0);
1295 }
1296
1297 let mut split = components.splitn(2, |c| *c == '/' as u8);
1298 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
1299 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
1300 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1301 for file in files {
1302 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1303 bail!("cannot decode '{}' - is encrypted", pxar_name);
1304 }
1305 }
1306
1307 let mut path = datastore.base_path();
1308 path.push(backup_dir.relative_path());
1309 path.push(pxar_name);
1310
1311 let index = DynamicIndexReader::open(&path)
1312 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1313
1314 let (csum, size) = index.compute_csum();
1315 manifest.verify_file(&pxar_name, &csum, size)?;
1316
1317 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1318 let reader = BufferedDynamicReader::new(index, chunk_reader);
1319 let archive_size = reader.archive_size();
1320 let reader = LocalDynamicReadAt::new(reader);
1321
1322 let decoder = Accessor::new(reader, archive_size).await?;
1323 let root = decoder.open_root().await?;
1324 let file = root
1325 .lookup(OsStr::from_bytes(file_path)).await?
1326 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1327
1328 let file = match file.kind() {
1329 EntryKind::File { .. } => file,
1330 EntryKind::Hardlink(_) => {
1331 decoder.follow_hardlink(&file).await?
1332 },
1333 // TODO symlink
1334 other => bail!("cannot download file of type {:?}", other),
1335 };
1336
1337 let body = Body::wrap_stream(
1338 AsyncReaderStream::new(file.contents().await?)
1339 .map_err(move |err| {
1340 eprintln!("error during streaming of '{:?}' - {}", filepath, err);
1341 err
1342 })
1343 );
1344
1345 // fixme: set other headers ?
1346 Ok(Response::builder()
1347 .status(StatusCode::OK)
1348 .header(header::CONTENT_TYPE, "application/octet-stream")
1349 .body(body)
1350 .unwrap())
1351 }.boxed()
1352 }
1353
1354 #[api(
1355 input: {
1356 properties: {
1357 store: {
1358 schema: DATASTORE_SCHEMA,
1359 },
1360 timeframe: {
1361 type: RRDTimeFrameResolution,
1362 },
1363 cf: {
1364 type: RRDMode,
1365 },
1366 },
1367 },
1368 access: {
1369 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1370 },
1371 )]
1372 /// Read datastore stats
1373 fn get_rrd_stats(
1374 store: String,
1375 timeframe: RRDTimeFrameResolution,
1376 cf: RRDMode,
1377 _param: Value,
1378 ) -> Result<Value, Error> {
1379
1380 create_value_from_rrd(
1381 &format!("datastore/{}", store),
1382 &[
1383 "total", "used",
1384 "read_ios", "read_bytes",
1385 "write_ios", "write_bytes",
1386 "io_ticks",
1387 ],
1388 timeframe,
1389 cf,
1390 )
1391 }
1392
1393 #[api(
1394 input: {
1395 properties: {
1396 store: {
1397 schema: DATASTORE_SCHEMA,
1398 },
1399 "backup-type": {
1400 schema: BACKUP_TYPE_SCHEMA,
1401 },
1402 "backup-id": {
1403 schema: BACKUP_ID_SCHEMA,
1404 },
1405 "backup-time": {
1406 schema: BACKUP_TIME_SCHEMA,
1407 },
1408 },
1409 },
1410 access: {
1411 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1412 },
1413 )]
1414 /// Get "notes" for a specific backup
1415 fn get_notes(
1416 store: String,
1417 backup_type: String,
1418 backup_id: String,
1419 backup_time: i64,
1420 rpcenv: &mut dyn RpcEnvironment,
1421 ) -> Result<String, Error> {
1422 let datastore = DataStore::lookup_datastore(&store)?;
1423
1424 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1425 let user_info = CachedUserInfo::new()?;
1426 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1427
1428 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1429
1430 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1431 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1432
1433 let manifest = datastore.load_manifest_json(&backup_dir)?;
1434
1435 let notes = manifest["unprotected"]["notes"]
1436 .as_str()
1437 .unwrap_or("");
1438
1439 Ok(String::from(notes))
1440 }
1441
1442 #[api(
1443 input: {
1444 properties: {
1445 store: {
1446 schema: DATASTORE_SCHEMA,
1447 },
1448 "backup-type": {
1449 schema: BACKUP_TYPE_SCHEMA,
1450 },
1451 "backup-id": {
1452 schema: BACKUP_ID_SCHEMA,
1453 },
1454 "backup-time": {
1455 schema: BACKUP_TIME_SCHEMA,
1456 },
1457 notes: {
1458 description: "A multiline text.",
1459 },
1460 },
1461 },
1462 access: {
1463 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1464 },
1465 )]
1466 /// Set "notes" for a specific backup
1467 fn set_notes(
1468 store: String,
1469 backup_type: String,
1470 backup_id: String,
1471 backup_time: i64,
1472 notes: String,
1473 rpcenv: &mut dyn RpcEnvironment,
1474 ) -> Result<(), Error> {
1475 let datastore = DataStore::lookup_datastore(&store)?;
1476
1477 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1478 let user_info = CachedUserInfo::new()?;
1479 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1480
1481 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1482
1483 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1484 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1485
1486 let mut manifest = datastore.load_manifest_json(&backup_dir)?;
1487
1488 manifest["unprotected"]["notes"] = notes.into();
1489
1490 datastore.store_manifest(&backup_dir, manifest)?;
1491
1492 Ok(())
1493 }
1494
// Sub-routes available below `/admin/datastore/{store}/`.
// Entries are listed alphabetically by path segment; `SubdirMap` lookups
// rely on that ordering (the `#[sortable]` attribute supports compile-time
// sorting — keep new entries in order).
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1571
// Router for a single datastore: GET lists the available sub-directories,
// everything else dispatches into DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
1575
1576
// Top-level router for this module: GET lists all datastores; the `{store}`
// path segment is captured and routed to the per-datastore router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);