]> git.proxmox.com Git - proxmox-backup.git/blame_incremental - src/api2/admin/datastore.rs
verify: acquire shared snapshot flock and skip on error
[proxmox-backup.git] / src / api2 / admin / datastore.rs
... / ...
CommitLineData
1use std::collections::{HashSet, HashMap};
2use std::ffi::OsStr;
3use std::os::unix::ffi::OsStrExt;
4use std::sync::{Arc, Mutex};
5
6use anyhow::{bail, format_err, Error};
7use futures::*;
8use hyper::http::request::Parts;
9use hyper::{header, Body, Response, StatusCode};
10use serde_json::{json, Value};
11
12use proxmox::api::{
13 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
14 RpcEnvironment, RpcEnvironmentType, Permission
15};
16use proxmox::api::router::SubdirMap;
17use proxmox::api::schema::*;
18use proxmox::tools::fs::{replace_file, CreateOptions};
19use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
20
21use pxar::accessor::aio::Accessor;
22use pxar::EntryKind;
23
24use crate::api2::types::*;
25use crate::api2::node::rrd::create_value_from_rrd;
26use crate::backup::*;
27use crate::config::datastore;
28use crate::config::cached_user_info::CachedUserInfo;
29
30use crate::server::WorkerTask;
31use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
32use crate::config::acl::{
33 PRIV_DATASTORE_AUDIT,
34 PRIV_DATASTORE_MODIFY,
35 PRIV_DATASTORE_READ,
36 PRIV_DATASTORE_PRUNE,
37 PRIV_DATASTORE_BACKUP,
38};
39
40fn check_backup_owner(
41 store: &DataStore,
42 group: &BackupGroup,
43 userid: &Userid,
44) -> Result<(), Error> {
45 let owner = store.get_owner(group)?;
46 if &owner != userid {
47 bail!("backup owner check failed ({} != {})", userid, owner);
48 }
49 Ok(())
50}
51
52fn read_backup_index(
53 store: &DataStore,
54 backup_dir: &BackupDir,
55) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
56
57 let (manifest, index_size) = store.load_manifest(backup_dir)?;
58
59 let mut result = Vec::new();
60 for item in manifest.files() {
61 result.push(BackupContent {
62 filename: item.filename.clone(),
63 crypt_mode: Some(item.crypt_mode),
64 size: Some(item.size),
65 });
66 }
67
68 result.push(BackupContent {
69 filename: MANIFEST_BLOB_NAME.to_string(),
70 crypt_mode: match manifest.signature {
71 Some(_) => Some(CryptMode::SignOnly),
72 None => Some(CryptMode::None),
73 },
74 size: Some(index_size),
75 });
76
77 Ok((manifest, result))
78}
79
80fn get_all_snapshot_files(
81 store: &DataStore,
82 info: &BackupInfo,
83) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
84
85 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
86
87 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
88 acc.insert(item.filename.clone());
89 acc
90 });
91
92 for file in &info.files {
93 if file_set.contains(file) { continue; }
94 files.push(BackupContent {
95 filename: file.to_string(),
96 size: None,
97 crypt_mode: None,
98 });
99 }
100
101 Ok((manifest, files))
102}
103
104fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
105
106 let mut group_hash = HashMap::new();
107
108 for info in backup_list {
109 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
110 let time_list = group_hash.entry(group_id).or_insert(vec![]);
111 time_list.push(info);
112 }
113
114 group_hash
115}
116
117#[api(
118 input: {
119 properties: {
120 store: {
121 schema: DATASTORE_SCHEMA,
122 },
123 },
124 },
125 returns: {
126 type: Array,
127 description: "Returns the list of backup groups.",
128 items: {
129 type: GroupListItem,
130 }
131 },
132 access: {
133 permission: &Permission::Privilege(
134 &["datastore", "{store}"],
135 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
136 true),
137 },
138)]
139/// List backup groups.
140fn list_groups(
141 store: String,
142 rpcenv: &mut dyn RpcEnvironment,
143) -> Result<Vec<GroupListItem>, Error> {
144
145 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
146 let user_info = CachedUserInfo::new()?;
147 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
148
149 let datastore = DataStore::lookup_datastore(&store)?;
150
151 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
152
153 let group_hash = group_backups(backup_list);
154
155 let mut groups = Vec::new();
156
157 for (_group_id, mut list) in group_hash {
158
159 BackupInfo::sort_list(&mut list, false);
160
161 let info = &list[0];
162
163 let group = info.backup_dir.group();
164
165 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
166 let owner = datastore.get_owner(group)?;
167 if !list_all {
168 if owner != userid { continue; }
169 }
170
171 let result_item = GroupListItem {
172 backup_type: group.backup_type().to_string(),
173 backup_id: group.backup_id().to_string(),
174 last_backup: info.backup_dir.backup_time(),
175 backup_count: list.len() as u64,
176 files: info.files.clone(),
177 owner: Some(owner),
178 };
179 groups.push(result_item);
180 }
181
182 Ok(groups)
183}
184
185#[api(
186 input: {
187 properties: {
188 store: {
189 schema: DATASTORE_SCHEMA,
190 },
191 "backup-type": {
192 schema: BACKUP_TYPE_SCHEMA,
193 },
194 "backup-id": {
195 schema: BACKUP_ID_SCHEMA,
196 },
197 "backup-time": {
198 schema: BACKUP_TIME_SCHEMA,
199 },
200 },
201 },
202 returns: {
203 type: Array,
204 description: "Returns the list of archive files inside a backup snapshots.",
205 items: {
206 type: BackupContent,
207 }
208 },
209 access: {
210 permission: &Permission::Privilege(
211 &["datastore", "{store}"],
212 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
213 true),
214 },
215)]
216/// List snapshot files.
217pub fn list_snapshot_files(
218 store: String,
219 backup_type: String,
220 backup_id: String,
221 backup_time: i64,
222 _info: &ApiMethod,
223 rpcenv: &mut dyn RpcEnvironment,
224) -> Result<Vec<BackupContent>, Error> {
225
226 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
227 let user_info = CachedUserInfo::new()?;
228 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
229
230 let datastore = DataStore::lookup_datastore(&store)?;
231
232 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
233
234 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
235 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
236
237 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
238
239 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
240
241 Ok(files)
242}
243
244#[api(
245 input: {
246 properties: {
247 store: {
248 schema: DATASTORE_SCHEMA,
249 },
250 "backup-type": {
251 schema: BACKUP_TYPE_SCHEMA,
252 },
253 "backup-id": {
254 schema: BACKUP_ID_SCHEMA,
255 },
256 "backup-time": {
257 schema: BACKUP_TIME_SCHEMA,
258 },
259 },
260 },
261 access: {
262 permission: &Permission::Privilege(
263 &["datastore", "{store}"],
264 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
265 true),
266 },
267)]
268/// Delete backup snapshot.
269fn delete_snapshot(
270 store: String,
271 backup_type: String,
272 backup_id: String,
273 backup_time: i64,
274 _info: &ApiMethod,
275 rpcenv: &mut dyn RpcEnvironment,
276) -> Result<Value, Error> {
277
278 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
279 let user_info = CachedUserInfo::new()?;
280 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
281
282 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
283
284 let datastore = DataStore::lookup_datastore(&store)?;
285
286 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
287 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
288
289 datastore.remove_backup_dir(&snapshot, false)?;
290
291 Ok(Value::Null)
292}
293
294#[api(
295 input: {
296 properties: {
297 store: {
298 schema: DATASTORE_SCHEMA,
299 },
300 "backup-type": {
301 optional: true,
302 schema: BACKUP_TYPE_SCHEMA,
303 },
304 "backup-id": {
305 optional: true,
306 schema: BACKUP_ID_SCHEMA,
307 },
308 },
309 },
310 returns: {
311 type: Array,
312 description: "Returns the list of snapshots.",
313 items: {
314 type: SnapshotListItem,
315 }
316 },
317 access: {
318 permission: &Permission::Privilege(
319 &["datastore", "{store}"],
320 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
321 true),
322 },
323)]
324/// List backup snapshots.
325pub fn list_snapshots (
326 store: String,
327 backup_type: Option<String>,
328 backup_id: Option<String>,
329 _param: Value,
330 _info: &ApiMethod,
331 rpcenv: &mut dyn RpcEnvironment,
332) -> Result<Vec<SnapshotListItem>, Error> {
333
334 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
335 let user_info = CachedUserInfo::new()?;
336 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
337
338 let datastore = DataStore::lookup_datastore(&store)?;
339
340 let base_path = datastore.base_path();
341
342 let backup_list = BackupInfo::list_backups(&base_path)?;
343
344 let mut snapshots = vec![];
345
346 for info in backup_list {
347 let group = info.backup_dir.group();
348 if let Some(ref backup_type) = backup_type {
349 if backup_type != group.backup_type() { continue; }
350 }
351 if let Some(ref backup_id) = backup_id {
352 if backup_id != group.backup_id() { continue; }
353 }
354
355 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
356 let owner = datastore.get_owner(group)?;
357
358 if !list_all {
359 if owner != userid { continue; }
360 }
361
362 let mut size = None;
363
364 let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
365 Ok((manifest, files)) => {
366 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
367 // extract the first line from notes
368 let comment: Option<String> = manifest.unprotected["notes"]
369 .as_str()
370 .and_then(|notes| notes.lines().next())
371 .map(String::from);
372
373 let verify = manifest.unprotected["verify_state"].clone();
374 let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
375 Ok(verify) => verify,
376 Err(err) => {
377 eprintln!("error parsing verification state : '{}'", err);
378 None
379 }
380 };
381
382 (comment, verify, files)
383 },
384 Err(err) => {
385 eprintln!("error during snapshot file listing: '{}'", err);
386 (
387 None,
388 None,
389 info
390 .files
391 .iter()
392 .map(|x| BackupContent {
393 filename: x.to_string(),
394 size: None,
395 crypt_mode: None,
396 })
397 .collect()
398 )
399 },
400 };
401
402 let result_item = SnapshotListItem {
403 backup_type: group.backup_type().to_string(),
404 backup_id: group.backup_id().to_string(),
405 backup_time: info.backup_dir.backup_time(),
406 comment,
407 verification,
408 files,
409 size,
410 owner: Some(owner),
411 };
412
413 snapshots.push(result_item);
414 }
415
416 Ok(snapshots)
417}
418
419#[api(
420 input: {
421 properties: {
422 store: {
423 schema: DATASTORE_SCHEMA,
424 },
425 },
426 },
427 returns: {
428 type: StorageStatus,
429 },
430 access: {
431 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
432 },
433)]
434/// Get datastore status.
435pub fn status(
436 store: String,
437 _info: &ApiMethod,
438 _rpcenv: &mut dyn RpcEnvironment,
439) -> Result<StorageStatus, Error> {
440 let datastore = DataStore::lookup_datastore(&store)?;
441 crate::tools::disks::disk_usage(&datastore.base_path())
442}
443
444#[api(
445 input: {
446 properties: {
447 store: {
448 schema: DATASTORE_SCHEMA,
449 },
450 "backup-type": {
451 schema: BACKUP_TYPE_SCHEMA,
452 optional: true,
453 },
454 "backup-id": {
455 schema: BACKUP_ID_SCHEMA,
456 optional: true,
457 },
458 "backup-time": {
459 schema: BACKUP_TIME_SCHEMA,
460 optional: true,
461 },
462 },
463 },
464 returns: {
465 schema: UPID_SCHEMA,
466 },
467 access: {
468 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
469 },
470)]
471/// Verify backups.
472///
473/// This function can verify a single backup snapshot, all backup from a backup group,
474/// or all backups in the datastore.
475pub fn verify(
476 store: String,
477 backup_type: Option<String>,
478 backup_id: Option<String>,
479 backup_time: Option<i64>,
480 rpcenv: &mut dyn RpcEnvironment,
481) -> Result<Value, Error> {
482 let datastore = DataStore::lookup_datastore(&store)?;
483
484 let worker_id;
485
486 let mut backup_dir = None;
487 let mut backup_group = None;
488
489 match (backup_type, backup_id, backup_time) {
490 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
491 worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
492 let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
493 backup_dir = Some(dir);
494 }
495 (Some(backup_type), Some(backup_id), None) => {
496 worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
497 let group = BackupGroup::new(backup_type, backup_id);
498 backup_group = Some(group);
499 }
500 (None, None, None) => {
501 worker_id = store.clone();
502 }
503 _ => bail!("parameters do not specify a backup group or snapshot"),
504 }
505
506 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
507 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
508
509 let upid_str = WorkerTask::new_thread(
510 "verify",
511 Some(worker_id.clone()),
512 userid,
513 to_stdout,
514 move |worker| {
515 let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
516 let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
517
518 let failed_dirs = if let Some(backup_dir) = backup_dir {
519 let mut res = Vec::new();
520 if !verify_backup_dir(
521 datastore,
522 &backup_dir,
523 verified_chunks,
524 corrupt_chunks,
525 worker.clone(),
526 worker.upid().clone(),
527 )? {
528 res.push(backup_dir.to_string());
529 }
530 res
531 } else if let Some(backup_group) = backup_group {
532 let (_count, failed_dirs) = verify_backup_group(
533 datastore,
534 &backup_group,
535 verified_chunks,
536 corrupt_chunks,
537 None,
538 worker.clone(),
539 worker.upid(),
540 )?;
541 failed_dirs
542 } else {
543 verify_all_backups(datastore, worker.clone(), worker.upid())?
544 };
545 if failed_dirs.len() > 0 {
546 worker.log("Failed to verify following snapshots:");
547 for dir in failed_dirs {
548 worker.log(format!("\t{}", dir));
549 }
550 bail!("verification failed - please check the log for details");
551 }
552 Ok(())
553 },
554 )?;
555
556 Ok(json!(upid_str))
557}
558
#[macro_export]
/// Expands to an array of `(name, optional, schema)` parameter tuples
/// containing the six common `keep-*` prune options. An optional second
/// bracketed list appends further tuples after them.
///
/// NOTE(review): "prameters" is a typo, but the macro name is part of the
/// exported interface (used by API_METHOD_PRUNE below), so it stays as-is.
macro_rules! add_common_prune_prameters {
    // Single-list form: delegate to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    // Full form: prefix tuples, then the keep-* options, then suffix tuples.
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
601
/// Return schema of the prune API call: one `PruneListItem` per affected
/// snapshot, each carrying a keep/remove flag.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
606
// Hand-assembled method definition for `prune` below - written out
// explicitly so the parameter list can reuse the
// add_common_prune_prameters! macro for the shared keep-* options.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);
628
629fn prune(
630 param: Value,
631 _info: &ApiMethod,
632 rpcenv: &mut dyn RpcEnvironment,
633) -> Result<Value, Error> {
634
635 let store = tools::required_string_param(&param, "store")?;
636 let backup_type = tools::required_string_param(&param, "backup-type")?;
637 let backup_id = tools::required_string_param(&param, "backup-id")?;
638
639 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
640 let user_info = CachedUserInfo::new()?;
641 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
642
643 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
644
645 let group = BackupGroup::new(backup_type, backup_id);
646
647 let datastore = DataStore::lookup_datastore(&store)?;
648
649 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
650 if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
651
652 let prune_options = PruneOptions {
653 keep_last: param["keep-last"].as_u64(),
654 keep_hourly: param["keep-hourly"].as_u64(),
655 keep_daily: param["keep-daily"].as_u64(),
656 keep_weekly: param["keep-weekly"].as_u64(),
657 keep_monthly: param["keep-monthly"].as_u64(),
658 keep_yearly: param["keep-yearly"].as_u64(),
659 };
660
661 let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
662
663 let mut prune_result = Vec::new();
664
665 let list = group.list_backups(&datastore.base_path())?;
666
667 let mut prune_info = compute_prune_info(list, &prune_options)?;
668
669 prune_info.reverse(); // delete older snapshots first
670
671 let keep_all = !prune_options.keeps_something();
672
673 if dry_run {
674 for (info, mut keep) in prune_info {
675 if keep_all { keep = true; }
676
677 let backup_time = info.backup_dir.backup_time();
678 let group = info.backup_dir.group();
679
680 prune_result.push(json!({
681 "backup-type": group.backup_type(),
682 "backup-id": group.backup_id(),
683 "backup-time": backup_time,
684 "keep": keep,
685 }));
686 }
687 return Ok(json!(prune_result));
688 }
689
690
691 // We use a WorkerTask just to have a task log, but run synchrounously
692 let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
693
694 if keep_all {
695 worker.log("No prune selection - keeping all files.");
696 } else {
697 worker.log(format!("retention options: {}", prune_options.cli_options_string()));
698 worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
699 store, backup_type, backup_id));
700 }
701
702 for (info, mut keep) in prune_info {
703 if keep_all { keep = true; }
704
705 let backup_time = info.backup_dir.backup_time();
706 let timestamp = info.backup_dir.backup_time_string();
707 let group = info.backup_dir.group();
708
709
710 let msg = format!(
711 "{}/{}/{} {}",
712 group.backup_type(),
713 group.backup_id(),
714 timestamp,
715 if keep { "keep" } else { "remove" },
716 );
717
718 worker.log(msg);
719
720 prune_result.push(json!({
721 "backup-type": group.backup_type(),
722 "backup-id": group.backup_id(),
723 "backup-time": backup_time,
724 "keep": keep,
725 }));
726
727 if !(dry_run || keep) {
728 if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
729 worker.warn(
730 format!(
731 "failed to remove dir {:?}: {}",
732 info.backup_dir.relative_path(), err
733 )
734 );
735 }
736 }
737 }
738
739 worker.log_result(&Ok(()));
740
741 Ok(json!(prune_result))
742}
743
744#[api(
745 input: {
746 properties: {
747 store: {
748 schema: DATASTORE_SCHEMA,
749 },
750 },
751 },
752 returns: {
753 schema: UPID_SCHEMA,
754 },
755 access: {
756 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
757 },
758)]
759/// Start garbage collection.
760fn start_garbage_collection(
761 store: String,
762 _info: &ApiMethod,
763 rpcenv: &mut dyn RpcEnvironment,
764) -> Result<Value, Error> {
765
766 let datastore = DataStore::lookup_datastore(&store)?;
767
768 println!("Starting garbage collection on store {}", store);
769
770 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
771
772 let upid_str = WorkerTask::new_thread(
773 "garbage_collection",
774 Some(store.clone()),
775 Userid::root_userid().clone(),
776 to_stdout,
777 move |worker| {
778 worker.log(format!("starting garbage collection on store {}", store));
779 datastore.garbage_collection(&*worker, worker.upid())
780 },
781 )?;
782
783 Ok(json!(upid_str))
784}
785
786#[api(
787 input: {
788 properties: {
789 store: {
790 schema: DATASTORE_SCHEMA,
791 },
792 },
793 },
794 returns: {
795 type: GarbageCollectionStatus,
796 },
797 access: {
798 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
799 },
800)]
801/// Garbage collection status.
802pub fn garbage_collection_status(
803 store: String,
804 _info: &ApiMethod,
805 _rpcenv: &mut dyn RpcEnvironment,
806) -> Result<GarbageCollectionStatus, Error> {
807
808 let datastore = DataStore::lookup_datastore(&store)?;
809
810 let status = datastore.last_gc_status();
811
812 Ok(status)
813}
814
815#[api(
816 returns: {
817 description: "List the accessible datastores.",
818 type: Array,
819 items: {
820 description: "Datastore name and description.",
821 properties: {
822 store: {
823 schema: DATASTORE_SCHEMA,
824 },
825 comment: {
826 optional: true,
827 schema: SINGLE_LINE_COMMENT_SCHEMA,
828 },
829 },
830 },
831 },
832 access: {
833 permission: &Permission::Anybody,
834 },
835)]
836/// Datastore list
837fn get_datastore_list(
838 _param: Value,
839 _info: &ApiMethod,
840 rpcenv: &mut dyn RpcEnvironment,
841) -> Result<Value, Error> {
842
843 let (config, _digest) = datastore::config()?;
844
845 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
846 let user_info = CachedUserInfo::new()?;
847
848 let mut list = Vec::new();
849
850 for (store, (_, data)) in &config.sections {
851 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
852 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
853 if allowed {
854 let mut entry = json!({ "store": store });
855 if let Some(comment) = data["comment"].as_str() {
856 entry["comment"] = comment.into();
857 }
858 list.push(entry);
859 }
860 }
861
862 Ok(list.into())
863}
864
// Hand-written method schema for the async HTTP handler `download_file`
// below: downloads one raw (still possibly encrypted) file from a
// backup snapshot.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
883
// Async HTTP handler for API_METHOD_DOWNLOAD_FILE.
//
// Streams the requested file from `<base>/<snapshot>/<file-name>` as
// `application/octet-stream`. Callers without PRIV_DATASTORE_READ must
// own the backup group.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Readers may download anything; everyone else only their own backups.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // Stream the file in chunks instead of buffering it in memory.
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
937
// Hand-written method schema for the async HTTP handler
// `download_file_decoded` below: downloads a file in decoded form;
// encrypted files are rejected by the handler.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
956
// Async HTTP handler for API_METHOD_DOWNLOAD_FILE_DECODED.
//
// Streams the decoded contents of an archive file (chunks reassembled
// for .didx/.fidx indexes, raw data for .blob). Encrypted files are
// refused since the server cannot decrypt them.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Readers may download anything; everyone else only their own backups.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        // Reject encrypted files up front - they cannot be decoded here.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Select the decoding strategy by file-name extension.
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                // Dynamic chunk index: verify index checksum against the
                // manifest, then stream via the chunk reader.
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                // Fixed chunk index: same verification, with a larger
                // (4 MiB) stream buffer.
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                // Plain data blob: stream through the blob reader directly.
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1056
// Hand-written method schema for the async HTTP handler
// `upload_backup_log` below; restricted to the backup owner.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
1073
// Async HTTP handler for API_METHOD_UPLOAD_BACKUP_LOG.
//
// Stores the uploaded client log blob as 'client.log.blob' inside the
// snapshot directory. Only the owner may upload, and only once per
// snapshot.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Unconditional ownership check - no privilege can bypass it here.
        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // NOTE(review): exists()-then-write is racy (TOCTOU); presumably
        // acceptable under the snapshot locking scheme - confirm.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        // Collect the whole request body into memory.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1125
1126#[api(
1127 input: {
1128 properties: {
1129 store: {
1130 schema: DATASTORE_SCHEMA,
1131 },
1132 "backup-type": {
1133 schema: BACKUP_TYPE_SCHEMA,
1134 },
1135 "backup-id": {
1136 schema: BACKUP_ID_SCHEMA,
1137 },
1138 "backup-time": {
1139 schema: BACKUP_TIME_SCHEMA,
1140 },
1141 "filepath": {
1142 description: "Base64 encoded path.",
1143 type: String,
1144 }
1145 },
1146 },
1147 access: {
1148 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1149 },
1150)]
1151/// Get the entries of the given path of the catalog
1152fn catalog(
1153 store: String,
1154 backup_type: String,
1155 backup_id: String,
1156 backup_time: i64,
1157 filepath: String,
1158 _param: Value,
1159 _info: &ApiMethod,
1160 rpcenv: &mut dyn RpcEnvironment,
1161) -> Result<Value, Error> {
1162 let datastore = DataStore::lookup_datastore(&store)?;
1163
1164 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1165 let user_info = CachedUserInfo::new()?;
1166 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1167
1168 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1169
1170 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1171 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1172
1173 let file_name = CATALOG_NAME;
1174
1175 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1176 for file in files {
1177 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1178 bail!("cannot decode '{}' - is encrypted", file_name);
1179 }
1180 }
1181
1182 let mut path = datastore.base_path();
1183 path.push(backup_dir.relative_path());
1184 path.push(file_name);
1185
1186 let index = DynamicIndexReader::open(&path)
1187 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1188
1189 let (csum, size) = index.compute_csum();
1190 manifest.verify_file(&file_name, &csum, size)?;
1191
1192 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1193 let reader = BufferedDynamicReader::new(index, chunk_reader);
1194
1195 let mut catalog_reader = CatalogReader::new(reader);
1196 let mut current = catalog_reader.root()?;
1197 let mut components = vec![];
1198
1199
1200 if filepath != "root" {
1201 components = base64::decode(filepath)?;
1202 if components.len() > 0 && components[0] == '/' as u8 {
1203 components.remove(0);
1204 }
1205 for component in components.split(|c| *c == '/' as u8) {
1206 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1207 current = entry;
1208 } else {
1209 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1210 }
1211 }
1212 }
1213
1214 let mut res = Vec::new();
1215
1216 for direntry in catalog_reader.read_dir(&current)? {
1217 let mut components = components.clone();
1218 components.push('/' as u8);
1219 components.extend(&direntry.name);
1220 let path = base64::encode(components);
1221 let text = String::from_utf8_lossy(&direntry.name);
1222 let mut entry = json!({
1223 "filepath": path,
1224 "text": text,
1225 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1226 "leaf": true,
1227 });
1228 match direntry.attr {
1229 DirEntryAttribute::Directory { start: _ } => {
1230 entry["leaf"] = false.into();
1231 },
1232 DirEntryAttribute::File { size, mtime } => {
1233 entry["size"] = size.into();
1234 entry["mtime"] = mtime.into();
1235 },
1236 _ => {},
1237 }
1238 res.push(entry);
1239 }
1240
1241 Ok(res.into())
1242}
1243
// Hand-written ApiMethod (instead of the #[api] macro) because the handler is
// an AsyncHttp endpoint returning a raw streaming response, not JSON.
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
// PRIV_DATASTORE_BACKUP alone only grants access to the user's own snapshots
// (owner check inside the handler); PRIV_DATASTORE_READ grants all snapshots.
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1262
1263fn pxar_file_download(
1264 _parts: Parts,
1265 _req_body: Body,
1266 param: Value,
1267 _info: &ApiMethod,
1268 rpcenv: Box<dyn RpcEnvironment>,
1269) -> ApiResponseFuture {
1270
1271 async move {
1272 let store = tools::required_string_param(&param, "store")?;
1273 let datastore = DataStore::lookup_datastore(&store)?;
1274
1275 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1276 let user_info = CachedUserInfo::new()?;
1277 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1278
1279 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1280
1281 let backup_type = tools::required_string_param(&param, "backup-type")?;
1282 let backup_id = tools::required_string_param(&param, "backup-id")?;
1283 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1284
1285 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1286
1287 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1288 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1289
1290 let mut components = base64::decode(&filepath)?;
1291 if components.len() > 0 && components[0] == '/' as u8 {
1292 components.remove(0);
1293 }
1294
1295 let mut split = components.splitn(2, |c| *c == '/' as u8);
1296 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
1297 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
1298 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1299 for file in files {
1300 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1301 bail!("cannot decode '{}' - is encrypted", pxar_name);
1302 }
1303 }
1304
1305 let mut path = datastore.base_path();
1306 path.push(backup_dir.relative_path());
1307 path.push(pxar_name);
1308
1309 let index = DynamicIndexReader::open(&path)
1310 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1311
1312 let (csum, size) = index.compute_csum();
1313 manifest.verify_file(&pxar_name, &csum, size)?;
1314
1315 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1316 let reader = BufferedDynamicReader::new(index, chunk_reader);
1317 let archive_size = reader.archive_size();
1318 let reader = LocalDynamicReadAt::new(reader);
1319
1320 let decoder = Accessor::new(reader, archive_size).await?;
1321 let root = decoder.open_root().await?;
1322 let file = root
1323 .lookup(OsStr::from_bytes(file_path)).await?
1324 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1325
1326 let file = match file.kind() {
1327 EntryKind::File { .. } => file,
1328 EntryKind::Hardlink(_) => {
1329 decoder.follow_hardlink(&file).await?
1330 },
1331 // TODO symlink
1332 other => bail!("cannot download file of type {:?}", other),
1333 };
1334
1335 let body = Body::wrap_stream(
1336 AsyncReaderStream::new(file.contents().await?)
1337 .map_err(move |err| {
1338 eprintln!("error during streaming of '{:?}' - {}", filepath, err);
1339 err
1340 })
1341 );
1342
1343 // fixme: set other headers ?
1344 Ok(Response::builder()
1345 .status(StatusCode::OK)
1346 .header(header::CONTENT_TYPE, "application/octet-stream")
1347 .body(body)
1348 .unwrap())
1349 }.boxed()
1350}
1351
1352#[api(
1353 input: {
1354 properties: {
1355 store: {
1356 schema: DATASTORE_SCHEMA,
1357 },
1358 timeframe: {
1359 type: RRDTimeFrameResolution,
1360 },
1361 cf: {
1362 type: RRDMode,
1363 },
1364 },
1365 },
1366 access: {
1367 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1368 },
1369)]
1370/// Read datastore stats
1371fn get_rrd_stats(
1372 store: String,
1373 timeframe: RRDTimeFrameResolution,
1374 cf: RRDMode,
1375 _param: Value,
1376) -> Result<Value, Error> {
1377
1378 create_value_from_rrd(
1379 &format!("datastore/{}", store),
1380 &[
1381 "total", "used",
1382 "read_ios", "read_bytes",
1383 "write_ios", "write_bytes",
1384 "io_ticks",
1385 ],
1386 timeframe,
1387 cf,
1388 )
1389}
1390
1391#[api(
1392 input: {
1393 properties: {
1394 store: {
1395 schema: DATASTORE_SCHEMA,
1396 },
1397 "backup-type": {
1398 schema: BACKUP_TYPE_SCHEMA,
1399 },
1400 "backup-id": {
1401 schema: BACKUP_ID_SCHEMA,
1402 },
1403 "backup-time": {
1404 schema: BACKUP_TIME_SCHEMA,
1405 },
1406 },
1407 },
1408 access: {
1409 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1410 },
1411)]
1412/// Get "notes" for a specific backup
1413fn get_notes(
1414 store: String,
1415 backup_type: String,
1416 backup_id: String,
1417 backup_time: i64,
1418 rpcenv: &mut dyn RpcEnvironment,
1419) -> Result<String, Error> {
1420 let datastore = DataStore::lookup_datastore(&store)?;
1421
1422 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1423 let user_info = CachedUserInfo::new()?;
1424 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1425
1426 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1427
1428 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1429 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1430
1431 let manifest = datastore.load_manifest_json(&backup_dir)?;
1432
1433 let notes = manifest["unprotected"]["notes"]
1434 .as_str()
1435 .unwrap_or("");
1436
1437 Ok(String::from(notes))
1438}
1439
1440#[api(
1441 input: {
1442 properties: {
1443 store: {
1444 schema: DATASTORE_SCHEMA,
1445 },
1446 "backup-type": {
1447 schema: BACKUP_TYPE_SCHEMA,
1448 },
1449 "backup-id": {
1450 schema: BACKUP_ID_SCHEMA,
1451 },
1452 "backup-time": {
1453 schema: BACKUP_TIME_SCHEMA,
1454 },
1455 notes: {
1456 description: "A multiline text.",
1457 },
1458 },
1459 },
1460 access: {
1461 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1462 },
1463)]
1464/// Set "notes" for a specific backup
1465fn set_notes(
1466 store: String,
1467 backup_type: String,
1468 backup_id: String,
1469 backup_time: i64,
1470 notes: String,
1471 rpcenv: &mut dyn RpcEnvironment,
1472) -> Result<(), Error> {
1473 let datastore = DataStore::lookup_datastore(&store)?;
1474
1475 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
1476 let user_info = CachedUserInfo::new()?;
1477 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
1478
1479 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1480
1481 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1482 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
1483
1484 let mut manifest = datastore.load_manifest_json(&backup_dir)?;
1485
1486 manifest["unprotected"]["notes"] = notes.into();
1487
1488 datastore.store_manifest(&backup_dir, manifest)?;
1489
1490 Ok(())
1491}
1492
1493#[api(
1494 input: {
1495 properties: {
1496 store: {
1497 schema: DATASTORE_SCHEMA,
1498 },
1499 "backup-type": {
1500 schema: BACKUP_TYPE_SCHEMA,
1501 },
1502 "backup-id": {
1503 schema: BACKUP_ID_SCHEMA,
1504 },
1505 "new-owner": {
1506 type: Userid,
1507 },
1508 },
1509 },
1510 access: {
1511 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1512 },
1513)]
1514/// Change owner of a backup group
1515fn set_backup_owner(
1516 store: String,
1517 backup_type: String,
1518 backup_id: String,
1519 new_owner: Userid,
1520 _rpcenv: &mut dyn RpcEnvironment,
1521) -> Result<(), Error> {
1522
1523 let datastore = DataStore::lookup_datastore(&store)?;
1524
1525 let backup_group = BackupGroup::new(backup_type, backup_id);
1526
1527 let user_info = CachedUserInfo::new()?;
1528
1529 if !user_info.is_active_user(&new_owner) {
1530 bail!("user '{}' is inactive or non-existent", new_owner);
1531 }
1532
1533 datastore.set_owner(&backup_group, &new_owner, true)?;
1534
1535 Ok(())
1536}
1537
// Per-datastore API subdirectories, mounted below /admin/datastore/{store}.
// Entries must stay sorted by name - list_subdirs_api_method! relies on it.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1619
// Router for a single datastore: GET lists the subdirectory names,
// everything else dispatches into DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


// Top-level /admin/datastore router: GET lists all datastores, and the
// "{store}" path component selects the per-datastore router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);