use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::sync::{Arc, Mutex};
use std::path::{Path, PathBuf};
use std::pin::Pin;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
use pxar::EntryKind;

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;

use crate::server::WorkerTask;
use crate::tools::{
    self,
    zip::{ZipEncoder, ZipEntry},
    AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
};

use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
};

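/// Check whether `userid` is the owner of the given backup group.
///
/// The endpoints below use this as a fallback: a user without the required
/// datastore privilege may still operate on backup groups they own, e.g.:
///
/// ```ignore
/// // a minimal sketch of the usage pattern found throughout this file
/// let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
/// if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
/// ```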
fn check_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    userid: &Userid,
) -> Result<(), Error> {
    let owner = store.get_owner(group)?;
    if &owner != userid {
        bail!("backup owner check failed ({} != {})", userid, owner);
    }
    Ok(())
}

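/// Load the manifest of a backup snapshot and return it together with the
/// list of files it references. The manifest blob itself is appended to the
/// list, with its crypt mode derived from whether it carries a signature.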
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

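/// Like `read_backup_index`, but additionally lists files present in the
/// snapshot directory that are not referenced by the manifest (those get
/// neither a size nor a crypt mode).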
fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

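/// Group a flat list of backups by their group path (`<backup-type>/<backup-id>`).
///
/// A minimal sketch of the expected result, assuming a datastore that
/// contains snapshots of a backup group `vm/100`:
///
/// ```ignore
/// let groups = group_backups(BackupInfo::list_backups(&base_path)?);
/// let vm_100_snapshots = &groups["vm/100"]; // all snapshots of that group
/// ```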
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert(vec![]);
        time_list.push(info);
    }

    group_hash
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = Vec::new();

    for (_group_id, mut list) in group_hash {

        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];

        let group = info.backup_dir.group();

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;
        if !list_all && owner != userid {
            continue;
        }

        let result_item = GroupListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            last_backup: info.backup_dir.backup_time(),
            backup_count: list.len() as u64,
            files: info.files.clone(),
            owner: Some(owner),
        };
        groups.push(result_item);
    }

    Ok(groups)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(ref backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(ref backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;

        if !list_all && owner != userid {
            continue;
        }

        let mut size = None;

        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let verify = manifest.unprotected["verify_state"].clone();
                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                (comment, verify, files)
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                (
                    None,
                    None,
                    info
                        .files
                        .iter()
                        .map(|x| BackupContent {
                            filename: x.to_string(),
                            size: None,
                            crypt_mode: None,
                        })
                        .collect()
                )
            },
        };

        let result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time(),
            comment,
            verification,
            files,
            size,
            owner: Some(owner),
        };

        snapshots.push(result_item);
    }

    Ok(snapshots)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    crate::tools::disks::disk_usage(&datastore.base_path())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

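    // The worker_id encodes the verification scope:
    //   whole datastore:  "{store}"
    //   backup group:     "{store}:{backup_type}/{backup_id}"
    //   single snapshot:  "{store}:{backup_type}/{backup_id}/{backup_time:08X}"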
    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);
            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id.clone()),
        userid,
        to_stdout,
        move |worker| {
            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));

            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    datastore,
                    &backup_dir,
                    verified_chunks,
                    corrupt_chunks,
                    worker.clone(),
                    worker.upid().clone(),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let (_count, failed_dirs) = verify_backup_group(
                    datastore,
                    &backup_group,
                    verified_chunks,
                    corrupt_chunks,
                    None,
                    worker.clone(),
                    worker.upid(),
                )?;
                failed_dirs
            } else {
                verify_all_backups(datastore, worker.clone(), worker.upid())?
            };
            if !failed_dirs.is_empty() {
                worker.log("Failed to verify the following snapshots:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

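/// Splice the common `keep-*` prune options into a parameter list.
///
/// A minimal sketch of an invocation (mirroring `API_METHOD_PRUNE` below);
/// entries from the optional second list are appended after the generated
/// `keep-*` entries:
///
/// ```ignore
/// let params = add_common_prune_parameters!(
///     [("backup-id", false, &BACKUP_ID_SCHEMA)],
///     [("store", false, &DATASTORE_SCHEMA)]
/// );
/// ```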
#[macro_export]
macro_rules! add_common_prune_parameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_parameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}

pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating whether they are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_parameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);

fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, &group, &userid)?; }

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

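    // if no keep-* option was specified at all, prune would select every
    // snapshot for removal, so treat this case as "keep all" instead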
    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
            store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();

        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection",
        Some(store.clone()),
        Userid::root_userid().clone(),
        to_stdout,
        move |worker| {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(&*worker, worker.upid())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: {
            description: "Datastore name and description.",
            properties: {
                store: {
                    schema: DATASTORE_SCHEMA,
                },
                comment: {
                    optional: true,
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                },
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list.
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            let mut entry = json!({ "store": store });
            if let Some(comment) = data["comment"].as_str() {
                entry["comment"] = comment.into();
            }
            list.push(entry);
        }
    }

    Ok(list.into())
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

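        // the archive type is determined by the last extension of the
        // file name ("didx", "fidx" or "blob")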
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog.
fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);
    let mut current = catalog_reader.root()?;
    let mut components = vec![];

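    // "filepath" is a base64 encoded, '/'-separated path into the catalog;
    // the literal string "root" addresses the catalog root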
    if filepath != "root" {
        components = base64::decode(filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }
        for component in components.split(|c| *c == b'/') {
            if let Some(entry) = catalog_reader.lookup(&current, component)? {
                current = entry;
            } else {
                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
            }
        }
    }

    let mut res = Vec::new();

    for direntry in catalog_reader.read_dir(&current)? {
        let mut components = components.clone();
        components.push(b'/');
        components.extend(&direntry.name);
        let path = base64::encode(components);
        let text = String::from_utf8_lossy(&direntry.name);
        let mut entry = json!({
            "filepath": path,
            "text": text,
            "type": CatalogEntryType::from(&direntry.attr).to_string(),
            "leaf": true,
        });
        match direntry.attr {
            DirEntryAttribute::Directory { start: _ } => {
                entry["leaf"] = false.into();
            },
            DirEntryAttribute::File { size, mtime } => {
                entry["size"] = size.into();
                entry["mtime"] = mtime.into();
            },
            _ => {},
        }
        res.push(entry);
    }

    Ok(res.into())
}

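/// Recursively add the given pxar entry (and, for directories, everything
/// below it) to the zip encoder. This returns a boxed future because async
/// functions cannot recurse directly.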
fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

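        // the first path component selects the pxar archive inside the
        // snapshot, the remainder is the path of the file within that archive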
        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or_else(|| format_err!("filepath looks strange '{}'", filepath))?;
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or_else(|| format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
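                // stream the directory as a zip archive: a spawned task
                // writes the encoded zip into a bounded channel while the
                // receiving end is served as the response body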
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard the last component
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creation of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error during finishing of zip: {}", err))
                });

                Body::wrap_stream(receiver.map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats.
fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup.
fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
    },
)]
/// Set "notes" for a specific backup.
fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Userid,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
    },
)]
/// Change owner of a backup group.
fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Userid,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let user_info = CachedUserInfo::new()?;

    if !user_info.is_active_user(&new_owner) {
        bail!("user '{}' is inactive or non-existent", new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);