// src/api2/admin/datastore.rs
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::sync::{Arc, Mutex};
use std::path::PathBuf;
use std::pin::Pin;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
use pxar::EntryKind;

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;

use crate::server::WorkerTask;
use crate::tools::{
    self,
    zip::{ZipEncoder, ZipEntry},
    AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
};

use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
};

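/// Check that `userid` is the owner of the backup group `group` in `store`.
///
/// A minimal usage sketch (hypothetical store/group names, assuming a
/// configured datastore called "store1"):
///
/// ```ignore
/// let datastore = DataStore::lookup_datastore("store1")?;
/// let group = BackupGroup::new("vm", "100");
/// // fails with an error unless `userid` owns vm/100
/// check_backup_owner(&datastore, &group, &userid)?;
/// ```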
fn check_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    userid: &Userid,
) -> Result<(), Error> {
    let owner = store.get_owner(group)?;
    if &owner != userid {
        bail!("backup owner check failed ({} != {})", userid, owner);
    }
    Ok(())
}

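/// Load the manifest of a snapshot and return it together with the list of
/// files it describes. The manifest blob itself is appended to that list,
/// with its crypt mode derived from whether the manifest carries a signature.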
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

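/// Like `read_backup_index`, but additionally list files that are present in
/// the snapshot directory without being referenced by the manifest (those get
/// `size: None` and `crypt_mode: None`).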
fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

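/// Group a flat list of backups by their group path (e.g. "vm/100"), mapping
/// each group id to the snapshots it contains.
///
/// Sketch of the resulting shape (hypothetical ids, assuming two snapshots of
/// the same guest):
///
/// ```ignore
/// // input:  [vm/100/2020-10-01..., vm/100/2020-10-02...]
/// // output: { "vm/100" => [info_1, info_2] }
/// let group_hash = group_backups(backup_list);
/// ```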
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert(vec![]);
        time_list.push(info);
    }

    group_hash
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = Vec::new();

    for (_group_id, mut list) in group_hash {

        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];

        let group = info.backup_dir.group();

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;
        if !list_all && owner != userid {
            continue;
        }

        let result_item = GroupListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            last_backup: info.backup_dir.backup_time(),
            backup_count: list.len() as u64,
            files: info.files.clone(),
            owner: Some(owner),
        };
        groups.push(result_item);
    }

    Ok(groups)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(ref backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(ref backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;

        if !list_all && owner != userid {
            continue;
        }

        let mut size = None;

        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let verify = manifest.unprotected["verify_state"].clone();
                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                (comment, verify, files)
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                (
                    None,
                    None,
                    info
                        .files
                        .iter()
                        .map(|x| BackupContent {
                            filename: x.to_string(),
                            size: None,
                            crypt_mode: None,
                        })
                        .collect()
                )
            },
        };

        let result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time(),
            comment,
            verification,
            files,
            size,
            owner: Some(owner),
        };

        snapshots.push(result_item);
    }

    Ok(snapshots)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    crate::tools::disks::disk_usage(&datastore.base_path())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
            backup_dir = Some(dir);
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);
            backup_group = Some(group);
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "verify",
        Some(worker_id.clone()),
        userid,
        to_stdout,
        move |worker| {
            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));

            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    datastore,
                    &backup_dir,
                    verified_chunks,
                    corrupt_chunks,
                    worker.clone(),
                    worker.upid().clone(),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let (_count, failed_dirs) = verify_backup_group(
                    datastore,
                    &backup_group,
                    verified_chunks,
                    corrupt_chunks,
                    None,
                    worker.clone(),
                    worker.upid(),
                )?;
                failed_dirs
            } else {
                verify_all_backups(datastore, worker.clone(), worker.upid())?
            };
            if !failed_dirs.is_empty() {
                worker.log("Failed to verify the following snapshots:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

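/// Splice the common `keep-*` prune parameters into a property list for an
/// `ObjectSchema`. A hedged usage sketch, mirroring the real invocation in
/// `API_METHOD_PRUNE` below (the description string is illustrative only):
///
/// ```ignore
/// const PROPERTIES: ObjectSchema = ObjectSchema::new(
///     "Example schema with the common keep-* options.",
///     &add_common_prune_prameters!([], [("store", false, &DATASTORE_SCHEMA)]),
/// );
/// ```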
#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}

pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating whether they are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
    );

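/// Prune a backup group: evaluate the `keep-*` options against the group's
/// snapshot list and remove everything that is not kept (unless `dry-run` is
/// set). Runs synchronously, but inside a WorkerTask so that all actions show
/// up in a task log.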
fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, &group, &userid)?; }

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
            store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();

        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection",
        Some(store.clone()),
        Userid::root_userid().clone(),
        to_stdout,
        move |worker| {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(&*worker, worker.upid())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: {
            description: "Datastore name and description.",
            properties: {
                store: {
                    schema: DATASTORE_SCHEMA,
                },
                comment: {
                    optional: true,
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                },
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            let mut entry = json!({ "store": store });
            if let Some(comment) = data["comment"].as_str() {
                entry["comment"] = comment.into();
            }
            list.push(entry);
        }
    }

    Ok(list.into())
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

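/// Stream a single raw file out of a backup snapshot over HTTP. The file is
/// opened asynchronously and wrapped in a `FramedRead`/`BytesCodec` pair, so
/// it is sent chunk-wise instead of being read into memory as a whole.
///
/// Hedged request sketch (the URL prefix is an assumption about where this
/// router is mounted; parameter names follow the schema above):
///
/// ```ignore
/// // GET /api2/json/admin/datastore/{store}/download
/// //     ?backup-type=vm&backup-id=100&backup-time=1601234567&file-name=catalog.pcat1.didx
/// ```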
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

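/// Like `download_file`, but decode the archive on the fly: `.didx`/`.fidx`
/// indexes are resolved chunk by chunk through an `AsyncIndexReader`, and
/// `.blob` files are unwrapped via `DataBlobReader`. Encrypted files are
/// rejected up front, since the server cannot decode them without the client
/// key.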
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

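/// Accept a client log blob and store it as `client.log.blob` next to the
/// snapshot. The request body is collected into memory, parsed as a
/// `DataBlob` (which verifies the CRC server-side) and written atomically via
/// `replace_file`. Uploading a second log for the same snapshot is rejected.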
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);
    let mut current = catalog_reader.root()?;
    let mut components = vec![];

    if filepath != "root" {
        components = base64::decode(filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }
        for component in components.split(|c| *c == b'/') {
            if let Some(entry) = catalog_reader.lookup(&current, component)? {
                current = entry;
            } else {
                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
            }
        }
    }

    let mut res = Vec::new();

    for direntry in catalog_reader.read_dir(&current)? {
        let mut components = components.clone();
        components.push(b'/');
        components.extend(&direntry.name);
        let path = base64::encode(components);
        let text = String::from_utf8_lossy(&direntry.name);
        let mut entry = json!({
            "filepath": path,
            "text": text,
            "type": CatalogEntryType::from(&direntry.attr).to_string(),
            "leaf": true,
        });
        match direntry.attr {
            DirEntryAttribute::Directory { start: _ } => {
                entry["leaf"] = false.into();
            },
            DirEntryAttribute::File { size, mtime } => {
                entry["size"] = size.into();
                entry["mtime"] = mtime.into();
            },
            _ => {},
        }
        res.push(entry);
    }

    Ok(res.into())
}

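/// Recursively add a pxar directory tree to a zip archive. Files and resolved
/// hardlinks become zip entries with their pxar mtime/mode; directories are
/// added as entries and then walked. Encoder and accessor are passed back out
/// of the boxed future so the caller can reuse them across recursion steps.
///
/// A hedged usage sketch (mirrors the call in `pxar_file_download` below; the
/// variable names are illustrative):
///
/// ```ignore
/// let zip = ZipEncoder::new(writer);
/// let (zip, _accessor) = recurse_files(zip, accessor, prefix, dir_entry).await?;
/// zip.finish().await?;
/// ```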
fn recurse_files<T, W>(
    mut zip: ZipEncoder<W>,
    mut decoder: Accessor<T>,
    prefix: PathBuf,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(ZipEncoder<W>, Accessor<T>), Error>> + Send + 'static>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    let (zip_tmp, decoder_tmp) =
                        recurse_files(zip, decoder, prefix.clone(), entry).await?;
                    zip = zip_tmp;
                    decoder = decoder_tmp;
                }
            }
            _ => {} // ignore all else
        };

        Ok((zip, decoder))
    })
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

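/// Extract a single entry from a pxar archive inside a snapshot and stream it
/// to the client. The base64 `filepath` encodes "<archive-name>/<path inside
/// the archive>". Regular files and hardlinks are streamed directly; for a
/// directory, the subtree is packed into a zip on the fly by spawning
/// `recurse_files` on an internal task and streaming the encoder's output
/// through an in-memory channel.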
fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard the last component
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                let zipencoder = ZipEncoder::new(channelwriter);

                crate::server::spawn_internal_task(async move {
                    let (mut zipencoder, _) = recurse_files(zipencoder, decoder, prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creation of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error during finishing of zip: {}", err))
                });

                Body::wrap_stream(receiver.map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
    },
)]
/// Set "notes" for a specific backup
fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Userid,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
    },
)]
/// Change owner of a backup group
fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Userid,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let user_info = CachedUserInfo::new()?;

    if !user_info.is_active_user(&new_owner) {
        bail!("user '{}' is inactive or non-existent", new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}

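// Router setup: everything below `.../admin/datastore/{store}` is served via
// the subdir map of DATASTORE_INFO_ROUTER; the top-level GET on ROUTER lists
// the datastores accessible to the calling user.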
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);