// src/api2/admin/datastore.rs (proxmox-backup)
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::sync::{Arc, Mutex};
use std::path::{Path, PathBuf};
use std::pin::Pin;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
use pxar::EntryKind;

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;

use crate::server::WorkerTask;
use crate::tools::{
    self,
    zip::{ZipEncoder, ZipEntry},
    AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
};

use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
};

fn check_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    userid: &Userid,
) -> Result<(), Error> {
    let owner = store.get_owner(group)?;
    if &owner != userid {
        bail!("backup owner check failed ({} != {})", userid, owner);
    }
    Ok(())
}

fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}
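
// A minimal, test-only sketch (using plain strings instead of the real
// `BackupContent` items) of the fold-into-`HashSet` pattern above, which lets
// `get_all_snapshot_files` append on-disk files missing from the manifest
// exactly once:
#[cfg(test)]
mod file_set_example {
    use std::collections::HashSet;

    #[test]
    fn fold_filenames_into_set() {
        let files = vec!["catalog.pcat1.didx".to_string(), "index.json.blob".to_string()];
        let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
            acc.insert(item.clone());
            acc
        });
        // files already listed must not be appended a second time
        assert!(file_set.contains("index.json.blob"));
        assert_eq!(file_set.len(), 2);
    }
}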

fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert(vec![]);
        time_list.push(info);
    }

    group_hash
}
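
// A minimal, test-only sketch (with hypothetical string group ids instead of
// `BackupInfo`) of the `entry().or_insert()` grouping used above:
#[cfg(test)]
mod group_backups_example {
    use std::collections::HashMap;

    #[test]
    fn group_by_id() {
        let items = vec![("vm/100", "snap-a"), ("vm/100", "snap-b"), ("ct/200", "snap-c")];
        let mut group_hash: HashMap<String, Vec<&str>> = HashMap::new();
        for (group_id, snapshot) in items {
            group_hash.entry(group_id.to_owned()).or_insert(vec![]).push(snapshot);
        }
        assert_eq!(group_hash["vm/100"].len(), 2);
        assert_eq!(group_hash["ct/200"].len(), 1);
    }
}
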
123
124 #[api(
125 input: {
126 properties: {
127 store: {
128 schema: DATASTORE_SCHEMA,
129 },
130 },
131 },
132 returns: {
133 type: Array,
134 description: "Returns the list of backup groups.",
135 items: {
136 type: GroupListItem,
137 }
138 },
139 access: {
140 permission: &Permission::Privilege(
141 &["datastore", "{store}"],
142 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
143 true),
144 },
145 )]
146 /// List backup groups.
147 fn list_groups(
148 store: String,
149 rpcenv: &mut dyn RpcEnvironment,
150 ) -> Result<Vec<GroupListItem>, Error> {
151
152 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
153 let user_info = CachedUserInfo::new()?;
154 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
155
156 let datastore = DataStore::lookup_datastore(&store)?;
157
158 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
159
160 let group_hash = group_backups(backup_list);
161
162 let mut groups = Vec::new();
163
164 for (_group_id, mut list) in group_hash {
165
166 BackupInfo::sort_list(&mut list, false);
167
168 let info = &list[0];
169
170 let group = info.backup_dir.group();
171
172 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
173 let owner = datastore.get_owner(group)?;
174 if !list_all && owner != userid {
175 continue;
176 }
177
178 let result_item = GroupListItem {
179 backup_type: group.backup_type().to_string(),
180 backup_id: group.backup_id().to_string(),
181 last_backup: info.backup_dir.backup_time(),
182 backup_count: list.len() as u64,
183 files: info.files.clone(),
184 owner: Some(owner),
185 };
186 groups.push(result_item);
187 }
188
189 Ok(groups)
190 }
191
192 #[api(
193 input: {
194 properties: {
195 store: {
196 schema: DATASTORE_SCHEMA,
197 },
198 "backup-type": {
199 schema: BACKUP_TYPE_SCHEMA,
200 },
201 "backup-id": {
202 schema: BACKUP_ID_SCHEMA,
203 },
204 "backup-time": {
205 schema: BACKUP_TIME_SCHEMA,
206 },
207 },
208 },
209 returns: {
210 type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(ref backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(ref backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;

        if !list_all && owner != userid {
            continue;
        }

        let mut size = None;

        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let verify = manifest.unprotected["verify_state"].clone();
                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                (comment, verify, files)
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                (
                    None,
                    None,
                    info
                        .files
                        .iter()
                        .map(|x| BackupContent {
                            filename: x.to_string(),
                            size: None,
                            crypt_mode: None,
                        })
                        .collect()
                )
            },
        };

        let result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time(),
            comment,
            verification,
            files,
            size,
            owner: Some(owner),
        };

        snapshots.push(result_item);
    }

    Ok(snapshots)
}

// returns a map from type to (group_count, snapshot_count)
fn get_snapshots_count(store: &DataStore) -> Result<HashMap<String, (usize, usize)>, Error> {
    let base_path = store.base_path();
    let backup_list = BackupInfo::list_backups(&base_path)?;
    let mut groups = HashSet::new();
    let mut result: HashMap<String, (usize, usize)> = HashMap::new();
    for info in backup_list {
        let group = info.backup_dir.group();

        let id = group.backup_id();
        let backup_type = group.backup_type();

        let mut new_id = false;

        if groups.insert(format!("{}-{}", &backup_type, &id)) {
            new_id = true;
        }
        if let Some(counts) = result.get_mut(backup_type) {
            counts.1 += 1;
            if new_id {
                counts.0 += 1;
            }
        } else {
            result.insert(backup_type.to_string(), (1, 1));
        }
    }

    Ok(result)
}
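
// A self-contained, test-only sketch of the counting scheme above: one
// `HashSet` detects first-seen group ids while a `HashMap` accumulates
// (group_count, snapshot_count) per backup type.
#[cfg(test)]
mod snapshot_count_example {
    use std::collections::{HashMap, HashSet};

    #[test]
    fn count_groups_and_snapshots() {
        let snapshots = vec![("vm", "100"), ("vm", "100"), ("vm", "101"), ("ct", "200")];
        let mut groups = HashSet::new();
        let mut result: HashMap<String, (usize, usize)> = HashMap::new();
        for (backup_type, id) in snapshots {
            let new_id = groups.insert(format!("{}-{}", backup_type, id));
            let counts = result.entry(backup_type.to_string()).or_insert((0, 0));
            counts.1 += 1;
            if new_id {
                counts.0 += 1;
            }
        }
        assert_eq!(result["vm"], (2, 3));
        assert_eq!(result["ct"], (1, 1));
    }
}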

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        description: "The overall Datastore status and information.",
        type: Object,
        properties: {
            storage: {
                type: StorageStatus,
            },
            counts: {
                description: "Group and Snapshot counts per Type",
                type: Object,
                properties: { },
            },
            "gc-status": {
                type: GarbageCollectionStatus,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let storage_status = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let counts = get_snapshots_count(&datastore)?;
    let gc_status = datastore.last_gc_status();

    let res = json!({
        "storage": storage_status,
        "counts": counts,
        "gc-status": gc_status,
    });

    Ok(res)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);
            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id.clone()),
        userid,
        to_stdout,
        move |worker| {
            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));

            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    datastore,
                    &backup_dir,
                    verified_chunks,
                    corrupt_chunks,
                    worker.clone(),
                    worker.upid().clone(),
                    None,
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let (_count, failed_dirs) = verify_backup_group(
                    datastore,
                    &backup_group,
                    verified_chunks,
                    corrupt_chunks,
                    None,
                    worker.clone(),
                    worker.upid(),
                    None,
                )?;
                failed_dirs
            } else {
                verify_all_backups(datastore, worker.clone(), worker.upid(), None)?
            };
            if !failed_dirs.is_empty() {
                worker.log("Failed to verify the following snapshots:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
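
// A test-only sketch of the parameter dispatch in `verify` above: the three
// optional parameters select snapshot, group, or whole-datastore scope, and
// any other combination is rejected.
#[cfg(test)]
mod verify_scope_example {
    #[derive(Debug, PartialEq)]
    enum Scope { Snapshot, Group, Datastore }

    fn scope(ty: Option<&str>, id: Option<&str>, time: Option<i64>) -> Option<Scope> {
        match (ty, id, time) {
            (Some(_), Some(_), Some(_)) => Some(Scope::Snapshot),
            (Some(_), Some(_), None) => Some(Scope::Group),
            (None, None, None) => Some(Scope::Datastore),
            _ => None, // the real handler bails out here
        }
    }

    #[test]
    fn dispatch() {
        assert_eq!(scope(Some("vm"), Some("100"), Some(0)), Some(Scope::Snapshot));
        assert_eq!(scope(Some("vm"), Some("100"), None), Some(Scope::Group));
        assert_eq!(scope(None, None, None), Some(Scope::Datastore));
        assert_eq!(scope(Some("vm"), None, None), None);
    }
}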

#[macro_export]
macro_rules! add_common_prune_parameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_parameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}

pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
670 "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_parameters!([
679 ("backup-id", false, &BACKUP_ID_SCHEMA),
680 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
681 ("dry-run", true, &BooleanSchema::new(
682 "Just show what prune would do, but do not delete anything.")
683 .schema()
684 ),
685 ],[
686 ("store", false, &DATASTORE_SCHEMA),
687 ])
688 ))
689 .returns(&API_RETURN_SCHEMA_PRUNE)
690 .access(None, &Permission::Privilege(
691 &["datastore", "{store}"],
692 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
693 true)
694 );
695
696 fn prune(
697 param: Value,
698 _info: &ApiMethod,
699 rpcenv: &mut dyn RpcEnvironment,
700 ) -> Result<Value, Error> {
701
702 let store = tools::required_string_param(&param, "store")?;
703 let backup_type = tools::required_string_param(&param, "backup-type")?;
704 let backup_id = tools::required_string_param(&param, "backup-id")?;
705
706 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
707 let user_info = CachedUserInfo::new()?;
708 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
709
710 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
711
712 let group = BackupGroup::new(backup_type, backup_id);
713
714 let datastore = DataStore::lookup_datastore(&store)?;
715
716 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
717 if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
718
719 let prune_options = PruneOptions {
720 keep_last: param["keep-last"].as_u64(),
721 keep_hourly: param["keep-hourly"].as_u64(),
722 keep_daily: param["keep-daily"].as_u64(),
723 keep_weekly: param["keep-weekly"].as_u64(),
724 keep_monthly: param["keep-monthly"].as_u64(),
725 keep_yearly: param["keep-yearly"].as_u64(),
726 };
727
728 let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
729
730 let mut prune_result = Vec::new();
731
732 let list = group.list_backups(&datastore.base_path())?;
733
734 let mut prune_info = compute_prune_info(list, &prune_options)?;
735
736 prune_info.reverse(); // delete older snapshots first
737
738 let keep_all = !prune_options.keeps_something();
739
740 if dry_run {
741 for (info, mut keep) in prune_info {
742 if keep_all { keep = true; }
743
744 let backup_time = info.backup_dir.backup_time();
745 let group = info.backup_dir.group();
746
747 prune_result.push(json!({
748 "backup-type": group.backup_type(),
749 "backup-id": group.backup_id(),
750 "backup-time": backup_time,
751 "keep": keep,
752 }));
753 }
754 return Ok(json!(prune_result));
755 }
756
757
    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
            store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();


        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection",
        Some(store.clone()),
        Userid::root_userid().clone(),
        to_stdout,
        move |worker| {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(&*worker, worker.upid())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: {
            description: "Datastore name and description.",
            properties: {
                store: {
                    schema: DATASTORE_SCHEMA,
                },
                comment: {
                    optional: true,
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                },
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            let mut entry = json!({ "store": store });
            if let Some(comment) = data["comment"].as_str() {
                entry["comment"] = comment.into();
            }
            list.push(entry);
        }
    }

    Ok(list.into())
}
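
// A test-only sketch of the bitmask privilege checks used throughout this
// module. The constants here are hypothetical stand-ins; the real bit values
// come from crate::config::acl.
#[cfg(test)]
mod privilege_check_example {
    const AUDIT: u64 = 1 << 0;
    const BACKUP: u64 = 1 << 1;

    #[test]
    fn any_of_two_privileges() {
        let user_privs = BACKUP;
        // "audit or backup" is enough to list a datastore ...
        assert!((user_privs & (AUDIT | BACKUP)) != 0);
        // ... but audit alone decides whether other users' groups are visible
        assert!((user_privs & AUDIT) == 0);
    }
}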

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

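// A test-only sketch of the suffix dispatch above: `rsplitn(2, '.')` walks
// from the right, so its first item is the archive's extension.
#[cfg(test)]
mod extension_dispatch_example {
    #[test]
    fn extension_of_archive_name() {
        let file_name = "root.pxar.didx";
        let extension = file_name.rsplitn(2, '.').next().unwrap();
        assert_eq!(extension, "didx");
        // blobs likewise dispatch on their last suffix only
        assert_eq!("qemu-server.conf.blob".rsplitn(2, '.').next().unwrap(), "blob");
    }
}
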
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);
    let mut current = catalog_reader.root()?;
    let mut components = vec![];


    if filepath != "root" {
        components = base64::decode(filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }
        for component in components.split(|c| *c == b'/') {
            if let Some(entry) = catalog_reader.lookup(&current, component)? {
                current = entry;
            } else {
                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
            }
        }
    }

    let mut res = Vec::new();

    for direntry in catalog_reader.read_dir(&current)? {
        let mut components = components.clone();
        components.push(b'/');
        components.extend(&direntry.name);
        let path = base64::encode(components);
        let text = String::from_utf8_lossy(&direntry.name);
        let mut entry = json!({
            "filepath": path,
            "text": text,
            "type": CatalogEntryType::from(&direntry.attr).to_string(),
            "leaf": true,
        });
        match direntry.attr {
            DirEntryAttribute::Directory { start: _ } => {
                entry["leaf"] = false.into();
            },
            DirEntryAttribute::File { size, mtime } => {
                entry["size"] = size.into();
                entry["mtime"] = mtime.into();
            },
            _ => {},
        }
        res.push(entry);
    }

    Ok(res.into())
}
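
// A test-only sketch of the filepath addressing used by the catalog API:
// "root" selects the catalog root, anything else is the base64-encoded,
// '/'-separated byte path, which the handler above also produces for each
// returned entry.
#[cfg(test)]
mod catalog_filepath_example {
    #[test]
    fn filepath_roundtrip() {
        let mut components: Vec<u8> = b"/etc".to_vec();
        components.push(b'/');
        components.extend(b"hosts");
        let filepath = base64::encode(&components);
        assert_eq!(base64::decode(&filepath).unwrap(), b"/etc/hosts");
    }
}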

fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}
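
// `recurse_files` cannot be a plain `async fn` because it awaits itself: the
// compiler would have to build an infinitely nested future type. Returning a
// boxed, pinned future breaks that cycle. A minimal test-only sketch of the
// same pattern:
#[cfg(test)]
mod boxed_recursion_example {
    use std::future::Future;
    use std::pin::Pin;

    fn countdown(n: u64) -> Pin<Box<dyn Future<Output = u64> + Send>> {
        Box::pin(async move {
            if n == 0 { 0 } else { countdown(n - 1).await }
        })
    }

    #[test]
    fn recursion_terminates() {
        assert_eq!(futures::executor::block_on(countdown(3)), 0);
    }
}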

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard the last component
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creation of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error while finishing zip: {}", err))
                });

                Body::wrap_stream(receiver.map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
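
// A test-only sketch of the decoded filepath layout this handler expects:
// "<archive name>/<path inside the archive>", split at the first '/'.
#[cfg(test)]
mod pxar_filepath_split_example {
    #[test]
    fn split_archive_and_inner_path() {
        let components: &[u8] = b"root.pxar.didx/etc/hosts";
        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap()).unwrap();
        let file_path = split.next().unwrap();
        assert_eq!(pxar_name, "root.pxar.didx");
        assert_eq!(file_path, &b"etc/hosts"[..]);
    }
}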

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}
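
// Notes live in the manifest's "unprotected" JSON object; the snapshot list
// above surfaces only the first line as the `comment` field. A test-only
// sketch of that extraction:
#[cfg(test)]
mod notes_comment_example {
    use serde_json::json;

    #[test]
    fn first_line_becomes_comment() {
        let unprotected = json!({ "notes": "first line\nmore detail" });
        let comment: Option<String> = unprotected["notes"]
            .as_str()
            .and_then(|notes| notes.lines().next())
            .map(String::from);
        assert_eq!(comment.as_deref(), Some("first line"));
    }
}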

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
    },
)]
/// Set "notes" for a specific backup
fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Userid,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
    },
)]
/// Change owner of a backup group
fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Userid,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let user_info = CachedUserInfo::new()?;

    if !user_info.is_active_user(&new_owner) {
        bail!("user '{}' is inactive or non-existent", new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);
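
// Assuming this module is mounted at /api2/json/admin/datastore (as elsewhere
// in the api2 tree), the routers above yield endpoints such as:
//   GET    .../admin/datastore                       -> get_datastore_list
//   GET    .../admin/datastore/{store}/snapshots     -> list_snapshots
//   DELETE .../admin/datastore/{store}/snapshots     -> delete_snapshot
//   POST   .../admin/datastore/{store}/verify        -> verify
//   GET    .../admin/datastore/{store}/gc            -> garbage_collection_status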