// git.proxmox.com Git - proxmox-backup.git - src/api2/admin/datastore.rs
// commit: DataStore::load_manifest: also return CryptMode
1 use std::collections::{HashSet, HashMap};
2 use std::ffi::OsStr;
3 use std::os::unix::ffi::OsStrExt;
4
5 use anyhow::{bail, format_err, Error};
6 use futures::*;
7 use hyper::http::request::Parts;
8 use hyper::{header, Body, Response, StatusCode};
9 use serde_json::{json, Value};
10
11 use proxmox::api::{
12 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
13 RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
14 use proxmox::api::router::SubdirMap;
15 use proxmox::api::schema::*;
16 use proxmox::tools::fs::{replace_file, CreateOptions};
17 use proxmox::try_block;
18 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
19
20 use pxar::accessor::aio::Accessor;
21 use pxar::EntryKind;
22
23 use crate::api2::types::*;
24 use crate::api2::node::rrd::create_value_from_rrd;
25 use crate::backup::*;
26 use crate::config::datastore;
27 use crate::config::cached_user_info::CachedUserInfo;
28
29 use crate::server::WorkerTask;
30 use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
31 use crate::config::acl::{
32 PRIV_DATASTORE_AUDIT,
33 PRIV_DATASTORE_MODIFY,
34 PRIV_DATASTORE_READ,
35 PRIV_DATASTORE_PRUNE,
36 PRIV_DATASTORE_BACKUP,
37 };
38
39 fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
40 let owner = store.get_owner(group)?;
41 if &owner != userid {
42 bail!("backup owner check failed ({} != {})", userid, owner);
43 }
44 Ok(())
45 }
46
47 fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
48
49 let (manifest, manifest_crypt_mode, index_size) = store.load_manifest(backup_dir)?;
50
51 let mut result = Vec::new();
52 for item in manifest.files() {
53 result.push(BackupContent {
54 filename: item.filename.clone(),
55 crypt_mode: Some(item.crypt_mode),
56 size: Some(item.size),
57 });
58 }
59
60 result.push(BackupContent {
61 filename: MANIFEST_BLOB_NAME.to_string(),
62 crypt_mode: Some(manifest_crypt_mode),
63 size: Some(index_size),
64 });
65
66 Ok(result)
67 }
68
69 fn get_all_snapshot_files(
70 store: &DataStore,
71 info: &BackupInfo,
72 ) -> Result<Vec<BackupContent>, Error> {
73 let mut files = read_backup_index(&store, &info.backup_dir)?;
74
75 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
76 acc.insert(item.filename.clone());
77 acc
78 });
79
80 for file in &info.files {
81 if file_set.contains(file) { continue; }
82 files.push(BackupContent {
83 filename: file.to_string(),
84 size: None,
85 crypt_mode: None,
86 });
87 }
88
89 Ok(files)
90 }
91
92 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
93
94 let mut group_hash = HashMap::new();
95
96 for info in backup_list {
97 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
98 let time_list = group_hash.entry(group_id).or_insert(vec![]);
99 time_list.push(info);
100 }
101
102 group_hash
103 }
104
105 #[api(
106 input: {
107 properties: {
108 store: {
109 schema: DATASTORE_SCHEMA,
110 },
111 },
112 },
113 returns: {
114 type: Array,
115 description: "Returns the list of backup groups.",
116 items: {
117 type: GroupListItem,
118 }
119 },
120 access: {
121 permission: &Permission::Privilege(
122 &["datastore", "{store}"],
123 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
124 true),
125 },
126 )]
127 /// List backup groups.
128 fn list_groups(
129 store: String,
130 rpcenv: &mut dyn RpcEnvironment,
131 ) -> Result<Vec<GroupListItem>, Error> {
132
133 let username = rpcenv.get_user().unwrap();
134 let user_info = CachedUserInfo::new()?;
135 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
136
137 let datastore = DataStore::lookup_datastore(&store)?;
138
139 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
140
141 let group_hash = group_backups(backup_list);
142
143 let mut groups = Vec::new();
144
145 for (_group_id, mut list) in group_hash {
146
147 BackupInfo::sort_list(&mut list, false);
148
149 let info = &list[0];
150
151 let group = info.backup_dir.group();
152
153 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
154 let owner = datastore.get_owner(group)?;
155 if !list_all {
156 if owner != username { continue; }
157 }
158
159 let result_item = GroupListItem {
160 backup_type: group.backup_type().to_string(),
161 backup_id: group.backup_id().to_string(),
162 last_backup: info.backup_dir.backup_time().timestamp(),
163 backup_count: list.len() as u64,
164 files: info.files.clone(),
165 owner: Some(owner),
166 };
167 groups.push(result_item);
168 }
169
170 Ok(groups)
171 }
172
173 #[api(
174 input: {
175 properties: {
176 store: {
177 schema: DATASTORE_SCHEMA,
178 },
179 "backup-type": {
180 schema: BACKUP_TYPE_SCHEMA,
181 },
182 "backup-id": {
183 schema: BACKUP_ID_SCHEMA,
184 },
185 "backup-time": {
186 schema: BACKUP_TIME_SCHEMA,
187 },
188 },
189 },
190 returns: {
191 type: Array,
192 description: "Returns the list of archive files inside a backup snapshots.",
193 items: {
194 type: BackupContent,
195 }
196 },
197 access: {
198 permission: &Permission::Privilege(
199 &["datastore", "{store}"],
200 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
201 true),
202 },
203 )]
204 /// List snapshot files.
205 pub fn list_snapshot_files(
206 store: String,
207 backup_type: String,
208 backup_id: String,
209 backup_time: i64,
210 _info: &ApiMethod,
211 rpcenv: &mut dyn RpcEnvironment,
212 ) -> Result<Vec<BackupContent>, Error> {
213
214 let username = rpcenv.get_user().unwrap();
215 let user_info = CachedUserInfo::new()?;
216 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
217
218 let datastore = DataStore::lookup_datastore(&store)?;
219
220 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
221
222 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
223 if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
224
225 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
226
227 get_all_snapshot_files(&datastore, &info)
228 }
229
230 #[api(
231 input: {
232 properties: {
233 store: {
234 schema: DATASTORE_SCHEMA,
235 },
236 "backup-type": {
237 schema: BACKUP_TYPE_SCHEMA,
238 },
239 "backup-id": {
240 schema: BACKUP_ID_SCHEMA,
241 },
242 "backup-time": {
243 schema: BACKUP_TIME_SCHEMA,
244 },
245 },
246 },
247 access: {
248 permission: &Permission::Privilege(
249 &["datastore", "{store}"],
250 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
251 true),
252 },
253 )]
254 /// Delete backup snapshot.
255 fn delete_snapshot(
256 store: String,
257 backup_type: String,
258 backup_id: String,
259 backup_time: i64,
260 _info: &ApiMethod,
261 rpcenv: &mut dyn RpcEnvironment,
262 ) -> Result<Value, Error> {
263
264 let username = rpcenv.get_user().unwrap();
265 let user_info = CachedUserInfo::new()?;
266 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
267
268 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
269
270 let datastore = DataStore::lookup_datastore(&store)?;
271
272 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
273 if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
274
275 datastore.remove_backup_dir(&snapshot)?;
276
277 Ok(Value::Null)
278 }
279
280 #[api(
281 input: {
282 properties: {
283 store: {
284 schema: DATASTORE_SCHEMA,
285 },
286 "backup-type": {
287 optional: true,
288 schema: BACKUP_TYPE_SCHEMA,
289 },
290 "backup-id": {
291 optional: true,
292 schema: BACKUP_ID_SCHEMA,
293 },
294 },
295 },
296 returns: {
297 type: Array,
298 description: "Returns the list of snapshots.",
299 items: {
300 type: SnapshotListItem,
301 }
302 },
303 access: {
304 permission: &Permission::Privilege(
305 &["datastore", "{store}"],
306 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
307 true),
308 },
309 )]
310 /// List backup snapshots.
311 pub fn list_snapshots (
312 store: String,
313 backup_type: Option<String>,
314 backup_id: Option<String>,
315 _param: Value,
316 _info: &ApiMethod,
317 rpcenv: &mut dyn RpcEnvironment,
318 ) -> Result<Vec<SnapshotListItem>, Error> {
319
320 let username = rpcenv.get_user().unwrap();
321 let user_info = CachedUserInfo::new()?;
322 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
323
324 let datastore = DataStore::lookup_datastore(&store)?;
325
326 let base_path = datastore.base_path();
327
328 let backup_list = BackupInfo::list_backups(&base_path)?;
329
330 let mut snapshots = vec![];
331
332 for info in backup_list {
333 let group = info.backup_dir.group();
334 if let Some(ref backup_type) = backup_type {
335 if backup_type != group.backup_type() { continue; }
336 }
337 if let Some(ref backup_id) = backup_id {
338 if backup_id != group.backup_id() { continue; }
339 }
340
341 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
342 let owner = datastore.get_owner(group)?;
343
344 if !list_all {
345 if owner != username { continue; }
346 }
347
348 let mut size = None;
349
350 let files = match get_all_snapshot_files(&datastore, &info) {
351 Ok(files) => {
352 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
353 files
354 },
355 Err(err) => {
356 eprintln!("error during snapshot file listing: '{}'", err);
357 info
358 .files
359 .iter()
360 .map(|x| BackupContent {
361 filename: x.to_string(),
362 size: None,
363 crypt_mode: None,
364 })
365 .collect()
366 },
367 };
368
369 let result_item = SnapshotListItem {
370 backup_type: group.backup_type().to_string(),
371 backup_id: group.backup_id().to_string(),
372 backup_time: info.backup_dir.backup_time().timestamp(),
373 files,
374 size,
375 owner: Some(owner),
376 };
377
378 snapshots.push(result_item);
379 }
380
381 Ok(snapshots)
382 }
383
384 #[api(
385 input: {
386 properties: {
387 store: {
388 schema: DATASTORE_SCHEMA,
389 },
390 },
391 },
392 returns: {
393 type: StorageStatus,
394 },
395 access: {
396 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
397 },
398 )]
399 /// Get datastore status.
400 pub fn status(
401 store: String,
402 _info: &ApiMethod,
403 _rpcenv: &mut dyn RpcEnvironment,
404 ) -> Result<StorageStatus, Error> {
405 let datastore = DataStore::lookup_datastore(&store)?;
406 crate::tools::disks::disk_usage(&datastore.base_path())
407 }
408
409 #[api(
410 input: {
411 properties: {
412 store: {
413 schema: DATASTORE_SCHEMA,
414 },
415 "backup-type": {
416 schema: BACKUP_TYPE_SCHEMA,
417 optional: true,
418 },
419 "backup-id": {
420 schema: BACKUP_ID_SCHEMA,
421 optional: true,
422 },
423 "backup-time": {
424 schema: BACKUP_TIME_SCHEMA,
425 optional: true,
426 },
427 },
428 },
429 returns: {
430 schema: UPID_SCHEMA,
431 },
432 access: {
433 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
434 },
435 )]
436 /// Verify backups.
437 ///
438 /// This function can verify a single backup snapshot, all backup from a backup group,
439 /// or all backups in the datastore.
440 pub fn verify(
441 store: String,
442 backup_type: Option<String>,
443 backup_id: Option<String>,
444 backup_time: Option<i64>,
445 rpcenv: &mut dyn RpcEnvironment,
446 ) -> Result<Value, Error> {
447 let datastore = DataStore::lookup_datastore(&store)?;
448
449 let worker_id;
450
451 let mut backup_dir = None;
452 let mut backup_group = None;
453
454 match (backup_type, backup_id, backup_time) {
455 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
456 worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
457 let dir = BackupDir::new(backup_type, backup_id, backup_time);
458 backup_dir = Some(dir);
459 }
460 (Some(backup_type), Some(backup_id), None) => {
461 worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
462 let group = BackupGroup::new(backup_type, backup_id);
463 backup_group = Some(group);
464 }
465 (None, None, None) => {
466 worker_id = store.clone();
467 }
468 _ => bail!("parameters do not spefify a backup group or snapshot"),
469 }
470
471 let username = rpcenv.get_user().unwrap();
472 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
473
474 let upid_str = WorkerTask::new_thread(
475 "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
476 {
477 let success = if let Some(backup_dir) = backup_dir {
478 verify_backup_dir(&datastore, &backup_dir, &worker)?
479 } else if let Some(backup_group) = backup_group {
480 verify_backup_group(&datastore, &backup_group, &worker)?
481 } else {
482 verify_all_backups(&datastore, &worker)?
483 };
484 if !success {
485 bail!("verfication failed - please check the log for details");
486 }
487 Ok(())
488 })?;
489
490 Ok(json!(upid_str))
491 }
492
/// Build the array of `(name, optional, schema)` tuples for the common
/// keep-* prune options, optionally merging extra leading (`$list1`) and
/// trailing (`$list2`) parameter tuples around them.
///
/// NOTE: the name typo ("prameters") is kept — the macro is `#[macro_export]`ed,
/// so renaming it would break external users.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // single-list form: forward to the two-list form with an empty tail
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
535
/// Return schema of the `prune` API call: an array of `PruneListItem`s.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    PruneListItem::API_SCHEMA
).schema();
540
/// Hand-written API method definition for `prune` (uses the exported
/// `add_common_prune_prameters!` macro, which the `#[api]` attribute cannot).
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
    );
562
/// Prune a backup group: evaluate the keep-* retention options for each
/// snapshot and delete those not kept (unless `dry-run` is set).
fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    // without the MODIFY privilege, only the group owner may prune it
    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, &group, &username)?; }

    // absent keep-* parameters stay `None` (no limit for that interval)
    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    // (snapshot, keep) pairs as computed from the retention options
    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    // no keep-* option given at all -> keep everything
    let keep_all = !prune_options.keeps_something();

    if dry_run {
        // only report what would happen; no worker task, nothing is deleted
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;

    let result = try_block! {
        if keep_all {
            worker.log("No prune selection - keeping all files.");
        } else {
            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
            worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                store, backup_type, backup_id));
        }

        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let timestamp = BackupDir::backup_time_to_string(backup_time);
            let group = info.backup_dir.group();


            let msg = format!(
                "{}/{}/{} {}",
                group.backup_type(),
                group.backup_id(),
                timestamp,
                if keep { "keep" } else { "remove" },
            );

            worker.log(msg);

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));

            // dry_run is always false here (handled by the early return above)
            if !(dry_run || keep) {
                datastore.remove_backup_dir(&info.backup_dir)?;
            }
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    };

    Ok(json!(prune_result))
}
678
679 #[api(
680 input: {
681 properties: {
682 store: {
683 schema: DATASTORE_SCHEMA,
684 },
685 },
686 },
687 returns: {
688 schema: UPID_SCHEMA,
689 },
690 access: {
691 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
692 },
693 )]
694 /// Start garbage collection.
695 fn start_garbage_collection(
696 store: String,
697 _info: &ApiMethod,
698 rpcenv: &mut dyn RpcEnvironment,
699 ) -> Result<Value, Error> {
700
701 let datastore = DataStore::lookup_datastore(&store)?;
702
703 println!("Starting garbage collection on store {}", store);
704
705 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
706
707 let upid_str = WorkerTask::new_thread(
708 "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
709 {
710 worker.log(format!("starting garbage collection on store {}", store));
711 datastore.garbage_collection(&worker)
712 })?;
713
714 Ok(json!(upid_str))
715 }
716
717 #[api(
718 input: {
719 properties: {
720 store: {
721 schema: DATASTORE_SCHEMA,
722 },
723 },
724 },
725 returns: {
726 type: GarbageCollectionStatus,
727 },
728 access: {
729 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
730 },
731 )]
732 /// Garbage collection status.
733 pub fn garbage_collection_status(
734 store: String,
735 _info: &ApiMethod,
736 _rpcenv: &mut dyn RpcEnvironment,
737 ) -> Result<GarbageCollectionStatus, Error> {
738
739 let datastore = DataStore::lookup_datastore(&store)?;
740
741 let status = datastore.last_gc_status();
742
743 Ok(status)
744 }
745
746 #[api(
747 returns: {
748 description: "List the accessible datastores.",
749 type: Array,
750 items: {
751 description: "Datastore name and description.",
752 properties: {
753 store: {
754 schema: DATASTORE_SCHEMA,
755 },
756 comment: {
757 optional: true,
758 schema: SINGLE_LINE_COMMENT_SCHEMA,
759 },
760 },
761 },
762 },
763 access: {
764 permission: &Permission::Anybody,
765 },
766 )]
767 /// Datastore list
768 fn get_datastore_list(
769 _param: Value,
770 _info: &ApiMethod,
771 rpcenv: &mut dyn RpcEnvironment,
772 ) -> Result<Value, Error> {
773
774 let (config, _digest) = datastore::config()?;
775
776 let username = rpcenv.get_user().unwrap();
777 let user_info = CachedUserInfo::new()?;
778
779 let mut list = Vec::new();
780
781 for (store, (_, data)) in &config.sections {
782 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
783 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
784 if allowed {
785 let mut entry = json!({ "store": store });
786 if let Some(comment) = data["comment"].as_str() {
787 entry["comment"] = comment.into();
788 }
789 list.push(entry);
790 }
791 }
792
793 Ok(list.into())
794 }
795
/// Hand-written API method definition for `download_file` (AsyncHttp handlers
/// cannot use the `#[api]` attribute).
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
814
/// Stream a single raw file of a backup snapshot as an HTTP response.
///
/// The file is sent exactly as stored on disk (possibly compressed and/or
/// encrypted) as `application/octet-stream`.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // without READ, a user may only download from snapshots they own
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        // <base>/<relative snapshot path>/<file-name>
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
            .await?;

        // stream the file in chunks instead of buffering it in memory
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
           .status(StatusCode::OK)
           .header(header::CONTENT_TYPE, "application/octet-stream")
           .body(body)
           .unwrap())
    }.boxed()
}
868
/// Hand-written API method definition for `download_file_decoded` (AsyncHttp
/// handlers cannot use the `#[api]` attribute).
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
887
/// Stream a single file of a backup snapshot in decoded form.
///
/// Unlike `download_file`, the content is reassembled/decoded before it is
/// sent: dynamic and fixed indexes are read chunk by chunk, blobs are
/// decoded through `DataBlobReader`. Encrypted files are rejected, since the
/// server cannot decode them without the key.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // without READ, a user may only download from snapshots they own
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        // refuse encrypted files — they cannot be decoded server-side
        let files = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // dispatch on the file extension ("didx" / "fidx" / "blob")
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

                let chunk_reader = LocalChunkReader::new(datastore, None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let chunk_reader = LocalChunkReader::new(datastore, None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // larger buffer for fixed indexes (typically large block images)
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
           .status(StatusCode::OK)
           .header(header::CONTENT_TYPE, "application/octet-stream")
           .body(body)
           .unwrap())
    }.boxed()
}
980
/// Hand-written API method definition for `upload_backup_log` (AsyncHttp
/// handlers cannot use the `#[api]` attribute).
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
997
/// Store the request body as the snapshot's client log blob.
///
/// The body must already be a valid `DataBlob` (the CRC is verified server
/// side); it is written as `client.log.blob` into the snapshot directory.
/// Fails if the snapshot already has a log, and only the snapshot owner may
/// upload one.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // only the owner may attach a log to a snapshot
        let username = rpcenv.get_user().unwrap();
        check_backup_owner(&datastore, backup_dir.group(), &username)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // refuse overwriting an existing log
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

        // collect the whole request body into memory
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        let blob = DataBlob::from_raw(data)?;
        // always verify CRC at server side
        blob.verify_crc()?;
        let raw_data = blob.raw_data();
        // atomic write of the blob into the snapshot directory
        replace_file(&path, raw_data, CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1050
1051 #[api(
1052 input: {
1053 properties: {
1054 store: {
1055 schema: DATASTORE_SCHEMA,
1056 },
1057 "backup-type": {
1058 schema: BACKUP_TYPE_SCHEMA,
1059 },
1060 "backup-id": {
1061 schema: BACKUP_ID_SCHEMA,
1062 },
1063 "backup-time": {
1064 schema: BACKUP_TIME_SCHEMA,
1065 },
1066 "filepath": {
1067 description: "Base64 encoded path.",
1068 type: String,
1069 }
1070 },
1071 },
1072 access: {
1073 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1074 },
1075 )]
1076 /// Get the entries of the given path of the catalog
1077 fn catalog(
1078 store: String,
1079 backup_type: String,
1080 backup_id: String,
1081 backup_time: i64,
1082 filepath: String,
1083 _param: Value,
1084 _info: &ApiMethod,
1085 rpcenv: &mut dyn RpcEnvironment,
1086 ) -> Result<Value, Error> {
1087 let datastore = DataStore::lookup_datastore(&store)?;
1088
1089 let username = rpcenv.get_user().unwrap();
1090 let user_info = CachedUserInfo::new()?;
1091 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
1092
1093 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1094
1095 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1096 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
1097
1098 let mut path = datastore.base_path();
1099 path.push(backup_dir.relative_path());
1100 path.push(CATALOG_NAME);
1101
1102 let index = DynamicIndexReader::open(&path)
1103 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1104
1105 let chunk_reader = LocalChunkReader::new(datastore, None);
1106 let reader = BufferedDynamicReader::new(index, chunk_reader);
1107
1108 let mut catalog_reader = CatalogReader::new(reader);
1109 let mut current = catalog_reader.root()?;
1110 let mut components = vec![];
1111
1112
1113 if filepath != "root" {
1114 components = base64::decode(filepath)?;
1115 if components.len() > 0 && components[0] == '/' as u8 {
1116 components.remove(0);
1117 }
1118 for component in components.split(|c| *c == '/' as u8) {
1119 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1120 current = entry;
1121 } else {
1122 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1123 }
1124 }
1125 }
1126
1127 let mut res = Vec::new();
1128
1129 for direntry in catalog_reader.read_dir(&current)? {
1130 let mut components = components.clone();
1131 components.push('/' as u8);
1132 components.extend(&direntry.name);
1133 let path = base64::encode(components);
1134 let text = String::from_utf8_lossy(&direntry.name);
1135 let mut entry = json!({
1136 "filepath": path,
1137 "text": text,
1138 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1139 "leaf": true,
1140 });
1141 match direntry.attr {
1142 DirEntryAttribute::Directory { start: _ } => {
1143 entry["leaf"] = false.into();
1144 },
1145 DirEntryAttribute::File { size, mtime } => {
1146 entry["size"] = size.into();
1147 entry["mtime"] = mtime.into();
1148 },
1149 _ => {},
1150 }
1151 res.push(entry);
1152 }
1153
1154 Ok(res.into())
1155 }
1156
1157 #[sortable]
1158 pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1159 &ApiHandler::AsyncHttp(&pxar_file_download),
1160 &ObjectSchema::new(
1161 "Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
1162 &sorted!([
1163 ("store", false, &DATASTORE_SCHEMA),
1164 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1165 ("backup-id", false, &BACKUP_ID_SCHEMA),
1166 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1167 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
1168 ]),
1169 )
1170 ).access(None, &Permission::Privilege(
1171 &["datastore", "{store}"],
1172 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1173 true)
1174 );
1175
1176 fn pxar_file_download(
1177 _parts: Parts,
1178 _req_body: Body,
1179 param: Value,
1180 _info: &ApiMethod,
1181 rpcenv: Box<dyn RpcEnvironment>,
1182 ) -> ApiResponseFuture {
1183
1184 async move {
1185 let store = tools::required_string_param(&param, "store")?;
1186 let datastore = DataStore::lookup_datastore(&store)?;
1187
1188 let username = rpcenv.get_user().unwrap();
1189 let user_info = CachedUserInfo::new()?;
1190 let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
1191
1192 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1193
1194 let backup_type = tools::required_string_param(&param, "backup-type")?;
1195 let backup_id = tools::required_string_param(&param, "backup-id")?;
1196 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1197
1198 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
1199
1200 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
1201 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
1202
1203 let mut path = datastore.base_path();
1204 path.push(backup_dir.relative_path());
1205
1206 let mut components = base64::decode(&filepath)?;
1207 if components.len() > 0 && components[0] == '/' as u8 {
1208 components.remove(0);
1209 }
1210
1211 let mut split = components.splitn(2, |c| *c == '/' as u8);
1212 let pxar_name = split.next().unwrap();
1213 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
1214
1215 path.push(OsStr::from_bytes(&pxar_name));
1216
1217 let index = DynamicIndexReader::open(&path)
1218 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1219
1220 let chunk_reader = LocalChunkReader::new(datastore, None);
1221 let reader = BufferedDynamicReader::new(index, chunk_reader);
1222 let archive_size = reader.archive_size();
1223 let reader = LocalDynamicReadAt::new(reader);
1224
1225 let decoder = Accessor::new(reader, archive_size).await?;
1226 let root = decoder.open_root().await?;
1227 let file = root
1228 .lookup(OsStr::from_bytes(file_path)).await?
1229 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1230
1231 let file = match file.kind() {
1232 EntryKind::File { .. } => file,
1233 EntryKind::Hardlink(_) => {
1234 decoder.follow_hardlink(&file).await?
1235 },
1236 // TODO symlink
1237 other => bail!("cannot download file of type {:?}", other),
1238 };
1239
1240 let body = Body::wrap_stream(
1241 AsyncReaderStream::new(file.contents().await?)
1242 .map_err(move |err| {
1243 eprintln!("error during streaming of '{:?}' - {}", filepath, err);
1244 err
1245 })
1246 );
1247
1248 // fixme: set other headers ?
1249 Ok(Response::builder()
1250 .status(StatusCode::OK)
1251 .header(header::CONTENT_TYPE, "application/octet-stream")
1252 .body(body)
1253 .unwrap())
1254 }.boxed()
1255 }
1256
1257 #[api(
1258 input: {
1259 properties: {
1260 store: {
1261 schema: DATASTORE_SCHEMA,
1262 },
1263 timeframe: {
1264 type: RRDTimeFrameResolution,
1265 },
1266 cf: {
1267 type: RRDMode,
1268 },
1269 },
1270 },
1271 access: {
1272 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1273 },
1274 )]
1275 /// Read datastore stats
1276 fn get_rrd_stats(
1277 store: String,
1278 timeframe: RRDTimeFrameResolution,
1279 cf: RRDMode,
1280 _param: Value,
1281 ) -> Result<Value, Error> {
1282
1283 create_value_from_rrd(
1284 &format!("datastore/{}", store),
1285 &[
1286 "total", "used",
1287 "read_ios", "read_bytes",
1288 "write_ios", "write_bytes",
1289 "io_ticks",
1290 ],
1291 timeframe,
1292 cf,
1293 )
1294 }
1295
#[sortable]
// Per-datastore API subdirectories, mounted under /admin/datastore/{store}/.
// NOTE(review): entries appear to be kept in alphabetical order by name —
// keep them sorted when adding new subdirs; confirm whether subdir lookup
// relies on this ordering.
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        // GET reads GC status, POST starts a new garbage collection
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        // GET lists snapshots, DELETE removes a single snapshot
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1366
// Router for a single datastore: GET lists the available subdirs,
// everything else is dispatched into DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
1370
1371
// Top-level router: GET lists all datastores; the {store} path segment
// is captured and routed to the per-datastore router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);