proxmox-backup.git: src/api2/admin/datastore.rs
verify: keep also track of corrupt chunks
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::try_block;
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;

use crate::server::WorkerTask;
use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
};

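/// Check that the given user is the owner of the backup group; bail otherwise.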
fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
    let owner = store.get_owner(group)?;
    if &owner != userid {
        bail!("backup owner check failed ({} != {})", userid, owner);
    }
    Ok(())
}

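/// Load the manifest of a snapshot and list its files, including the manifest blob itself.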
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {

    let (manifest, manifest_crypt_mode, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: Some(manifest_crypt_mode),
        size: Some(index_size),
    });

    Ok(result)
}

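/// Merge the manifest file list with the files actually present in the snapshot
/// directory; files without a manifest entry are listed with unknown size and crypt mode.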
fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<Vec<BackupContent>, Error> {
    let mut files = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok(files)
}

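/// Group a flat snapshot list by backup group path (e.g. "vm/100").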
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert(vec![]);
        time_list.push(info);
    }

    group_hash
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = Vec::new();

    for (_group_id, mut list) in group_hash {

        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];

        let group = info.backup_dir.group();

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;
        if !list_all && owner != username { continue; }

        let result_item = GroupListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            last_backup: info.backup_dir.backup_time().timestamp(),
            backup_count: list.len() as u64,
            files: info.files.clone(),
            owner: Some(owner),
        };
        groups.push(result_item);
    }

    Ok(groups)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    get_all_snapshot_files(&datastore, &info)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(ref backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(ref backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;

        if !list_all && owner != username { continue; }

        let mut size = None;

        let files = match get_all_snapshot_files(&datastore, &info) {
            Ok(files) => {
                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
                files
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                info
                    .files
                    .iter()
                    .map(|x| BackupContent {
                        filename: x.to_string(),
                        size: None,
                        crypt_mode: None,
                    })
                    .collect()
            },
        };

        let result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time().timestamp(),
            files,
            size,
            owner: Some(owner),
        };

        snapshots.push(result_item);
    }

    Ok(snapshots)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    crate::tools::disks::disk_usage(&datastore.base_path())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time);
            backup_dir = Some(dir);
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);
            backup_group = Some(group);
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let username = rpcenv.get_user().unwrap();
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
        {
            let success = if let Some(backup_dir) = backup_dir {
                // keep track of verified and corrupt chunks across all indexes
                // of this snapshot, so each chunk is checked at most once
                let mut verified_chunks = HashSet::with_capacity(1024*16);
                let mut corrupt_chunks = HashSet::with_capacity(64);
                verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)?
            } else if let Some(backup_group) = backup_group {
                verify_backup_group(&datastore, &backup_group, &worker)?
            } else {
                verify_all_backups(&datastore, &worker)?
            };
            if !success {
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        })?;

    Ok(json!(upid_str))
}
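
// Hypothetical usage sketch (not part of this file): assuming the standard
// mount point of this router, a snapshot verification could be triggered with
// something like
//   POST /api2/json/admin/datastore/{store}/verify
// passing backup-type, backup-id and backup-time for a single snapshot, only
// backup-type and backup-id for a group, or no extra parameters for the whole
// datastore. The call returns a UPID string identifying the worker task.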

/// Helper macro: splice the common "keep-*" prune options
/// (last/hourly/daily/weekly/monthly/yearly) into an API parameter list.
#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}

pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if they are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);

fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, &group, &username)?; }

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;

    let result = try_block! {
        if keep_all {
            worker.log("No prune selection - keeping all files.");
        } else {
            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
            worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                store, backup_type, backup_id));
        }

        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let timestamp = BackupDir::backup_time_to_string(backup_time);
            let group = info.backup_dir.group();

            let msg = format!(
                "{}/{}/{} {}",
                group.backup_type(),
                group.backup_id(),
                timestamp,
                if keep { "keep" } else { "remove" },
            );

            worker.log(msg);

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));

            if !(dry_run || keep) {
                datastore.remove_backup_dir(&info.backup_dir, true)?;
            }
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    };

    Ok(json!(prune_result))
}

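// Illustrative call (hypothetical values): pruning group "vm/100" on store
// "store1" with {"keep-last": 3, "dry-run": true} returns one entry per
// snapshot with a "keep" flag without removing anything; with "dry-run"
// unset, snapshots marked "remove" are deleted and each action is written
// to the worker task log.
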
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
        {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(&worker)
        })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: {
            description: "Datastore name and description.",
            properties: {
                store: {
                    schema: DATASTORE_SCHEMA,
                },
                comment: {
                    optional: true,
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                },
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            let mut entry = json!({ "store": store });
            if let Some(comment) = data["comment"].as_str() {
                entry["comment"] = comment.into();
            }
            list.push(entry);
        }
    }

    Ok(list.into())
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

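/// Stream a single snapshot file as raw bytes (blob or index data exactly as
/// stored on disk, without decoding).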
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

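/// Stream the decoded (reassembled) content of a snapshot file: .didx and
/// .fidx indexes are read back chunk by chunk, .blob files are decoded on the
/// fly; encrypted files are rejected.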
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        let files = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

                let chunk_reader = LocalChunkReader::new(datastore, None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let chunk_reader = LocalChunkReader::new(datastore, None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

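/// Store the client's log as 'client.log.blob' next to the snapshot data; the
/// upload is rejected if the snapshot already contains a log.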
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let username = rpcenv.get_user().unwrap();
        check_backup_owner(&datastore, backup_dir.group(), &username)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
            backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

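// Note on "filepath" below: the client passes either the literal string "root"
// or one of the base64 encoded paths previously returned by this endpoint
// (each entry in the reply carries its own encoded "filepath").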
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(CATALOG_NAME);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let chunk_reader = LocalChunkReader::new(datastore, None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);
    let mut current = catalog_reader.root()?;
    let mut components = vec![];

    if filepath != "root" {
        components = base64::decode(filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }
        for component in components.split(|c| *c == b'/') {
            if let Some(entry) = catalog_reader.lookup(&current, component)? {
                current = entry;
            } else {
                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
            }
        }
    }

    let mut res = Vec::new();

    for direntry in catalog_reader.read_dir(&current)? {
        let mut components = components.clone();
        components.push(b'/');
        components.extend(&direntry.name);
        let path = base64::encode(components);
        let text = String::from_utf8_lossy(&direntry.name);
        let mut entry = json!({
            "filepath": path,
            "text": text,
            "type": CatalogEntryType::from(&direntry.attr).to_string(),
            "leaf": true,
        });
        match direntry.attr {
            DirEntryAttribute::Directory { start: _ } => {
                entry["leaf"] = false.into();
            },
            DirEntryAttribute::File { size, mtime } => {
                entry["size"] = size.into();
                entry["mtime"] = mtime.into();
            },
            _ => {},
        }
        res.push(entry);
    }

    Ok(res.into())
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

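/// Extract a single file out of a pxar archive. The decoded "filepath" must
/// start with the archive file name, followed by the path inside the archive
/// (a hypothetical example: "root.pxar.didx/etc/hostname").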
fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = split.next().unwrap();
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;

        path.push(OsStr::from_bytes(&pxar_name));

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let chunk_reader = LocalChunkReader::new(datastore, None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let file = match file.kind() {
            EntryKind::File { .. } => file,
            EntryKind::Hardlink(_) => {
                decoder.follow_hardlink(&file).await?
            },
            // TODO symlink
            other => bail!("cannot download file of type {:?}", other),
        };

        let body = Body::wrap_stream(
            AsyncReaderStream::new(file.contents().await?)
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", filepath, err);
                    err
                })
        );

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

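// The router below matches the datastore name into "{store}", so each subdir
// above is reachable as .../admin/datastore/{store}/<subdir>, while a GET on
// the base path lists the accessible datastores.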
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);