use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::try_block;
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;

use crate::server::WorkerTask;
use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
};

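/// Check that `userid` owns the backup group, and bail out with an error
/// otherwise. The handlers below use this to restrict non-privileged users
/// to their own backups.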
fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
    let owner = store.get_owner(group)?;
    if &owner != userid {
        bail!("backup owner check failed ({} != {})", userid, owner);
    }
    Ok(())
}

fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            encrypted: item.encrypted,
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        encrypted: Some(false),
        size: Some(index_size),
    });

    Ok(result)
}

fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<Vec<BackupContent>, Error> {
    let mut files = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent { filename: file.to_string(), size: None, encrypted: None });
    }

    Ok(files)
}

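/// Group a flat list of backups by their group path (e.g. "vm/100").
///
/// A minimal usage sketch (the variable names are illustrative):
/// ```ignore
/// let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
/// let group_hash = group_backups(backup_list);
/// for (group_id, snapshots) in &group_hash {
///     println!("{}: {} snapshot(s)", group_id, snapshots.len());
/// }
/// ```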
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert(vec![]);
        time_list.push(info);
    }

    group_hash
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = Vec::new();

    for (_group_id, mut list) in group_hash {

        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];

        let group = info.backup_dir.group();

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;
        if !list_all {
            if owner != username { continue; }
        }

        let result_item = GroupListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            last_backup: info.backup_dir.backup_time().timestamp(),
            backup_count: list.len() as u64,
            files: info.files.clone(),
            owner: Some(owner),
        };
        groups.push(result_item);
    }

    Ok(groups)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    get_all_snapshot_files(&datastore, &info)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }

    datastore.remove_backup_dir(&snapshot)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(ref backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(ref backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;

        if !list_all {
            if owner != username { continue; }
        }

        let mut size = None;

        let files = match get_all_snapshot_files(&datastore, &info) {
            Ok(files) => {
                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
                files
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                info.files.iter().map(|x| BackupContent { filename: x.to_string(), size: None, encrypted: None }).collect()
            },
        };

        let result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time().timestamp(),
            files,
            size,
            owner: Some(owner),
        };

        snapshots.push(result_item);
    }

    Ok(snapshots)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    crate::tools::disks::disk_usage(&datastore.base_path())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
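///
/// The scope is selected via the optional parameters:
/// - `backup-type` + `backup-id` + `backup-time`: verify a single snapshot
/// - `backup-type` + `backup-id`: verify all snapshots of the group
/// - none of the three: verify every backup in the datastore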
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            let dir = BackupDir::new(backup_type, backup_id, backup_time);
            worker_id = format!("{}_{}", store, dir);
            backup_dir = Some(dir);
        }
        (Some(backup_type), Some(backup_id), None) => {
            let group = BackupGroup::new(backup_type, backup_id);
            worker_id = format!("{}_{}", store, group);
            backup_group = Some(group);
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let username = rpcenv.get_user().unwrap();
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
        {
            let success = if let Some(backup_dir) = backup_dir {
                verify_backup_dir(&datastore, &backup_dir, &worker)?
            } else if let Some(backup_group) = backup_group {
                verify_backup_group(&datastore, &backup_group, &worker)?
            } else {
                verify_all_backups(&datastore, &worker)?
            };
            if !success {
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        })?;

    Ok(json!(upid_str))
}

#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
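
// A minimal sketch of what the macro produces (see `API_METHOD_PRUNE` below
// for the real call site): the two token lists are spliced around the common
// `keep-*` schema tuples, so
//
//     add_common_prune_prameters!([("backup-id", false, &BACKUP_ID_SCHEMA)])
//
// expands to an array that starts with the "backup-id" tuple, followed by the
// "keep-daily", "keep-hourly", "keep-last", "keep-monthly", "keep-weekly"
// and "keep-yearly" entries.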

pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if they are kept or removed.",
    PruneListItem::API_SCHEMA
).schema();

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);

fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, &group, &username)?; }

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;

    let result = try_block! {
        if keep_all {
            worker.log("No prune selection - keeping all files.");
        } else {
            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
            worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                store, backup_type, backup_id));
        }

        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let timestamp = BackupDir::backup_time_to_string(backup_time);
            let group = info.backup_dir.group();

            let msg = format!(
                "{}/{}/{} {}",
                group.backup_type(),
                group.backup_id(),
                timestamp,
                if keep { "keep" } else { "remove" },
            );

            worker.log(msg);

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time.timestamp(),
                "keep": keep,
            }));

            if !(dry_run || keep) {
                datastore.remove_backup_dir(&info.backup_dir)?;
            }
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    };

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
        {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(&worker)
        })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: {
            description: "Datastore name and description.",
            properties: {
                store: {
                    schema: DATASTORE_SCHEMA,
                },
                comment: {
                    optional: true,
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                },
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            let mut entry = json!({ "store": store });
            if let Some(comment) = data["comment"].as_str() {
                entry["comment"] = comment.into();
            }
            list.push(entry);
        }
    }

    Ok(list.into())
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
            .await?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        let files = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.encrypted == Some(true) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

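        // rsplitn(2, '.') yields the text after the last '.' first, so this
        // is the file extension: "didx", "fidx" or "blob" (or the whole file
        // name if it contains no dot).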
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

                let chunk_reader = LocalChunkReader::new(datastore, None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let chunk_reader = LocalChunkReader::new(datastore, None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let username = rpcenv.get_user().unwrap();
        check_backup_owner(&datastore, backup_dir.group(), &username)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

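        // collect the whole request body into one buffer before decoding it
        // as a DataBlob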
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        let blob = DataBlob::from_raw(data)?;
        // always verify CRC at server side
        blob.verify_crc()?;
        let raw_data = blob.raw_data();
        replace_file(&path, raw_data, CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
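///
/// `filepath` is either the literal string "root" (the catalog root) or the
/// base64 encoding of a path inside the catalog. A hedged client-side sketch
/// (the endpoint path is shown for illustration only):
/// ```ignore
/// let filepath = base64::encode("/etc/hostname");
/// // GET /api2/json/admin/datastore/{store}/catalog
/// //     ?backup-type=...&backup-id=...&backup-time=...&filepath={filepath}
/// ```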
fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(CATALOG_NAME);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let chunk_reader = LocalChunkReader::new(datastore, None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);
    let mut current = catalog_reader.root()?;
    let mut components = vec![];

    if filepath != "root" {
        components = base64::decode(filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }
        for component in components.split(|c| *c == b'/') {
            if let Some(entry) = catalog_reader.lookup(&current, component)? {
                current = entry;
            } else {
                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
            }
        }
    }

    let mut res = Vec::new();

    for direntry in catalog_reader.read_dir(&current)? {
        let mut components = components.clone();
        components.push(b'/');
        components.extend(&direntry.name);
        let path = base64::encode(components);
        let text = String::from_utf8_lossy(&direntry.name);
        let mut entry = json!({
            "filepath": path,
            "text": text,
            "type": CatalogEntryType::from(&direntry.attr).to_string(),
            "leaf": true,
        });
        match direntry.attr {
            DirEntryAttribute::Directory { start: _ } => {
                entry["leaf"] = false.into();
            },
            DirEntryAttribute::File { size, mtime } => {
                entry["size"] = size.into();
                entry["mtime"] = mtime.into();
            },
            _ => {},
        }
        res.push(entry);
    }

    Ok(res.into())
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

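        // The decoded `filepath` is expected to look like
        // "<archive-name>.didx/<path/inside/the/archive>": split once on the
        // first '/' to separate the pxar index file name from the inner path.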
        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = split.next().unwrap();
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;

        path.push(OsStr::from_bytes(&pxar_name));

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let chunk_reader = LocalChunkReader::new(datastore, None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let file = match file.kind() {
            EntryKind::File { .. } => file,
            EntryKind::Hardlink(_) => {
                decoder.follow_hardlink(&file).await?
            },
            // TODO symlink
            other => bail!("cannot download file of type {:?}", other),
        };

        let body = Body::wrap_stream(
            AsyncReaderStream::new(file.contents().await?)
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", filepath, err);
                    err
                })
        );

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);