// proxmox-backup: src/api2/admin/datastore.rs
// verify: introduce & use new Datastore.Verify privilege
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::sync::{Arc, Mutex};
use std::path::{Path, PathBuf};
use std::pin::Pin;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
use pxar::EntryKind;

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;

use crate::server::WorkerTask;
use crate::tools::{
    self,
    zip::{ZipEncoder, ZipEntry},
    AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
};

use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_VERIFY,
};

fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);

    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}

fn check_backup_owner(
    owner: &Authid,
    auth_id: &Authid,
) -> Result<(), Error> {
    let correct_owner = owner == auth_id
        || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
    if !correct_owner {
        bail!("backup owner check failed ({} != {})", auth_id, owner);
    }
    Ok(())
}
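
// A minimal sketch of the access semantics above, assuming `Authid` parses
// the usual PBS auth-id forms ("user@realm" and "user@realm!token") just as
// it does for the `rpcenv.get_auth_id()` strings elsewhere in this file.
// `privs & required_privs == 0` means holding *any one* of the required
// privilege bits skips the owner check; otherwise ownership decides, and a
// token counts as owned by its user (but not vice versa).
#[cfg(test)]
mod owner_check_tests {
    use super::*;

    #[test]
    fn any_required_priv_bit_suffices() {
        let required = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ;
        // holding only the audit bit already satisfies the mask test ...
        assert_ne!(PRIV_DATASTORE_AUDIT & required, 0);
        // ... while an unrelated bit does not
        assert_eq!(PRIV_DATASTORE_BACKUP & required, 0);
    }

    #[test]
    fn token_owner_matches_its_user() -> Result<(), Error> {
        let user: Authid = "alice@pbs".parse()?;
        let token: Authid = "alice@pbs!token1".parse()?;
        // a backup owned by the token is accessible to the token's user ...
        check_backup_owner(&token, &user)?;
        // ... but a user-owned backup is not accessible to the user's token
        assert!(check_backup_owner(&user, &token).is_err());
        Ok(())
    }
}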

fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert_with(Vec::new);
        time_list.push(info);
    }

    group_hash
}
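
// Hedged illustration of the grouping key used above: `group_path()` joins
// backup type and id into a relative path, so a VM group hashes under e.g.
// "vm/100". A minimal sketch, assuming `BackupGroup::new` accepts plain
// `&str` arguments as it does in `verify()` below.
#[cfg(test)]
mod group_key_tests {
    use super::*;

    #[test]
    fn group_path_is_type_slash_id() {
        let group = BackupGroup::new("vm", "100");
        assert_eq!(group.group_path().to_str(), Some("vm/100"));
    }
}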

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = Vec::new();

    for (_group_id, mut list) in group_hash {

        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];

        let group = info.backup_dir.group();

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;
        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            continue;
        }

        let result_item = GroupListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            last_backup: info.backup_dir.backup_time(),
            backup_count: list.len() as u64,
            files: info.files.clone(),
            owner: Some(owner),
        };
        groups.push(result_item);
    }

    Ok(groups)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(ref backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(ref backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            continue;
        }

        let mut size = None;

        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let verify = manifest.unprotected["verify_state"].clone();
                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                (comment, verify, files)
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                (
                    None,
                    None,
                    info
                        .files
                        .iter()
                        .map(|x| BackupContent {
                            filename: x.to_string(),
                            size: None,
                            crypt_mode: None,
                        })
                        .collect()
                )
            },
        };

        let result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time(),
            comment,
            verification,
            files,
            size,
            owner: Some(owner),
        };

        snapshots.push(result_item);
    }

    Ok(snapshots)
}
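
// The snapshot comment above is just the first line of the manifest's
// "notes" field; a small std-only sketch of that extraction:
#[cfg(test)]
mod comment_extraction_tests {
    #[test]
    fn comment_is_first_line_of_notes() {
        let notes = "first line\nmore\ndetails";
        let comment: Option<String> = Some(notes)
            .and_then(|notes| notes.lines().next())
            .map(String::from);
        assert_eq!(comment.as_deref(), Some("first line"));
    }
}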

fn get_snapshots_count(store: &DataStore) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let backup_list = BackupInfo::list_backups(&base_path)?;
    let mut groups = HashSet::new();

    let mut result = Counts {
        ct: None,
        host: None,
        vm: None,
        other: None,
    };

    for info in backup_list {
        let group = info.backup_dir.group();

        let id = group.backup_id();
        let backup_type = group.backup_type();

        let mut new_id = false;

        if groups.insert(format!("{}-{}", &backup_type, &id)) {
            new_id = true;
        }

        let mut counts = match backup_type {
            "ct" => result.ct.take().unwrap_or_default(),
            "host" => result.host.take().unwrap_or_default(),
            "vm" => result.vm.take().unwrap_or_default(),
            _ => result.other.take().unwrap_or_default(),
        };

        counts.snapshots += 1;
        if new_id {
            counts.groups += 1;
        }

        match backup_type {
            "ct" => result.ct = Some(counts),
            "host" => result.host = Some(counts),
            "vm" => result.vm = Some(counts),
            _ => result.other = Some(counts),
        }
    }

    Ok(result)
}
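
// Group counting above relies on `HashSet::insert` returning `true` only for
// values not yet present, so each "type-id" key bumps the group counter once:
#[cfg(test)]
mod snapshot_count_tests {
    use std::collections::HashSet;

    #[test]
    fn hashset_insert_flags_new_groups() {
        let mut groups = HashSet::new();
        assert!(groups.insert(format!("{}-{}", "vm", "100")));   // new group
        assert!(!groups.insert(format!("{}-{}", "vm", "100")));  // seen before
    }
}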

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let counts = get_snapshots_count(&datastore)?;
    let gc_status = datastore.last_gc_status();

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id.clone()),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));

            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    datastore,
                    &backup_dir,
                    verified_chunks,
                    corrupt_chunks,
                    worker.clone(),
                    worker.upid().clone(),
                    None,
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let (_count, failed_dirs) = verify_backup_group(
                    datastore,
                    &backup_group,
                    verified_chunks,
                    corrupt_chunks,
                    None,
                    worker.clone(),
                    worker.upid(),
                    None,
                )?;
                failed_dirs
            } else {
                let privs = CachedUserInfo::new()?
                    .lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
            };
            if !failed_dirs.is_empty() {
                worker.log("Failed to verify the following snapshots:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
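
// The worker id encodes what is being verified: "<store>" for whole-datastore
// runs, "<store>:<type>/<id>" for a group, and "<store>:<type>/<id>/<time as
// 8-digit upper-case hex>" for a single snapshot. A minimal format sketch:
#[cfg(test)]
mod verify_worker_id_tests {
    #[test]
    fn snapshot_worker_id_uses_hex_time() {
        let worker_id =
            format!("{}:{}/{}/{:08X}", "store1", "vm", "100", 0x5F4F_8F43i64);
        assert_eq!(worker_id, "store1:vm/100/5F4F8F43");
    }
}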

#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}

pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating whether each is kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
    );

fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
            store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();

        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
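
// If no keep-* option is given, `keeps_something()` is false and prune keeps
// everything. A minimal sketch, assuming `PruneOptions` can be built from the
// same public fields used in `prune()` above:
#[cfg(test)]
mod prune_option_tests {
    use super::*;

    #[test]
    fn empty_selection_keeps_all() {
        let prune_options = PruneOptions {
            keep_last: None,
            keep_hourly: None,
            keep_daily: None,
            keep_weekly: None,
            keep_monthly: None,
            keep_yearly: None,
        };
        assert!(!prune_options.keeps_something());
    }
}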

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection",
        Some(store.clone()),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(&*worker, worker.upid())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: {
            description: "Datastore name and description.",
            properties: {
                store: {
                    schema: DATASTORE_SCHEMA,
                },
                comment: {
                    optional: true,
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                },
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            let mut entry = json!({ "store": store });
            if let Some(comment) = data["comment"].as_str() {
                entry["comment"] = comment.into();
            }
            list.push(entry);
        }
    }

    Ok(list.into())
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
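
// Decoder selection above keys off the suffix after the final '.';
// `rsplitn(2, '.')` yields that suffix first, and the whole name when there
// is no dot. A std-only sketch:
#[cfg(test)]
mod extension_tests {
    #[test]
    fn rsplitn_extracts_last_extension() {
        assert_eq!("root.pxar.didx".rsplitn(2, '.').next().unwrap(), "didx");
        assert_eq!("disk.img.fidx".rsplitn(2, '.').next().unwrap(), "fidx");
        assert_eq!("noext".rsplitn(2, '.').next().unwrap(), "noext");
    }
}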

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);
    let mut current = catalog_reader.root()?;
    let mut components = vec![];

    if filepath != "root" {
        components = base64::decode(filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }
        for component in components.split(|c| *c == b'/') {
            if let Some(entry) = catalog_reader.lookup(&current, component)? {
                current = entry;
            } else {
                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
            }
        }
    }

    let mut res = Vec::new();

    for direntry in catalog_reader.read_dir(&current)? {
        let mut components = components.clone();
        components.push(b'/');
        components.extend(&direntry.name);
        let path = base64::encode(components);
        let text = String::from_utf8_lossy(&direntry.name);
        let mut entry = json!({
            "filepath": path,
            "text": text,
            "type": CatalogEntryType::from(&direntry.attr).to_string(),
            "leaf": true,
        });
        match direntry.attr {
            DirEntryAttribute::Directory { start: _ } => {
                entry["leaf"] = false.into();
            },
            DirEntryAttribute::File { size, mtime } => {
                entry["size"] = size.into();
                entry["mtime"] = mtime.into();
            },
            _ => {},
        }
        res.push(entry);
    }

    Ok(res.into())
}
fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard the last path component
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creation of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error while finishing zip: {}", err))
                });

                Body::wrap_stream(receiver.map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// Set "notes" for a specific backup
fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup",
    },
)]
/// Change owner of a backup group
fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(&backup_group)?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            },
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id
                    && new_owner == auth_id
            },
            (false, true) => {
                // API token owner to API token
                owner == auth_id
                    && Authid::from(new_owner.user().clone()) == auth_id
            },
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            },
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!("{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}
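
// Unprivileged owner changes are only allowed between a user and that same
// user's API tokens. A minimal sketch of the (owner, new_owner) token
// relation checked above, assuming the usual auth-id string forms:
#[cfg(test)]
mod owner_change_tests {
    use super::*;

    #[test]
    fn tokens_of_same_user_are_exchangeable() -> Result<(), Error> {
        let auth_id: Authid = "alice@pbs".parse()?;
        let owner: Authid = "alice@pbs!a".parse()?;
        let new_owner: Authid = "alice@pbs!b".parse()?;
        // (true, true) arm: both tokens belong to the requesting user
        assert!(owner.user() == new_owner.user()
            && Authid::from(owner.user().clone()) == auth_id);
        Ok(())
    }
}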

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);