// proxmox-backup.git: src/api2/admin/datastore.rs (commit "verify: factor out common parameters")
//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::pin::Pin;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::{ReturnType, SubdirMap};
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
use pxar::EntryKind;

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;

use crate::server::{jobstate::Job, WorkerTask};
use crate::tools::{
    self,
    zip::{ZipEncoder, ZipEntry},
    AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
};

use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_VERIFY,
};

fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);

    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}

fn check_backup_owner(
    owner: &Authid,
    auth_id: &Authid,
) -> Result<(), Error> {
    let correct_owner = owner == auth_id
        || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
    if !correct_owner {
        bail!("backup owner check failed ({} != {})", auth_id, owner);
    }
    Ok(())
}
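
// Illustrative note (not part of the original file): the check above lets a
// token's backing user act on the token's backups, but not the reverse.
// Assuming `Authid` parses from strings as it does elsewhere in this crate,
// the intent is roughly:
//
//     let token: Authid = "alice@pbs!mytoken".parse()?;
//     let user: Authid = "alice@pbs".parse()?;
//     assert!(check_backup_owner(&token, &user).is_ok());  // user covers its tokens' backups
//     assert!(check_backup_owner(&user, &token).is_err()); // a token does not cover its user's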

fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;

    let group_info = backup_groups
        .into_iter()
        .fold(Vec::new(), |mut group_info, group| {
            let owner = match datastore.get_owner(&group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return group_info;
                },
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return group_info;
            }

            let snapshots = match group.list_backups(&datastore.base_path()) {
                Ok(snapshots) => snapshots,
                Err(_) => {
                    return group_info;
                },
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return group_info;
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |last, curr| {
                    if curr.is_finished()
                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
                        curr
                    } else {
                        last
                    }
                })
                .to_owned();

            group_info.push(GroupListItem {
                backup_type: group.backup_type().to_string(),
                backup_id: group.backup_id().to_string(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
            });

            group_info
        });

    Ok(group_info)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            let mut groups = Vec::with_capacity(1);
            groups.push(BackupGroup::new(backup_type, backup_id));
            groups
        },
        (Some(backup_type), None) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_type() == backup_type)
                .collect()
        },
        (None, Some(backup_id)) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_id() == backup_id)
                .collect()
        },
        _ => BackupInfo::list_backup_groups(&base_path)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    },
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                }
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                }
            },
        }
    };

    groups
        .iter()
        .try_fold(Vec::new(), |mut snapshots, group| {
            let owner = match datastore.get_owner(group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return Ok(snapshots);
                },
            };

            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(snapshots);
            }

            let group_backups = group.list_backups(&datastore.base_path())?;

            snapshots.extend(
                group_backups
                    .into_iter()
                    .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
            );

            Ok(snapshots)
        })
}

fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let groups = BackupInfo::list_backup_groups(&base_path)?;

    groups.iter()
        .filter(|group| {
            let owner = match store.get_owner(&group) {
                Ok(owner) => owner,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              store.name(),
                              group,
                              err);
                    return false;
                },
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&base_path)?.len() as u64;

            let type_count = match group.backup_type() {
                "ct" => counts.ct.get_or_insert(Default::default()),
                "vm" => counts.vm.get_or_insert(Default::default()),
                "host" => counts.host.get_or_insert(Default::default()),
                _ => counts.other.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;

            Ok(counts)
        })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    None,
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    None,
                )?;
                failed_dirs
            } else {
                let privs = CachedUserInfo::new()?
                    .lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(&verify_worker, worker.upid(), owner, None)?
            };
            if !failed_dirs.is_empty() {
                worker.log("Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
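
// Usage sketch (the path is an assumption based on the router at the bottom
// of this file; the exact mount point comes from the parent api2 module):
//
//     POST /api2/json/admin/datastore/{store}/verify
//     { "backup-type": "vm", "backup-id": "100", "backup-time": 1612345678 }
//
// With all three parameters a single snapshot is verified; with only type and
// id the whole group; with none, the whole datastore. The call returns the
// UPID of the spawned worker task.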

#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
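
// Usage note (sketch): the macro splices the six "keep-*" schema tuples
// between the two caller-supplied lists, yielding one flat array suitable
// for `ObjectSchema::new`. The invocation in `API_METHOD_PRUNE` below thus
// expands to roughly:
//
//     [("backup-id", ...), ("backup-type", ...), ("dry-run", ...),
//      ("keep-daily", ...), ..., ("keep-yearly", ...), ("store", ...)]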

pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
765 "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();

pub const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(ReturnType::new(false, &API_RETURN_SCHEMA_PRUNE))
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
    );

pub fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                           store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();
        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
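
// Example (sketch): a dry-run prune request returns the keep/remove plan for
// the group without deleting anything:
//
//     POST /api2/json/admin/datastore/{store}/prune
//     { "backup-type": "vm", "backup-id": "100", "keep-last": 3, "dry-run": true }
//
// Each returned entry carries "backup-time" and a boolean "keep" flag, as
// built in the loops above.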

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
        .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {

    let (config, _digest) = datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            list.push(
                DataStoreListItem {
                    store: store.clone(),
                    comment: data["comment"].as_str().map(String::from),
                }
            );
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| bytes.freeze())
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog.
pub fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);
    let mut current = catalog_reader.root()?;
    let mut components = vec![];
    if filepath != "root" {
        components = base64::decode(filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }
        for component in components.split(|c| *c == b'/') {
            if let Some(entry) = catalog_reader.lookup(&current, component)? {
                current = entry;
            } else {
                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
            }
        }
    }

    let mut res = Vec::new();

    for direntry in catalog_reader.read_dir(&current)? {
        let mut components = components.clone();
        components.push(b'/');
        components.extend(&direntry.name);
        let path = base64::encode(components);
        let text = String::from_utf8_lossy(&direntry.name);
        let mut entry = json!({
            "filepath": path,
            "text": text,
            "type": CatalogEntryType::from(&direntry.attr).to_string(),
            "leaf": true,
        });
        match direntry.attr {
            DirEntryAttribute::Directory { start: _ } => {
                entry["leaf"] = false.into();
            },
            DirEntryAttribute::File { size, mtime } => {
                entry["size"] = size.into();
                entry["mtime"] = mtime.into();
            },
            _ => {},
        }
        res.push(entry);
    }

    Ok(res.into())
}
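
// Example (sketch): "filepath" is the base64-encoded catalog path, with the
// literal string "root" selecting the top level. Listing /etc inside the
// catalog would pass base64("/etc") == "L2V0Yw==", and each returned entry's
// "filepath" is the base64 of its full path, ready to be passed back to
// descend one more level.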

fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or_else(|| format_err!("filepath looks strange '{}'", filepath))?;
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or_else(|| format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard the last component
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creation of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error while finishing zip: {}", err))
                });

                Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
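
// Example (sketch): here "filepath" is base64 of "<archive>/<path inside>",
// e.g. base64("root.pxar.didx/etc/hostname"); the splitn above takes the
// first component as the pxar index file name and looks the remainder up
// inside the archive. Directories are streamed back as a zip built by
// recurse_files.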

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(&backup_group)?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            },
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id
                    && new_owner == auth_id
            },
            (false, true) => {
                // API token owner to API token
                owner == auth_id
                    && Authid::from(new_owner.user().clone()) == auth_id
            },
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            },
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(UNAUTHORIZED,
                             "{} does not have permission to change owner of backup group '{}' to {}",
                             auth_id,
                             backup_group,
                             new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!("{} '{}' is inactive or non-existent",
              if new_owner.is_token() {
                  "API token".to_string()
              } else {
                  "user".to_string()
              },
              new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}
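
// Quick reference (sketch) for the unprivileged (Datastore.Backup) branch of
// the match above, where the caller is `auth_id`:
//
//     current owner | new owner | allowed when
//     --------------+-----------+-------------------------------------------
//     token         | token     | both tokens belong to the calling user
//     token         | user      | caller owns the token and is the new owner
//     user          | token     | caller is the owner and owns the new token
//     user          | user      | never (requires Datastore.Modify)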

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);