git.proxmox.com Git - proxmox-backup.git / blob - src/api2/admin/datastore.rs (commit cb748194ca9cf47f6e0716b78092004fa8604139)
1 use std::collections::HashSet;
2 use std::ffi::OsStr;
3 use std::os::unix::ffi::OsStrExt;
4 use std::sync::{Arc, Mutex};
5 use std::path::{Path, PathBuf};
6 use std::pin::Pin;
7
8 use anyhow::{bail, format_err, Error};
9 use futures::*;
10 use hyper::http::request::Parts;
11 use hyper::{header, Body, Response, StatusCode};
12 use serde_json::{json, Value};
13 use tokio_stream::wrappers::ReceiverStream;
14
15 use proxmox::api::{
16 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
17 RpcEnvironment, RpcEnvironmentType, Permission
18 };
19 use proxmox::api::router::{ReturnType, SubdirMap};
20 use proxmox::api::schema::*;
21 use proxmox::tools::fs::{replace_file, CreateOptions};
22 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
23
24 use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
25 use pxar::EntryKind;
26
27 use crate::api2::types::*;
28 use crate::api2::node::rrd::create_value_from_rrd;
29 use crate::backup::*;
30 use crate::config::datastore;
31 use crate::config::cached_user_info::CachedUserInfo;
32
33 use crate::server::{jobstate::Job, WorkerTask};
34 use crate::tools::{
35 self,
36 zip::{ZipEncoder, ZipEntry},
37 AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
38 };
39
40 use crate::config::acl::{
41 PRIV_DATASTORE_AUDIT,
42 PRIV_DATASTORE_MODIFY,
43 PRIV_DATASTORE_READ,
44 PRIV_DATASTORE_PRUNE,
45 PRIV_DATASTORE_BACKUP,
46 PRIV_DATASTORE_VERIFY,
47 };
48
49 fn check_priv_or_backup_owner(
50 store: &DataStore,
51 group: &BackupGroup,
52 auth_id: &Authid,
53 required_privs: u64,
54 ) -> Result<(), Error> {
55 let user_info = CachedUserInfo::new()?;
56 let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);
57
58 if privs & required_privs == 0 {
59 let owner = store.get_owner(group)?;
60 check_backup_owner(&owner, auth_id)?;
61 }
62 Ok(())
63 }
64
65 fn check_backup_owner(
66 owner: &Authid,
67 auth_id: &Authid,
68 ) -> Result<(), Error> {
69 let correct_owner = owner == auth_id
70 || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
71 if !correct_owner {
72 bail!("backup owner check failed ({} != {})", auth_id, owner);
73 }
74 Ok(())
75 }
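// Usage sketch (hypothetical identifiers, for illustration only): an API token
// also passes the check when the caller is the user owning that token, e.g.
//
//     let owner: Authid = "alice@pbs!mytoken".parse()?;
//     let caller: Authid = "alice@pbs".parse()?;
//     assert!(check_backup_owner(&owner, &caller).is_ok());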
76
77 fn read_backup_index(
78 store: &DataStore,
79 backup_dir: &BackupDir,
80 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
81
82 let (manifest, index_size) = store.load_manifest(backup_dir)?;
83
84 let mut result = Vec::new();
85 for item in manifest.files() {
86 result.push(BackupContent {
87 filename: item.filename.clone(),
88 crypt_mode: Some(item.crypt_mode),
89 size: Some(item.size),
90 });
91 }
92
93 result.push(BackupContent {
94 filename: MANIFEST_BLOB_NAME.to_string(),
95 crypt_mode: match manifest.signature {
96 Some(_) => Some(CryptMode::SignOnly),
97 None => Some(CryptMode::None),
98 },
99 size: Some(index_size),
100 });
101
102 Ok((manifest, result))
103 }
104
105 fn get_all_snapshot_files(
106 store: &DataStore,
107 info: &BackupInfo,
108 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
109
110 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
111
112 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
113 acc.insert(item.filename.clone());
114 acc
115 });
116
117 for file in &info.files {
118 if file_set.contains(file) { continue; }
119 files.push(BackupContent {
120 filename: file.to_string(),
121 size: None,
122 crypt_mode: None,
123 });
124 }
125
126 Ok((manifest, files))
127 }
128
129 #[api(
130 input: {
131 properties: {
132 store: {
133 schema: DATASTORE_SCHEMA,
134 },
135 },
136 },
137 returns: {
138 type: Array,
139 description: "Returns the list of backup groups.",
140 items: {
141 type: GroupListItem,
142 }
143 },
144 access: {
145 permission: &Permission::Privilege(
146 &["datastore", "{store}"],
147 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
148 true),
149 },
150 )]
151 /// List backup groups.
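///
/// With `Datastore.Audit` on the datastore all backup groups are listed; with
/// only `Datastore.Backup` the result is restricted to groups owned by the
/// caller. Reached via the `groups` subdir of the datastore router below,
/// roughly `GET /api2/json/admin/datastore/{store}/groups`.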
152 pub fn list_groups(
153 store: String,
154 rpcenv: &mut dyn RpcEnvironment,
155 ) -> Result<Vec<GroupListItem>, Error> {
156
157 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
158 let user_info = CachedUserInfo::new()?;
159 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
160
161 let datastore = DataStore::lookup_datastore(&store)?;
162 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
163
164 let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;
165
166 let group_info = backup_groups
167 .into_iter()
168 .fold(Vec::new(), |mut group_info, group| {
169 let owner = match datastore.get_owner(&group) {
170 Ok(auth_id) => auth_id,
171 Err(err) => {
172 eprintln!("Failed to get owner of group '{}/{}' - {}",
173 &store,
174 group,
175 err);
176 return group_info;
177 },
178 };
179 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
180 return group_info;
181 }
182
183 let snapshots = match group.list_backups(&datastore.base_path()) {
184 Ok(snapshots) => snapshots,
185 Err(_) => {
186 return group_info;
187 },
188 };
189
190 let backup_count: u64 = snapshots.len() as u64;
191 if backup_count == 0 {
192 return group_info;
193 }
194
195 let last_backup = snapshots
196 .iter()
197 .fold(&snapshots[0], |last, curr| {
198 if curr.is_finished()
199 && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
200 curr
201 } else {
202 last
203 }
204 })
205 .to_owned();
206
207 group_info.push(GroupListItem {
208 backup_type: group.backup_type().to_string(),
209 backup_id: group.backup_id().to_string(),
210 last_backup: last_backup.backup_dir.backup_time(),
211 owner: Some(owner),
212 backup_count,
213 files: last_backup.files,
214 });
215
216 group_info
217 });
218
219 Ok(group_info)
220 }
221
222 #[api(
223 input: {
224 properties: {
225 store: {
226 schema: DATASTORE_SCHEMA,
227 },
228 "backup-type": {
229 schema: BACKUP_TYPE_SCHEMA,
230 },
231 "backup-id": {
232 schema: BACKUP_ID_SCHEMA,
233 },
234 "backup-time": {
235 schema: BACKUP_TIME_SCHEMA,
236 },
237 },
238 },
239 returns: {
240 type: Array,
241 description: "Returns the list of archive files inside a backup snapshots.",
242 items: {
243 type: BackupContent,
244 }
245 },
246 access: {
247 permission: &Permission::Privilege(
248 &["datastore", "{store}"],
249 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
250 true),
251 },
252 )]
253 /// List snapshot files.
254 pub fn list_snapshot_files(
255 store: String,
256 backup_type: String,
257 backup_id: String,
258 backup_time: i64,
259 _info: &ApiMethod,
260 rpcenv: &mut dyn RpcEnvironment,
261 ) -> Result<Vec<BackupContent>, Error> {
262
263 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
264 let datastore = DataStore::lookup_datastore(&store)?;
265
266 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
267
268 check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;
269
270 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
271
272 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
273
274 Ok(files)
275 }
276
277 #[api(
278 input: {
279 properties: {
280 store: {
281 schema: DATASTORE_SCHEMA,
282 },
283 "backup-type": {
284 schema: BACKUP_TYPE_SCHEMA,
285 },
286 "backup-id": {
287 schema: BACKUP_ID_SCHEMA,
288 },
289 "backup-time": {
290 schema: BACKUP_TIME_SCHEMA,
291 },
292 },
293 },
294 access: {
295 permission: &Permission::Privilege(
296 &["datastore", "{store}"],
297 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
298 true),
299 },
300 )]
301 /// Delete backup snapshot.
302 fn delete_snapshot(
303 store: String,
304 backup_type: String,
305 backup_id: String,
306 backup_time: i64,
307 _info: &ApiMethod,
308 rpcenv: &mut dyn RpcEnvironment,
309 ) -> Result<Value, Error> {
310
311 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
312
313 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
314 let datastore = DataStore::lookup_datastore(&store)?;
315
316 check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
317
318 datastore.remove_backup_dir(&snapshot, false)?;
319
320 Ok(Value::Null)
321 }
322
323 #[api(
324 input: {
325 properties: {
326 store: {
327 schema: DATASTORE_SCHEMA,
328 },
329 "backup-type": {
330 optional: true,
331 schema: BACKUP_TYPE_SCHEMA,
332 },
333 "backup-id": {
334 optional: true,
335 schema: BACKUP_ID_SCHEMA,
336 },
337 },
338 },
339 returns: {
340 type: Array,
341 description: "Returns the list of snapshots.",
342 items: {
343 type: SnapshotListItem,
344 }
345 },
346 access: {
347 permission: &Permission::Privilege(
348 &["datastore", "{store}"],
349 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
350 true),
351 },
352 )]
353 /// List backup snapshots.
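///
/// The optional `backup-type` and `backup-id` parameters narrow the listing to
/// a single type, a single id, or one specific group; without them every group
/// in the datastore is scanned.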
354 pub fn list_snapshots(
355 store: String,
356 backup_type: Option<String>,
357 backup_id: Option<String>,
358 _param: Value,
359 _info: &ApiMethod,
360 rpcenv: &mut dyn RpcEnvironment,
361 ) -> Result<Vec<SnapshotListItem>, Error> {
362
363 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
364 let user_info = CachedUserInfo::new()?;
365 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
366
367 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
368
369 let datastore = DataStore::lookup_datastore(&store)?;
370
371 let base_path = datastore.base_path();
372
373 let groups = match (backup_type, backup_id) {
374 (Some(backup_type), Some(backup_id)) => {
375 let mut groups = Vec::with_capacity(1);
376 groups.push(BackupGroup::new(backup_type, backup_id));
377 groups
378 },
379 (Some(backup_type), None) => {
380 BackupInfo::list_backup_groups(&base_path)?
381 .into_iter()
382 .filter(|group| group.backup_type() == backup_type)
383 .collect()
384 },
385 (None, Some(backup_id)) => {
386 BackupInfo::list_backup_groups(&base_path)?
387 .into_iter()
388 .filter(|group| group.backup_id() == backup_id)
389 .collect()
390 },
391 _ => BackupInfo::list_backup_groups(&base_path)?,
392 };
393
394 let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
395 let backup_type = group.backup_type().to_string();
396 let backup_id = group.backup_id().to_string();
397 let backup_time = info.backup_dir.backup_time();
398
399 match get_all_snapshot_files(&datastore, &info) {
400 Ok((manifest, files)) => {
401 // extract the first line from notes
402 let comment: Option<String> = manifest.unprotected["notes"]
403 .as_str()
404 .and_then(|notes| notes.lines().next())
405 .map(String::from);
406
407 let fingerprint = match manifest.fingerprint() {
408 Ok(fp) => fp,
409 Err(err) => {
410 eprintln!("error parsing fingerprint: '{}'", err);
411 None
412 },
413 };
414
415 let verification = manifest.unprotected["verify_state"].clone();
416 let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
417 Ok(verify) => verify,
418 Err(err) => {
419 eprintln!("error parsing verification state : '{}'", err);
420 None
421 }
422 };
423
424 let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
425
426 SnapshotListItem {
427 backup_type,
428 backup_id,
429 backup_time,
430 comment,
431 verification,
432 fingerprint,
433 files,
434 size,
435 owner,
436 }
437 },
438 Err(err) => {
439 eprintln!("error during snapshot file listing: '{}'", err);
440 let files = info
441 .files
442 .into_iter()
443 .map(|filename| BackupContent {
444 filename,
445 size: None,
446 crypt_mode: None,
447 })
448 .collect();
449
450 SnapshotListItem {
451 backup_type,
452 backup_id,
453 backup_time,
454 comment: None,
455 verification: None,
456 fingerprint: None,
457 files,
458 size: None,
459 owner,
460 }
461 },
462 }
463 };
464
465 groups
466 .iter()
467 .try_fold(Vec::new(), |mut snapshots, group| {
468 let owner = match datastore.get_owner(group) {
469 Ok(auth_id) => auth_id,
470 Err(err) => {
471 eprintln!("Failed to get owner of group '{}/{}' - {}",
472 &store,
473 group,
474 err);
475 return Ok(snapshots);
476 },
477 };
478
479 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
480 return Ok(snapshots);
481 }
482
483 let group_backups = group.list_backups(&datastore.base_path())?;
484
485 snapshots.extend(
486 group_backups
487 .into_iter()
488 .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
489 );
490
491 Ok(snapshots)
492 })
493 }
494
495 fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
496 let base_path = store.base_path();
497 let groups = BackupInfo::list_backup_groups(&base_path)?;
498
499 groups.iter()
500 .filter(|group| {
501 let owner = match store.get_owner(&group) {
502 Ok(owner) => owner,
503 Err(err) => {
504 eprintln!("Failed to get owner of group '{}/{}' - {}",
505 store.name(),
506 group,
507 err);
508 return false;
509 },
510 };
511
512 match filter_owner {
513 Some(filter) => check_backup_owner(&owner, filter).is_ok(),
514 None => true,
515 }
516 })
517 .try_fold(Counts::default(), |mut counts, group| {
518 let snapshot_count = group.list_backups(&base_path)?.len() as u64;
519
520 let type_count = match group.backup_type() {
521 "ct" => counts.ct.get_or_insert(Default::default()),
522 "vm" => counts.vm.get_or_insert(Default::default()),
523 "host" => counts.host.get_or_insert(Default::default()),
524 _ => counts.other.get_or_insert(Default::default()),
525 };
526
527 type_count.groups += 1;
528 type_count.snapshots += snapshot_count;
529
530 Ok(counts)
531 })
532 }
533
534 #[api(
535 input: {
536 properties: {
537 store: {
538 schema: DATASTORE_SCHEMA,
539 },
540 verbose: {
541 type: bool,
542 default: false,
543 optional: true,
544 description: "Include additional information like snapshot counts and GC status.",
545 },
546 },
547
548 },
549 returns: {
550 type: DataStoreStatus,
551 },
552 access: {
553 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
554 },
555 )]
556 /// Get datastore status.
557 pub fn status(
558 store: String,
559 verbose: bool,
560 _info: &ApiMethod,
561 rpcenv: &mut dyn RpcEnvironment,
562 ) -> Result<DataStoreStatus, Error> {
563 let datastore = DataStore::lookup_datastore(&store)?;
564 let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
565 let (counts, gc_status) = if verbose {
566 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
567 let user_info = CachedUserInfo::new()?;
568
569 let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
570 let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
571 None
572 } else {
573 Some(&auth_id)
574 };
575
576 let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
577 let gc_status = Some(datastore.last_gc_status());
578
579 (counts, gc_status)
580 } else {
581 (None, None)
582 };
583
584 Ok(DataStoreStatus {
585 total: storage.total,
586 used: storage.used,
587 avail: storage.avail,
588 gc_status,
589 counts,
590 })
591 }
592
593 #[api(
594 input: {
595 properties: {
596 store: {
597 schema: DATASTORE_SCHEMA,
598 },
599 "backup-type": {
600 schema: BACKUP_TYPE_SCHEMA,
601 optional: true,
602 },
603 "backup-id": {
604 schema: BACKUP_ID_SCHEMA,
605 optional: true,
606 },
607 "backup-time": {
608 schema: BACKUP_TIME_SCHEMA,
609 optional: true,
610 },
611 },
612 },
613 returns: {
614 schema: UPID_SCHEMA,
615 },
616 access: {
617 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
618 },
619 )]
620 /// Verify backups.
621 ///
622 /// This function can verify a single backup snapshot, all backups from a backup group,
623 /// or all backups in the datastore.
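///
/// Parameter combinations (matching the dispatch below):
/// * `backup-type` + `backup-id` + `backup-time`: verify a single snapshot,
/// * `backup-type` + `backup-id`: verify a whole backup group,
/// * none of the three: verify all backups in the datastore.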
624 pub fn verify(
625 store: String,
626 backup_type: Option<String>,
627 backup_id: Option<String>,
628 backup_time: Option<i64>,
629 rpcenv: &mut dyn RpcEnvironment,
630 ) -> Result<Value, Error> {
631 let datastore = DataStore::lookup_datastore(&store)?;
632
633 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
634 let worker_id;
635
636 let mut backup_dir = None;
637 let mut backup_group = None;
638 let mut worker_type = "verify";
639
640 match (backup_type, backup_id, backup_time) {
641 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
642 worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
643 let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
644
645 check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;
646
647 backup_dir = Some(dir);
648 worker_type = "verify_snapshot";
649 }
650 (Some(backup_type), Some(backup_id), None) => {
651 worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
652 let group = BackupGroup::new(backup_type, backup_id);
653
654 check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
655
656 backup_group = Some(group);
657 worker_type = "verify_group";
658 }
659 (None, None, None) => {
660 worker_id = store.clone();
661 }
662 _ => bail!("parameters do not specify a backup group or snapshot"),
663 }
664
665 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
666
667 let upid_str = WorkerTask::new_thread(
668 worker_type,
669 Some(worker_id),
670 auth_id.clone(),
671 to_stdout,
672 move |worker| {
673 let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
674 let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
675
676 let failed_dirs = if let Some(backup_dir) = backup_dir {
677 let mut res = Vec::new();
678 if !verify_backup_dir(
679 datastore,
680 &backup_dir,
681 verified_chunks,
682 corrupt_chunks,
683 worker.clone(),
684 worker.upid().clone(),
685 None,
686 )? {
687 res.push(backup_dir.to_string());
688 }
689 res
690 } else if let Some(backup_group) = backup_group {
691 let failed_dirs = verify_backup_group(
692 datastore,
693 &backup_group,
694 verified_chunks,
695 corrupt_chunks,
696 &mut StoreProgress::new(1),
697 worker.clone(),
698 worker.upid(),
699 None,
700 )?;
701 failed_dirs
702 } else {
703 let privs = CachedUserInfo::new()?
704 .lookup_privs(&auth_id, &["datastore", &store]);
705
706 let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
707 Some(auth_id)
708 } else {
709 None
710 };
711
712 verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
713 };
714 if !failed_dirs.is_empty() {
715 worker.log("Failed to verify the following snapshots/groups:");
716 for dir in failed_dirs {
717 worker.log(format!("\t{}", dir));
718 }
719 bail!("verification failed - please check the log for details");
720 }
721 Ok(())
722 },
723 )?;
724
725 Ok(json!(upid_str))
726 }
727
728 #[macro_export]
729 macro_rules! add_common_prune_prameters {
730 ( [ $( $list1:tt )* ] ) => {
731 add_common_prune_prameters!([$( $list1 )* ] , [])
732 };
733 ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
734 [
735 $( $list1 )*
736 (
737 "keep-daily",
738 true,
739 &PRUNE_SCHEMA_KEEP_DAILY,
740 ),
741 (
742 "keep-hourly",
743 true,
744 &PRUNE_SCHEMA_KEEP_HOURLY,
745 ),
746 (
747 "keep-last",
748 true,
749 &PRUNE_SCHEMA_KEEP_LAST,
750 ),
751 (
752 "keep-monthly",
753 true,
754 &PRUNE_SCHEMA_KEEP_MONTHLY,
755 ),
756 (
757 "keep-weekly",
758 true,
759 &PRUNE_SCHEMA_KEEP_WEEKLY,
760 ),
761 (
762 "keep-yearly",
763 true,
764 &PRUNE_SCHEMA_KEEP_YEARLY,
765 ),
766 $( $list2 )*
767 ]
768 }
769 }
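// Expansion sketch (hypothetical call): the macro splices the common "keep-*"
// parameter tuples between two caller supplied lists, so
//
//     add_common_prune_prameters!([("backup-id", false, &BACKUP_ID_SCHEMA)])
//
// yields an array starting with "backup-id", followed by "keep-daily",
// "keep-hourly", "keep-last", "keep-monthly", "keep-weekly" and "keep-yearly".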
770
771 pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
772 "Returns the list of snapshots and a flag indicating if there are kept or removed.",
773 &PruneListItem::API_SCHEMA
774 ).schema();
775
776 pub const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
777 &ApiHandler::Sync(&prune),
778 &ObjectSchema::new(
779 "Prune the datastore.",
780 &add_common_prune_prameters!([
781 ("backup-id", false, &BACKUP_ID_SCHEMA),
782 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
783 ("dry-run", true, &BooleanSchema::new(
784 "Just show what prune would do, but do not delete anything.")
785 .schema()
786 ),
787 ],[
788 ("store", false, &DATASTORE_SCHEMA),
789 ])
790 ))
791 .returns(ReturnType::new(false, &API_RETURN_SCHEMA_PRUNE))
792 .access(None, &Permission::Privilege(
793 &["datastore", "{store}"],
794 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
795 true)
796 );
797
798 fn prune(
799 param: Value,
800 _info: &ApiMethod,
801 rpcenv: &mut dyn RpcEnvironment,
802 ) -> Result<Value, Error> {
803
804 let store = tools::required_string_param(&param, "store")?;
805 let backup_type = tools::required_string_param(&param, "backup-type")?;
806 let backup_id = tools::required_string_param(&param, "backup-id")?;
807
808 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
809
810 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
811
812 let group = BackupGroup::new(backup_type, backup_id);
813
814 let datastore = DataStore::lookup_datastore(&store)?;
815
816 check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
817
818 let prune_options = PruneOptions {
819 keep_last: param["keep-last"].as_u64(),
820 keep_hourly: param["keep-hourly"].as_u64(),
821 keep_daily: param["keep-daily"].as_u64(),
822 keep_weekly: param["keep-weekly"].as_u64(),
823 keep_monthly: param["keep-monthly"].as_u64(),
824 keep_yearly: param["keep-yearly"].as_u64(),
825 };
826
827 let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
828
829 let mut prune_result = Vec::new();
830
831 let list = group.list_backups(&datastore.base_path())?;
832
833 let mut prune_info = compute_prune_info(list, &prune_options)?;
834
835 prune_info.reverse(); // delete older snapshots first
836
837 let keep_all = !prune_options.keeps_something();
838
839 if dry_run {
840 for (info, mut keep) in prune_info {
841 if keep_all { keep = true; }
842
843 let backup_time = info.backup_dir.backup_time();
844 let group = info.backup_dir.group();
845
846 prune_result.push(json!({
847 "backup-type": group.backup_type(),
848 "backup-id": group.backup_id(),
849 "backup-time": backup_time,
850 "keep": keep,
851 }));
852 }
853 return Ok(json!(prune_result));
854 }
855
856
857 // We use a WorkerTask just to have a task log, but run synchronously
858 let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;
859
860 if keep_all {
861 worker.log("No prune selection - keeping all files.");
862 } else {
863 worker.log(format!("retention options: {}", prune_options.cli_options_string()));
864 worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
865 store, backup_type, backup_id));
866 }
867
868 for (info, mut keep) in prune_info {
869 if keep_all { keep = true; }
870
871 let backup_time = info.backup_dir.backup_time();
872 let timestamp = info.backup_dir.backup_time_string();
873 let group = info.backup_dir.group();
874
875
876 let msg = format!(
877 "{}/{}/{} {}",
878 group.backup_type(),
879 group.backup_id(),
880 timestamp,
881 if keep { "keep" } else { "remove" },
882 );
883
884 worker.log(msg);
885
886 prune_result.push(json!({
887 "backup-type": group.backup_type(),
888 "backup-id": group.backup_id(),
889 "backup-time": backup_time,
890 "keep": keep,
891 }));
892
893 if !(dry_run || keep) {
894 if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
895 worker.warn(
896 format!(
897 "failed to remove dir {:?}: {}",
898 info.backup_dir.relative_path(), err
899 )
900 );
901 }
902 }
903 }
904
905 worker.log_result(&Ok(()));
906
907 Ok(json!(prune_result))
908 }
909
910 #[api(
911 input: {
912 properties: {
913 store: {
914 schema: DATASTORE_SCHEMA,
915 },
916 },
917 },
918 returns: {
919 schema: UPID_SCHEMA,
920 },
921 access: {
922 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
923 },
924 )]
925 /// Start garbage collection.
926 fn start_garbage_collection(
927 store: String,
928 _info: &ApiMethod,
929 rpcenv: &mut dyn RpcEnvironment,
930 ) -> Result<Value, Error> {
931
932 let datastore = DataStore::lookup_datastore(&store)?;
933 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
934
935 let job = Job::new("garbage_collection", &store)
936 .map_err(|_| format_err!("garbage collection already running"))?;
937
938 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
939
940 let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
941 .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
942
943 Ok(json!(upid_str))
944 }
945
946 #[api(
947 input: {
948 properties: {
949 store: {
950 schema: DATASTORE_SCHEMA,
951 },
952 },
953 },
954 returns: {
955 type: GarbageCollectionStatus,
956 },
957 access: {
958 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
959 },
960 )]
961 /// Garbage collection status.
962 pub fn garbage_collection_status(
963 store: String,
964 _info: &ApiMethod,
965 _rpcenv: &mut dyn RpcEnvironment,
966 ) -> Result<GarbageCollectionStatus, Error> {
967
968 let datastore = DataStore::lookup_datastore(&store)?;
969
970 let status = datastore.last_gc_status();
971
972 Ok(status)
973 }
974
975 #[api(
976 returns: {
977 description: "List the accessible datastores.",
978 type: Array,
979 items: { type: DataStoreListItem },
980 },
981 access: {
982 permission: &Permission::Anybody,
983 },
984 )]
985 /// Datastore list
986 fn get_datastore_list(
987 _param: Value,
988 _info: &ApiMethod,
989 rpcenv: &mut dyn RpcEnvironment,
990 ) -> Result<Vec<DataStoreListItem>, Error> {
991
992 let (config, _digest) = datastore::config()?;
993
994 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
995 let user_info = CachedUserInfo::new()?;
996
997 let mut list = Vec::new();
998
999 for (store, (_, data)) in &config.sections {
1000 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
1001 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
1002 if allowed {
1003 list.push(
1004 DataStoreListItem {
1005 store: store.clone(),
1006 comment: data["comment"].as_str().map(String::from),
1007 }
1008 );
1009 }
1010 }
1011
1012 Ok(list)
1013 }
1014
1015 #[sortable]
1016 pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
1017 &ApiHandler::AsyncHttp(&download_file),
1018 &ObjectSchema::new(
1019 "Download single raw file from backup snapshot.",
1020 &sorted!([
1021 ("store", false, &DATASTORE_SCHEMA),
1022 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1023 ("backup-id", false, &BACKUP_ID_SCHEMA),
1024 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1025 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
1026 ]),
1027 )
1028 ).access(None, &Permission::Privilege(
1029 &["datastore", "{store}"],
1030 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1031 true)
1032 );
1033
1034 fn download_file(
1035 _parts: Parts,
1036 _req_body: Body,
1037 param: Value,
1038 _info: &ApiMethod,
1039 rpcenv: Box<dyn RpcEnvironment>,
1040 ) -> ApiResponseFuture {
1041
1042 async move {
1043 let store = tools::required_string_param(&param, "store")?;
1044 let datastore = DataStore::lookup_datastore(store)?;
1045
1046 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1047
1048 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
1049
1050 let backup_type = tools::required_string_param(&param, "backup-type")?;
1051 let backup_id = tools::required_string_param(&param, "backup-id")?;
1052 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1053
1054 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1055
1056 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
1057
1058 println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
1059
1060 let mut path = datastore.base_path();
1061 path.push(backup_dir.relative_path());
1062 path.push(&file_name);
1063
1064 let file = tokio::fs::File::open(&path)
1065 .await
1066 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
1067
1068 let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
1069 .map_ok(|bytes| bytes.freeze())
1070 .map_err(move |err| {
1071 eprintln!("error during streaming of '{:?}' - {}", &path, err);
1072 err
1073 });
1074 let body = Body::wrap_stream(payload);
1075
1076 // fixme: set other headers ?
1077 Ok(Response::builder()
1078 .status(StatusCode::OK)
1079 .header(header::CONTENT_TYPE, "application/octet-stream")
1080 .body(body)
1081 .unwrap())
1082 }.boxed()
1083 }
1084
1085 #[sortable]
1086 pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
1087 &ApiHandler::AsyncHttp(&download_file_decoded),
1088 &ObjectSchema::new(
1089 "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
1090 &sorted!([
1091 ("store", false, &DATASTORE_SCHEMA),
1092 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1093 ("backup-id", false, &BACKUP_ID_SCHEMA),
1094 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1095 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
1096 ]),
1097 )
1098 ).access(None, &Permission::Privilege(
1099 &["datastore", "{store}"],
1100 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1101 true)
1102 );
1103
1104 fn download_file_decoded(
1105 _parts: Parts,
1106 _req_body: Body,
1107 param: Value,
1108 _info: &ApiMethod,
1109 rpcenv: Box<dyn RpcEnvironment>,
1110 ) -> ApiResponseFuture {
1111
1112 async move {
1113 let store = tools::required_string_param(&param, "store")?;
1114 let datastore = DataStore::lookup_datastore(store)?;
1115
1116 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1117
1118 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
1119
1120 let backup_type = tools::required_string_param(&param, "backup-type")?;
1121 let backup_id = tools::required_string_param(&param, "backup-id")?;
1122 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1123
1124 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1125
1126 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
1127
1128 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1129 for file in files {
1130 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1131 bail!("cannot decode '{}' - is encrypted", file_name);
1132 }
1133 }
1134
1135 println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
1136
1137 let mut path = datastore.base_path();
1138 path.push(backup_dir.relative_path());
1139 path.push(&file_name);
1140
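// the extension is the part after the last '.'; it selects the decoder below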
1141 let extension = file_name.rsplitn(2, '.').next().unwrap();
1142
1143 let body = match extension {
1144 "didx" => {
1145 let index = DynamicIndexReader::open(&path)
1146 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1147 let (csum, size) = index.compute_csum();
1148 manifest.verify_file(&file_name, &csum, size)?;
1149
1150 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1151 let reader = AsyncIndexReader::new(index, chunk_reader);
1152 Body::wrap_stream(AsyncReaderStream::new(reader)
1153 .map_err(move |err| {
1154 eprintln!("error during streaming of '{:?}' - {}", path, err);
1155 err
1156 }))
1157 },
1158 "fidx" => {
1159 let index = FixedIndexReader::open(&path)
1160 .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
1161
1162 let (csum, size) = index.compute_csum();
1163 manifest.verify_file(&file_name, &csum, size)?;
1164
1165 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1166 let reader = AsyncIndexReader::new(index, chunk_reader);
1167 Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
1168 .map_err(move |err| {
1169 eprintln!("error during streaming of '{:?}' - {}", path, err);
1170 err
1171 }))
1172 },
1173 "blob" => {
1174 let file = std::fs::File::open(&path)
1175 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
1176
1177 // FIXME: load full blob to verify index checksum?
1178
1179 Body::wrap_stream(
1180 WrappedReaderStream::new(DataBlobReader::new(file, None)?)
1181 .map_err(move |err| {
1182 eprintln!("error during streaming of '{:?}' - {}", path, err);
1183 err
1184 })
1185 )
1186 },
1187 extension => {
1188 bail!("cannot download '{}' files", extension);
1189 },
1190 };
1191
1192 // fixme: set other headers ?
1193 Ok(Response::builder()
1194 .status(StatusCode::OK)
1195 .header(header::CONTENT_TYPE, "application/octet-stream")
1196 .body(body)
1197 .unwrap())
1198 }.boxed()
1199 }
1200
1201 #[sortable]
1202 pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
1203 &ApiHandler::AsyncHttp(&upload_backup_log),
1204 &ObjectSchema::new(
1205 "Upload the client backup log file into a backup snapshot ('client.log.blob').",
1206 &sorted!([
1207 ("store", false, &DATASTORE_SCHEMA),
1208 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1209 ("backup-id", false, &BACKUP_ID_SCHEMA),
1210 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1211 ]),
1212 )
1213 ).access(
1214 Some("Only the backup creator/owner is allowed to do this."),
1215 &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
1216 );
1217
1218 fn upload_backup_log(
1219 _parts: Parts,
1220 req_body: Body,
1221 param: Value,
1222 _info: &ApiMethod,
1223 rpcenv: Box<dyn RpcEnvironment>,
1224 ) -> ApiResponseFuture {
1225
1226 async move {
1227 let store = tools::required_string_param(&param, "store")?;
1228 let datastore = DataStore::lookup_datastore(store)?;
1229
1230 let file_name = CLIENT_LOG_BLOB_NAME;
1231
1232 let backup_type = tools::required_string_param(&param, "backup-type")?;
1233 let backup_id = tools::required_string_param(&param, "backup-id")?;
1234 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1235
1236 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1237
1238 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1239 let owner = datastore.get_owner(backup_dir.group())?;
1240 check_backup_owner(&owner, &auth_id)?;
1241
1242 let mut path = datastore.base_path();
1243 path.push(backup_dir.relative_path());
1244 path.push(&file_name);
1245
1246 if path.exists() {
1247 bail!("backup already contains a log.");
1248 }
1249
1250 println!("Upload backup log to {}/{}/{}/{}/{}", store,
1251 backup_type, backup_id, backup_dir.backup_time_string(), file_name);
1252
1253 let data = req_body
1254 .map_err(Error::from)
1255 .try_fold(Vec::new(), |mut acc, chunk| {
1256 acc.extend_from_slice(&*chunk);
1257 future::ok::<_, Error>(acc)
1258 })
1259 .await?;
1260
1261 // always verify blob/CRC at server side
1262 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1263
1264 replace_file(&path, blob.raw_data(), CreateOptions::new())?;
1265
1266 // fixme: use correct formatter
1267 Ok(crate::server::formatter::json_response(Ok(Value::Null)))
1268 }.boxed()
1269 }
1270
1271 #[api(
1272 input: {
1273 properties: {
1274 store: {
1275 schema: DATASTORE_SCHEMA,
1276 },
1277 "backup-type": {
1278 schema: BACKUP_TYPE_SCHEMA,
1279 },
1280 "backup-id": {
1281 schema: BACKUP_ID_SCHEMA,
1282 },
1283 "backup-time": {
1284 schema: BACKUP_TIME_SCHEMA,
1285 },
1286 "filepath": {
1287 description: "Base64 encoded path.",
1288 type: String,
1289 }
1290 },
1291 },
1292 access: {
1293 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1294 },
1295 )]
1296 /// Get the entries at the given path of the catalog.
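///
/// `filepath` is either the literal string "root" or a base64 encoded catalog
/// path, e.g. (hypothetical) `base64::encode("/etc/hostname")`.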
1297 fn catalog(
1298 store: String,
1299 backup_type: String,
1300 backup_id: String,
1301 backup_time: i64,
1302 filepath: String,
1303 _param: Value,
1304 _info: &ApiMethod,
1305 rpcenv: &mut dyn RpcEnvironment,
1306 ) -> Result<Value, Error> {
1307 let datastore = DataStore::lookup_datastore(&store)?;
1308
1309 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1310
1311 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1312
1313 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
1314
1315 let file_name = CATALOG_NAME;
1316
1317 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1318 for file in files {
1319 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1320 bail!("cannot decode '{}' - is encrypted", file_name);
1321 }
1322 }
1323
1324 let mut path = datastore.base_path();
1325 path.push(backup_dir.relative_path());
1326 path.push(file_name);
1327
1328 let index = DynamicIndexReader::open(&path)
1329 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1330
1331 let (csum, size) = index.compute_csum();
1332 manifest.verify_file(&file_name, &csum, size)?;
1333
1334 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1335 let reader = BufferedDynamicReader::new(index, chunk_reader);
1336
1337 let mut catalog_reader = CatalogReader::new(reader);
1338 let mut current = catalog_reader.root()?;
1339 let mut components = vec![];
1340
1341
1342 if filepath != "root" {
1343 components = base64::decode(filepath)?;
1344 if components.len() > 0 && components[0] == '/' as u8 {
1345 components.remove(0);
1346 }
1347 for component in components.split(|c| *c == '/' as u8) {
1348 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1349 current = entry;
1350 } else {
1351 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1352 }
1353 }
1354 }
1355
1356 let mut res = Vec::new();
1357
1358 for direntry in catalog_reader.read_dir(&current)? {
1359 let mut components = components.clone();
1360 components.push('/' as u8);
1361 components.extend(&direntry.name);
1362 let path = base64::encode(components);
1363 let text = String::from_utf8_lossy(&direntry.name);
1364 let mut entry = json!({
1365 "filepath": path,
1366 "text": text,
1367 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1368 "leaf": true,
1369 });
1370 match direntry.attr {
1371 DirEntryAttribute::Directory { start: _ } => {
1372 entry["leaf"] = false.into();
1373 },
1374 DirEntryAttribute::File { size, mtime } => {
1375 entry["size"] = size.into();
1376 entry["mtime"] = mtime.into();
1377 },
1378 _ => {},
1379 }
1380 res.push(entry);
1381 }
1382
1383 Ok(res.into())
1384 }
1385
1386 fn recurse_files<'a, T, W>(
1387 zip: &'a mut ZipEncoder<W>,
1388 decoder: &'a mut Accessor<T>,
1389 prefix: &'a Path,
1390 file: FileEntry<T>,
1391 ) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
1392 where
1393 T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
1394 W: tokio::io::AsyncWrite + Unpin + Send + 'static,
1395 {
1396 Box::pin(async move {
1397 let metadata = file.entry().metadata();
1398 let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();
1399
1400 match file.kind() {
1401 EntryKind::File { .. } => {
1402 let entry = ZipEntry::new(
1403 path,
1404 metadata.stat.mtime.secs,
1405 metadata.stat.mode as u16,
1406 true,
1407 );
1408 zip.add_entry(entry, Some(file.contents().await?))
1409 .await
1410 .map_err(|err| format_err!("could not send file entry: {}", err))?;
1411 }
1412 EntryKind::Hardlink(_) => {
1413 let realfile = decoder.follow_hardlink(&file).await?;
1414 let entry = ZipEntry::new(
1415 path,
1416 metadata.stat.mtime.secs,
1417 metadata.stat.mode as u16,
1418 true,
1419 );
1420 zip.add_entry(entry, Some(realfile.contents().await?))
1421 .await
1422 .map_err(|err| format_err!("could not send file entry: {}", err))?;
1423 }
1424 EntryKind::Directory => {
1425 let dir = file.enter_directory().await?;
1426 let mut readdir = dir.read_dir();
1427 let entry = ZipEntry::new(
1428 path,
1429 metadata.stat.mtime.secs,
1430 metadata.stat.mode as u16,
1431 false,
1432 );
1433 zip.add_entry::<FileContents<T>>(entry, None).await?;
1434 while let Some(entry) = readdir.next().await {
1435 let entry = entry?.decode_entry().await?;
1436 recurse_files(zip, decoder, prefix, entry).await?;
1437 }
1438 }
1439 _ => {} // ignore all else
1440 };
1441
1442 Ok(())
1443 })
1444 }
1445
1446 #[sortable]
1447 pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1448 &ApiHandler::AsyncHttp(&pxar_file_download),
1449 &ObjectSchema::new(
1450 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
1451 &sorted!([
1452 ("store", false, &DATASTORE_SCHEMA),
1453 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1454 ("backup-id", false, &BACKUP_ID_SCHEMA),
1455 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1456 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
1457 ]),
1458 )
1459 ).access(None, &Permission::Privilege(
1460 &["datastore", "{store}"],
1461 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1462 true)
1463 );
1464
1465 fn pxar_file_download(
1466 _parts: Parts,
1467 _req_body: Body,
1468 param: Value,
1469 _info: &ApiMethod,
1470 rpcenv: Box<dyn RpcEnvironment>,
1471 ) -> ApiResponseFuture {
1472
1473 async move {
1474 let store = tools::required_string_param(&param, "store")?;
1475 let datastore = DataStore::lookup_datastore(&store)?;
1476
1477 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1478
1479 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1480
1481 let backup_type = tools::required_string_param(&param, "backup-type")?;
1482 let backup_id = tools::required_string_param(&param, "backup-id")?;
1483 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1484
1485 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1486
1487 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
1488
1489 let mut components = base64::decode(&filepath)?;
1490 if components.len() > 0 && components[0] == '/' as u8 {
1491 components.remove(0);
1492 }
1493
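// the first path component names the pxar archive, the rest is the path inside it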
1494 let mut split = components.splitn(2, |c| *c == '/' as u8);
1495 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
1496 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
1497 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
1498 for file in files {
1499 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1500 bail!("cannot decode '{}' - is encrypted", pxar_name);
1501 }
1502 }
1503
1504 let mut path = datastore.base_path();
1505 path.push(backup_dir.relative_path());
1506 path.push(pxar_name);
1507
1508 let index = DynamicIndexReader::open(&path)
1509 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1510
1511 let (csum, size) = index.compute_csum();
1512 manifest.verify_file(&pxar_name, &csum, size)?;
1513
1514 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1515 let reader = BufferedDynamicReader::new(index, chunk_reader);
1516 let archive_size = reader.archive_size();
1517 let reader = LocalDynamicReadAt::new(reader);
1518
1519 let decoder = Accessor::new(reader, archive_size).await?;
1520 let root = decoder.open_root().await?;
1521 let file = root
1522 .lookup(OsStr::from_bytes(file_path)).await?
1523 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1524
1525 let body = match file.kind() {
1526 EntryKind::File { .. } => Body::wrap_stream(
1527 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1528 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1529 err
1530 }),
1531 ),
1532 EntryKind::Hardlink(_) => Body::wrap_stream(
1533 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1534 .map_err(move |err| {
1535 eprintln!(
1536 "error during streaming of hardlink '{:?}' - {}",
1537 filepath, err
1538 );
1539 err
1540 }),
1541 ),
1542 EntryKind::Directory => {
1543 let (sender, receiver) = tokio::sync::mpsc::channel(100);
1544 let mut prefix = PathBuf::new();
1545 let mut components = file.entry().path().components();
1546 components.next_back(); // discard the last component
1547 for comp in components {
1548 prefix.push(comp);
1549 }
1550
1551 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
1552
1553 crate::server::spawn_internal_task(async move {
1554 let mut zipencoder = ZipEncoder::new(channelwriter);
1555 let mut decoder = decoder;
1556 recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
1557 .await
1558 .map_err(|err| eprintln!("error during creating of zip: {}", err))?;
1559
1560 zipencoder
1561 .finish()
1562 .await
1563 .map_err(|err| eprintln!("error during finishing of zip: {}", err))
1564 });
1565
1566 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
1567 eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
1568 err
1569 }))
1570 }
1571 other => bail!("cannot download file of type {:?}", other),
1572 };
1573
1574 // fixme: set other headers ?
1575 Ok(Response::builder()
1576 .status(StatusCode::OK)
1577 .header(header::CONTENT_TYPE, "application/octet-stream")
1578 .body(body)
1579 .unwrap())
1580 }.boxed()
1581 }
1582
1583 #[api(
1584 input: {
1585 properties: {
1586 store: {
1587 schema: DATASTORE_SCHEMA,
1588 },
1589 timeframe: {
1590 type: RRDTimeFrameResolution,
1591 },
1592 cf: {
1593 type: RRDMode,
1594 },
1595 },
1596 },
1597 access: {
1598 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1599 },
1600 )]
1601 /// Read datastore stats
1602 fn get_rrd_stats(
1603 store: String,
1604 timeframe: RRDTimeFrameResolution,
1605 cf: RRDMode,
1606 _param: Value,
1607 ) -> Result<Value, Error> {
1608
1609 create_value_from_rrd(
1610 &format!("datastore/{}", store),
1611 &[
1612 "total", "used",
1613 "read_ios", "read_bytes",
1614 "write_ios", "write_bytes",
1615 "io_ticks",
1616 ],
1617 timeframe,
1618 cf,
1619 )
1620 }
1621
1622 #[api(
1623 input: {
1624 properties: {
1625 store: {
1626 schema: DATASTORE_SCHEMA,
1627 },
1628 "backup-type": {
1629 schema: BACKUP_TYPE_SCHEMA,
1630 },
1631 "backup-id": {
1632 schema: BACKUP_ID_SCHEMA,
1633 },
1634 "backup-time": {
1635 schema: BACKUP_TIME_SCHEMA,
1636 },
1637 },
1638 },
1639 access: {
1640 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1641 },
1642 )]
1643 /// Get "notes" for a specific backup
1644 fn get_notes(
1645 store: String,
1646 backup_type: String,
1647 backup_id: String,
1648 backup_time: i64,
1649 rpcenv: &mut dyn RpcEnvironment,
1650 ) -> Result<String, Error> {
1651 let datastore = DataStore::lookup_datastore(&store)?;
1652
1653 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1654 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1655
1656 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
1657
1658 let (manifest, _) = datastore.load_manifest(&backup_dir)?;
1659
1660 let notes = manifest.unprotected["notes"]
1661 .as_str()
1662 .unwrap_or("");
1663
1664 Ok(String::from(notes))
1665 }
1666
1667 #[api(
1668 input: {
1669 properties: {
1670 store: {
1671 schema: DATASTORE_SCHEMA,
1672 },
1673 "backup-type": {
1674 schema: BACKUP_TYPE_SCHEMA,
1675 },
1676 "backup-id": {
1677 schema: BACKUP_ID_SCHEMA,
1678 },
1679 "backup-time": {
1680 schema: BACKUP_TIME_SCHEMA,
1681 },
1682 notes: {
1683 description: "A multiline text.",
1684 },
1685 },
1686 },
1687 access: {
1688 permission: &Permission::Privilege(&["datastore", "{store}"],
1689 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1690 true),
1691 },
1692 )]
1693 /// Set "notes" for a specific backup
1694 fn set_notes(
1695 store: String,
1696 backup_type: String,
1697 backup_id: String,
1698 backup_time: i64,
1699 notes: String,
1700 rpcenv: &mut dyn RpcEnvironment,
1701 ) -> Result<(), Error> {
1702 let datastore = DataStore::lookup_datastore(&store)?;
1703
1704 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1705 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1706
1707 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
1708
1709 datastore.update_manifest(&backup_dir,|manifest| {
1710 manifest.unprotected["notes"] = notes.into();
1711 }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
1712
1713 Ok(())
1714 }
1715
1716 #[api(
1717 input: {
1718 properties: {
1719 store: {
1720 schema: DATASTORE_SCHEMA,
1721 },
1722 "backup-type": {
1723 schema: BACKUP_TYPE_SCHEMA,
1724 },
1725 "backup-id": {
1726 schema: BACKUP_ID_SCHEMA,
1727 },
1728 "new-owner": {
1729 type: Authid,
1730 },
1731 },
1732 },
1733 access: {
1734 permission: &Permission::Anybody,
1735 description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
1736 },
1737 )]
1738 /// Change owner of a backup group
1739 fn set_backup_owner(
1740 store: String,
1741 backup_type: String,
1742 backup_id: String,
1743 new_owner: Authid,
1744 rpcenv: &mut dyn RpcEnvironment,
1745 ) -> Result<(), Error> {
1746
1747 let datastore = DataStore::lookup_datastore(&store)?;
1748
1749 let backup_group = BackupGroup::new(backup_type, backup_id);
1750
1751 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1752
1753 let user_info = CachedUserInfo::new()?;
1754
1755 let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
1756
1757 let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
1758 // High-privilege user/token
1759 true
1760 } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
1761 let owner = datastore.get_owner(&backup_group)?;
1762
1763 match (owner.is_token(), new_owner.is_token()) {
1764 (true, true) => {
1765 // API token to API token, owned by same user
1766 let owner = owner.user();
1767 let new_owner = new_owner.user();
1768 owner == new_owner && Authid::from(owner.clone()) == auth_id
1769 },
1770 (true, false) => {
1771 // API token to API token owner
1772 Authid::from(owner.user().clone()) == auth_id
1773 && new_owner == auth_id
1774 },
1775 (false, true) => {
1776 // API token owner to API token
1777 owner == auth_id
1778 && Authid::from(new_owner.user().clone()) == auth_id
1779 },
1780 (false, false) => {
1781 // User to User, not allowed for unprivileged users
1782 false
1783 },
1784 }
1785 } else {
1786 false
1787 };
1788
1789 if !allowed {
1790 return Err(http_err!(UNAUTHORIZED,
1791 "{} does not have permission to change owner of backup group '{}' to {}",
1792 auth_id,
1793 backup_group,
1794 new_owner,
1795 ));
1796 }
1797
1798 if !user_info.is_active_auth_id(&new_owner) {
1799 bail!("{} '{}' is inactive or non-existent",
1800 if new_owner.is_token() {
1801 "API token".to_string()
1802 } else {
1803 "user".to_string()
1804 },
1805 new_owner);
1806 }
1807
1808 datastore.set_owner(&backup_group, &new_owner, true)?;
1809
1810 Ok(())
1811 }
1812
1813 #[sortable]
1814 const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
1815 (
1816 "catalog",
1817 &Router::new()
1818 .get(&API_METHOD_CATALOG)
1819 ),
1820 (
1821 "change-owner",
1822 &Router::new()
1823 .post(&API_METHOD_SET_BACKUP_OWNER)
1824 ),
1825 (
1826 "download",
1827 &Router::new()
1828 .download(&API_METHOD_DOWNLOAD_FILE)
1829 ),
1830 (
1831 "download-decoded",
1832 &Router::new()
1833 .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
1834 ),
1835 (
1836 "files",
1837 &Router::new()
1838 .get(&API_METHOD_LIST_SNAPSHOT_FILES)
1839 ),
1840 (
1841 "gc",
1842 &Router::new()
1843 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
1844 .post(&API_METHOD_START_GARBAGE_COLLECTION)
1845 ),
1846 (
1847 "groups",
1848 &Router::new()
1849 .get(&API_METHOD_LIST_GROUPS)
1850 ),
1851 (
1852 "notes",
1853 &Router::new()
1854 .get(&API_METHOD_GET_NOTES)
1855 .put(&API_METHOD_SET_NOTES)
1856 ),
1857 (
1858 "prune",
1859 &Router::new()
1860 .post(&API_METHOD_PRUNE)
1861 ),
1862 (
1863 "pxar-file-download",
1864 &Router::new()
1865 .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
1866 ),
1867 (
1868 "rrd",
1869 &Router::new()
1870 .get(&API_METHOD_GET_RRD_STATS)
1871 ),
1872 (
1873 "snapshots",
1874 &Router::new()
1875 .get(&API_METHOD_LIST_SNAPSHOTS)
1876 .delete(&API_METHOD_DELETE_SNAPSHOT)
1877 ),
1878 (
1879 "status",
1880 &Router::new()
1881 .get(&API_METHOD_STATUS)
1882 ),
1883 (
1884 "upload-backup-log",
1885 &Router::new()
1886 .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
1887 ),
1888 (
1889 "verify",
1890 &Router::new()
1891 .post(&API_METHOD_VERIFY)
1892 ),
1893 ];
1894
1895 const DATASTORE_INFO_ROUTER: Router = Router::new()
1896 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
1897 .subdirs(DATASTORE_INFO_SUBDIRS);
1898
1899
1900 pub const ROUTER: Router = Router::new()
1901 .get(&API_METHOD_GET_DATASTORE_LIST)
1902 .match_all("store", &DATASTORE_INFO_ROUTER);