]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
datastore status: do not count empty groups
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 //! Datastore Management
2
3 use std::collections::HashSet;
4 use std::ffi::OsStr;
5 use std::os::unix::ffi::OsStrExt;
6 use std::path::PathBuf;
7
8 use anyhow::{bail, format_err, Error};
9 use futures::*;
10 use hyper::http::request::Parts;
11 use hyper::{header, Body, Response, StatusCode};
12 use serde_json::{json, Value};
13 use tokio_stream::wrappers::ReceiverStream;
14
15 use proxmox_sys::sortable;
16 use proxmox_sys::fs::{
17 file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
18 };
19 use proxmox_router::{
20 list_subdirs_api_method, http_err, ApiResponseFuture, ApiHandler, ApiMethod, Router,
21 RpcEnvironment, RpcEnvironmentType, SubdirMap, Permission,
22 };
23 use proxmox_schema::*;
24 use proxmox_sys::{task_log, task_warn};
25 use proxmox_async::blocking::WrappedReaderStream;
26 use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
27
28 use pxar::accessor::aio::Accessor;
29 use pxar::EntryKind;
30
31 use pbs_api_types::{ Authid, BackupContent, Counts, CryptMode,
32 DataStoreListItem, GarbageCollectionStatus, GroupListItem,
33 SnapshotListItem, SnapshotVerifyState, PruneOptions,
34 DataStoreStatus, RRDMode, RRDTimeFrame,
35 BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
36 BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
37 IGNORE_VERIFIED_BACKUPS_SCHEMA, UPID_SCHEMA,
38 VERIFICATION_OUTDATED_AFTER_SCHEMA, PRIV_DATASTORE_AUDIT,
39 PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ, PRIV_DATASTORE_PRUNE,
40 PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY,
41
42 };
43 use pbs_client::pxar::create_zip;
44 use pbs_datastore::{
45 check_backup_owner, DataStore, BackupDir, BackupGroup, StoreProgress, LocalChunkReader,
46 CATALOG_NAME,
47 };
48 use pbs_datastore::backup_info::BackupInfo;
49 use pbs_datastore::cached_chunk_reader::CachedChunkReader;
50 use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
51 use pbs_datastore::data_blob::DataBlob;
52 use pbs_datastore::data_blob_reader::DataBlobReader;
53 use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
54 use pbs_datastore::fixed_index::{FixedIndexReader};
55 use pbs_datastore::index::IndexFile;
56 use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
57 use pbs_datastore::prune::compute_prune_info;
58 use pbs_tools::json::{required_integer_param, required_string_param};
59 use pbs_config::CachedUserInfo;
60 use proxmox_rest_server::{WorkerTask, formatter};
61
62 use crate::api2::node::rrd::create_value_from_rrd;
63 use crate::backup::{
64 verify_all_backups, verify_backup_group, verify_backup_dir, verify_filter,
65 };
66
67 use crate::server::jobstate::Job;
68
69
70 const GROUP_NOTES_FILE_NAME: &str = "notes";
71
72 fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
73 let mut note_path = store.base_path();
74 note_path.push(group.group_path());
75 note_path.push(GROUP_NOTES_FILE_NAME);
76 note_path
77 }
78
79 fn check_priv_or_backup_owner(
80 store: &DataStore,
81 group: &BackupGroup,
82 auth_id: &Authid,
83 required_privs: u64,
84 ) -> Result<(), Error> {
85 let user_info = CachedUserInfo::new()?;
86 let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);
87
88 if privs & required_privs == 0 {
89 let owner = store.get_owner(group)?;
90 check_backup_owner(&owner, auth_id)?;
91 }
92 Ok(())
93 }
94
95 fn read_backup_index(
96 store: &DataStore,
97 backup_dir: &BackupDir,
98 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
99
100 let (manifest, index_size) = store.load_manifest(backup_dir)?;
101
102 let mut result = Vec::new();
103 for item in manifest.files() {
104 result.push(BackupContent {
105 filename: item.filename.clone(),
106 crypt_mode: Some(item.crypt_mode),
107 size: Some(item.size),
108 });
109 }
110
111 result.push(BackupContent {
112 filename: MANIFEST_BLOB_NAME.to_string(),
113 crypt_mode: match manifest.signature {
114 Some(_) => Some(CryptMode::SignOnly),
115 None => Some(CryptMode::None),
116 },
117 size: Some(index_size),
118 });
119
120 Ok((manifest, result))
121 }
122
123 fn get_all_snapshot_files(
124 store: &DataStore,
125 info: &BackupInfo,
126 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
127
128 let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;
129
130 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
131 acc.insert(item.filename.clone());
132 acc
133 });
134
135 for file in &info.files {
136 if file_set.contains(file) { continue; }
137 files.push(BackupContent {
138 filename: file.to_string(),
139 size: None,
140 crypt_mode: None,
141 });
142 }
143
144 Ok((manifest, files))
145 }
146
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
///
/// With `PRIV_DATASTORE_AUDIT` every group is returned; otherwise only groups
/// owned by the calling auth id. Groups whose owner cannot be read, whose
/// snapshot listing fails, or which contain no snapshots are silently skipped.
pub fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;
    // audit privilege lists all groups, not just the caller's own
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;

    let group_info = backup_groups
        .into_iter()
        .fold(Vec::new(), |mut group_info, group| {
            // best effort: an unreadable owner only logs and skips the group
            let owner = match datastore.get_owner(&group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                        &store,
                        group,
                        err);
                    return group_info;
                },
            };
            // without audit privilege, only the caller's own groups are visible
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return group_info;
            }

            let snapshots = match group.list_backups(&datastore.base_path()) {
                Ok(snapshots) => snapshots,
                Err(_) => {
                    return group_info;
                },
            };

            let backup_count: u64 = snapshots.len() as u64;
            // do not report empty groups
            if backup_count == 0 {
                return group_info;
            }

            // newest *finished* snapshot; falls back to snapshots[0] when
            // none is newer (or none is finished)
            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |last, curr| {
                    if curr.is_finished()
                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
                        curr
                    } else {
                        last
                    }
                })
                .to_owned();

            // group comment is stored in a plain "notes" file; missing file => None
            let note_path = get_group_note_path(&datastore, &group);
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup_type: group.backup_type().to_string(),
                backup_id: group.backup_id().to_string(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            group_info
        });

    Ok(group_info)
}
237
238 #[api(
239 input: {
240 properties: {
241 store: {
242 schema: DATASTORE_SCHEMA,
243 },
244 "backup-type": {
245 schema: BACKUP_TYPE_SCHEMA,
246 },
247 "backup-id": {
248 schema: BACKUP_ID_SCHEMA,
249 },
250 },
251 },
252 access: {
253 permission: &Permission::Privilege(
254 &["datastore", "{store}"],
255 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
256 true),
257 },
258 )]
259 /// Delete backup group including all snapshots.
260 pub fn delete_group(
261 store: String,
262 backup_type: String,
263 backup_id: String,
264 _info: &ApiMethod,
265 rpcenv: &mut dyn RpcEnvironment,
266 ) -> Result<Value, Error> {
267
268 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
269
270 let group = BackupGroup::new(backup_type, backup_id);
271 let datastore = DataStore::lookup_datastore(&store)?;
272
273 check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
274
275 if !datastore.remove_backup_group(&group)? {
276 bail!("did not delete whole group because of protected snapthots");
277 }
278
279 Ok(Value::Null)
280 }
281
282 #[api(
283 input: {
284 properties: {
285 store: {
286 schema: DATASTORE_SCHEMA,
287 },
288 "backup-type": {
289 schema: BACKUP_TYPE_SCHEMA,
290 },
291 "backup-id": {
292 schema: BACKUP_ID_SCHEMA,
293 },
294 "backup-time": {
295 schema: BACKUP_TIME_SCHEMA,
296 },
297 },
298 },
299 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
300 access: {
301 permission: &Permission::Privilege(
302 &["datastore", "{store}"],
303 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
304 true),
305 },
306 )]
307 /// List snapshot files.
308 pub fn list_snapshot_files(
309 store: String,
310 backup_type: String,
311 backup_id: String,
312 backup_time: i64,
313 _info: &ApiMethod,
314 rpcenv: &mut dyn RpcEnvironment,
315 ) -> Result<Vec<BackupContent>, Error> {
316
317 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
318 let datastore = DataStore::lookup_datastore(&store)?;
319
320 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
321
322 check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;
323
324 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
325
326 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
327
328 Ok(files)
329 }
330
331 #[api(
332 input: {
333 properties: {
334 store: {
335 schema: DATASTORE_SCHEMA,
336 },
337 "backup-type": {
338 schema: BACKUP_TYPE_SCHEMA,
339 },
340 "backup-id": {
341 schema: BACKUP_ID_SCHEMA,
342 },
343 "backup-time": {
344 schema: BACKUP_TIME_SCHEMA,
345 },
346 },
347 },
348 access: {
349 permission: &Permission::Privilege(
350 &["datastore", "{store}"],
351 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
352 true),
353 },
354 )]
355 /// Delete backup snapshot.
356 pub fn delete_snapshot(
357 store: String,
358 backup_type: String,
359 backup_id: String,
360 backup_time: i64,
361 _info: &ApiMethod,
362 rpcenv: &mut dyn RpcEnvironment,
363 ) -> Result<Value, Error> {
364
365 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
366
367 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
368 let datastore = DataStore::lookup_datastore(&store)?;
369
370 check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
371
372 datastore.remove_backup_dir(&snapshot, false)?;
373
374 Ok(Value::Null)
375 }
376
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
///
/// Optionally restricted to one backup type and/or one backup id. Without
/// `PRIV_DATASTORE_AUDIT` only snapshots of groups owned by the caller are
/// returned; groups with an unreadable owner are skipped with a log message.
pub fn list_snapshots (
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    // audit privilege lists all snapshots, not just the caller's own
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    // Select candidate groups according to the optional type/id filters.
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            // both given: exactly one group
            let mut groups = Vec::with_capacity(1);
            groups.push(BackupGroup::new(backup_type, backup_id));
            groups
        },
        (Some(backup_type), None) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_type() == backup_type)
                .collect()
        },
        (None, Some(backup_id)) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_id() == backup_id)
                .collect()
        },
        _ => BackupInfo::list_backup_groups(&base_path)?,
    };

    // Convert one BackupInfo into the API item; falls back to a reduced item
    // (no manifest-derived fields) when reading the snapshot files fails.
    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();
        let protected = info.backup_dir.is_protected(base_path.clone());

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                // a broken fingerprint is logged but does not fail the listing
                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    },
                };

                // verify state is stored as free-form JSON in the manifest;
                // parse errors degrade to None
                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state : '{}'", err);
                        None
                    }
                };

                // total size over all files; entries without a size count as 0
                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                // manifest unreadable: report the raw on-disk file names only
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            },
        }
    };

    groups
        .iter()
        .try_fold(Vec::new(), |mut snapshots, group| {
            // best effort: an unreadable owner only logs and skips the group
            let owner = match datastore.get_owner(group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                        &store,
                        group,
                        err);
                    return Ok(snapshots);
                },
            };

            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(snapshots);
            }

            let group_backups = group.list_backups(&datastore.base_path())?;

            snapshots.extend(
                group_backups
                    .into_iter()
                    .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
            );

            Ok(snapshots)
        })
}
545
/// Count groups and snapshots per backup type ("ct", "vm", "host", other).
///
/// When `filter_owner` is set, only groups owned by that auth id are counted.
/// Groups with an unreadable owner are skipped; empty groups are not counted.
fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let groups = BackupInfo::list_backup_groups(&base_path)?;

    groups.iter()
        .filter(|group| {
            // best effort: an unreadable owner only logs and excludes the group
            let owner = match store.get_owner(group) {
                Ok(owner) => owner,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                        store.name(),
                        group,
                        err);
                    return false;
                },
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&base_path)?.len() as u64;

            // only include groups with snapshots (avoid confusing users
            // by counting/displaying empty groups)
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    "ct" => counts.ct.get_or_insert(Default::default()),
                    "vm" => counts.vm.get_or_insert(Default::default()),
                    "host" => counts.host.get_or_insert(Default::default()),
                    _ => counts.other.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
}
588
589 #[api(
590 input: {
591 properties: {
592 store: {
593 schema: DATASTORE_SCHEMA,
594 },
595 verbose: {
596 type: bool,
597 default: false,
598 optional: true,
599 description: "Include additional information like snapshot counts and GC status.",
600 },
601 },
602
603 },
604 returns: {
605 type: DataStoreStatus,
606 },
607 access: {
608 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
609 },
610 )]
611 /// Get datastore status.
612 pub fn status(
613 store: String,
614 verbose: bool,
615 _info: &ApiMethod,
616 rpcenv: &mut dyn RpcEnvironment,
617 ) -> Result<DataStoreStatus, Error> {
618 let datastore = DataStore::lookup_datastore(&store)?;
619 let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
620 let (counts, gc_status) = if verbose {
621 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
622 let user_info = CachedUserInfo::new()?;
623
624 let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
625 let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
626 None
627 } else {
628 Some(&auth_id)
629 };
630
631 let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
632 let gc_status = Some(datastore.last_gc_status());
633
634 (counts, gc_status)
635 } else {
636 (None, None)
637 };
638
639 Ok(DataStoreStatus {
640 total: storage.total,
641 used: storage.used,
642 avail: storage.avail,
643 gc_status,
644 counts,
645 })
646 }
647
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backup from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    // default: skip snapshots that already have a (recent enough) verify state
    let ignore_verified = ignore_verified.unwrap_or(true);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    // Decide the verification scope from which parameters were supplied:
    // type+id+time => one snapshot; type+id => one group; none => whole store.
    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    // Run the actual verification in a background worker task; the returned
    // UPID lets the client follow progress.
    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            // collect identifiers of snapshots/groups that failed verification
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )?;
                failed_dirs
            } else {
                // whole-datastore verify: without the verify privilege,
                // restrict to backups owned by the caller
                let privs = CachedUserInfo::new()?
                    .lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    owner,
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
796
#[api(
    input: {
        properties: {
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune a group on the datastore
///
/// Returns one entry per snapshot with its keep/protected decision. With
/// `dry-run` nothing is removed and no worker task is created.
pub fn prune(
    backup_id: String,
    backup_type: String,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(&backup_type, &backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    // when no keep-* option is set, everything is kept
    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        // report the would-be decisions without touching anything
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
                "protected": mark.protected(),
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(worker, "retention options: {}", pbs_datastore::prune::cli_options_string(&prune_options));
        task_log!(worker, "Starting prune on store \"{}\" group \"{}/{}\"",
            store, backup_type, backup_id);
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();


        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            mark,
        );

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        // NOTE(review): dry_run is always false here (early return above),
        // so this reduces to `if !keep`
        if !(dry_run || keep) {
            // removal failures are logged as warnings, pruning continues
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
929
930 #[api(
931 input: {
932 properties: {
933 "dry-run": {
934 optional: true,
935 type: bool,
936 default: false,
937 description: "Just show what prune would do, but do not delete anything.",
938 },
939 "prune-options": {
940 type: PruneOptions,
941 flatten: true,
942 },
943 store: {
944 schema: DATASTORE_SCHEMA,
945 },
946 },
947 },
948 returns: {
949 schema: UPID_SCHEMA,
950 },
951 access: {
952 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
953 },
954 )]
955 /// Prune the datastore
956 pub fn prune_datastore(
957 dry_run: bool,
958 prune_options: PruneOptions,
959 store: String,
960 _param: Value,
961 rpcenv: &mut dyn RpcEnvironment,
962 ) -> Result<String, Error> {
963
964 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
965
966 let datastore = DataStore::lookup_datastore(&store)?;
967
968 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
969
970 let upid_str = WorkerTask::new_thread(
971 "prune",
972 Some(store.clone()),
973 auth_id.to_string(),
974 to_stdout,
975 move |worker| crate::server::prune_datastore(
976 worker,
977 auth_id,
978 prune_options,
979 &store,
980 datastore,
981 dry_run
982 ),
983 )?;
984
985 Ok(upid_str)
986 }
987
988 #[api(
989 input: {
990 properties: {
991 store: {
992 schema: DATASTORE_SCHEMA,
993 },
994 },
995 },
996 returns: {
997 schema: UPID_SCHEMA,
998 },
999 access: {
1000 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
1001 },
1002 )]
1003 /// Start garbage collection.
1004 pub fn start_garbage_collection(
1005 store: String,
1006 _info: &ApiMethod,
1007 rpcenv: &mut dyn RpcEnvironment,
1008 ) -> Result<Value, Error> {
1009
1010 let datastore = DataStore::lookup_datastore(&store)?;
1011 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1012
1013 let job = Job::new("garbage_collection", &store)
1014 .map_err(|_| format_err!("garbage collection already running"))?;
1015
1016 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
1017
1018 let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
1019 .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
1020
1021 Ok(json!(upid_str))
1022 }
1023
1024 #[api(
1025 input: {
1026 properties: {
1027 store: {
1028 schema: DATASTORE_SCHEMA,
1029 },
1030 },
1031 },
1032 returns: {
1033 type: GarbageCollectionStatus,
1034 },
1035 access: {
1036 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
1037 },
1038 )]
1039 /// Garbage collection status.
1040 pub fn garbage_collection_status(
1041 store: String,
1042 _info: &ApiMethod,
1043 _rpcenv: &mut dyn RpcEnvironment,
1044 ) -> Result<GarbageCollectionStatus, Error> {
1045
1046 let datastore = DataStore::lookup_datastore(&store)?;
1047
1048 let status = datastore.last_gc_status();
1049
1050 Ok(status)
1051 }
1052
1053 #[api(
1054 returns: {
1055 description: "List the accessible datastores.",
1056 type: Array,
1057 items: { type: DataStoreListItem },
1058 },
1059 access: {
1060 permission: &Permission::Anybody,
1061 },
1062 )]
1063 /// Datastore list
1064 pub fn get_datastore_list(
1065 _param: Value,
1066 _info: &ApiMethod,
1067 rpcenv: &mut dyn RpcEnvironment,
1068 ) -> Result<Vec<DataStoreListItem>, Error> {
1069
1070 let (config, _digest) = pbs_config::datastore::config()?;
1071
1072 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1073 let user_info = CachedUserInfo::new()?;
1074
1075 let mut list = Vec::new();
1076
1077 for (store, (_, data)) in &config.sections {
1078 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
1079 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
1080 if allowed {
1081 list.push(
1082 DataStoreListItem {
1083 store: store.clone(),
1084 comment: data["comment"].as_str().map(String::from),
1085 }
1086 );
1087 }
1088 }
1089
1090 Ok(list)
1091 }
1092
// Manually-built ApiMethod (instead of #[api]) because download_file returns
// a raw async HTTP response instead of a JSON value.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1111
/// Stream a single raw file of a snapshot as an `application/octet-stream`
/// HTTP response. Access requires read privilege or group ownership.
pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        // <base>/<relative snapshot dir>/<file-name>
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // stream the file in chunks; errors mid-stream are logged and forwarded
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| bytes.freeze())
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1162
// Like API_METHOD_DOWNLOAD_FILE, but the handler decodes index/blob formats
// before streaming; hence the manual ApiMethod with an AsyncHttp handler.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1181
/// Download a single file from a backup snapshot, decoding chunk-indexed
/// archives ('.didx'/'.fidx') back into their raw byte stream on the fly.
///
/// Files the manifest marks as encrypted are rejected, since they cannot be
/// decoded server-side. The file's checksum is verified against the manifest
/// before any data is streamed (except for blobs, see FIXME below).
pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // reject encrypted files early - we cannot decode them without the client key
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // the file name extension selects which reader/decoder to use
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // verify against the manifest before streaming any data
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                // fixed indexes stream with a larger (4 MiB) buffer
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1278
// API definition for the 'upload-backup-log' endpoint, backed by
// `upload_backup_log`. Requires Datastore.Backup; the handler additionally
// enforces that the caller owns the snapshot.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
1295
/// Upload the client's backup log into an existing snapshot as
/// 'client.log.blob'.
///
/// Only the snapshot owner may do this, and only once per snapshot (the
/// upload is refused if the log file already exists).
pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // strict ownership check here - no privilege-based override like
        // the other handlers use
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // the log may only be uploaded once per snapshot
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        // collect the whole request body into memory
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }.boxed()
}
1348
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
///
/// `filepath` is either the literal "root"/"/" for the catalog root, or a
/// base64-encoded path inside the catalog. Fails if the catalog blob is
/// encrypted, since it cannot be decoded server-side.
pub fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

    let file_name = CATALOG_NAME;

    // an encrypted catalog cannot be decoded without the client key
    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    // verify catalog integrity against the manifest before serving entries
    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    // "root" and "/" both select the catalog root; anything else is base64
    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}
1423
// API definition for the 'pxar-file-download' endpoint, backed by
// `pxar_file_download`. Access requires Datastore.Read or Datastore.Backup
// on the datastore ACL path.
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1442
/// Download a single entry out of a pxar archive inside a backup snapshot.
///
/// The base64 `filepath` parameter encodes "<pxar-name>/<path-in-archive>".
/// Regular files and hardlink targets are streamed directly; directories
/// are zipped on the fly. Encrypted archives are rejected, since they
/// cannot be decoded server-side.
pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // split the decoded path into archive name and path inside it
        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        // an encrypted archive cannot be decoded without the client key
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        // verify archive integrity against the manifest before serving data
        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path).await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            // regular file: stream its contents directly
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            // hardlink: resolve the link target and stream that instead
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            path, err
                        );
                        err
                    }),
            ),
            // directory: build a zip in a background task, stream through a channel
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                proxmox_rest_server::spawn_internal_task(
                    create_zip(channelwriter, decoder, path.clone(), false)
                );
                Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1543
1544 #[api(
1545 input: {
1546 properties: {
1547 store: {
1548 schema: DATASTORE_SCHEMA,
1549 },
1550 timeframe: {
1551 type: RRDTimeFrame,
1552 },
1553 cf: {
1554 type: RRDMode,
1555 },
1556 },
1557 },
1558 access: {
1559 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1560 },
1561 )]
1562 /// Read datastore stats
1563 pub fn get_rrd_stats(
1564 store: String,
1565 timeframe: RRDTimeFrame,
1566 cf: RRDMode,
1567 _param: Value,
1568 ) -> Result<Value, Error> {
1569
1570 create_value_from_rrd(
1571 &format!("datastore/{}", store),
1572 &[
1573 "total", "used",
1574 "read_ios", "read_bytes",
1575 "write_ios", "write_bytes",
1576 "io_ticks",
1577 ],
1578 timeframe,
1579 cf,
1580 )
1581 }
1582
1583 #[api(
1584 input: {
1585 properties: {
1586 store: {
1587 schema: DATASTORE_SCHEMA,
1588 },
1589 "backup-type": {
1590 schema: BACKUP_TYPE_SCHEMA,
1591 },
1592 "backup-id": {
1593 schema: BACKUP_ID_SCHEMA,
1594 },
1595 },
1596 },
1597 access: {
1598 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1599 },
1600 )]
1601 /// Get "notes" for a backup group
1602 pub fn get_group_notes(
1603 store: String,
1604 backup_type: String,
1605 backup_id: String,
1606 rpcenv: &mut dyn RpcEnvironment,
1607 ) -> Result<String, Error> {
1608 let datastore = DataStore::lookup_datastore(&store)?;
1609
1610 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1611 let backup_group = BackupGroup::new(backup_type, backup_id);
1612
1613 check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
1614
1615 let note_path = get_group_note_path(&datastore, &backup_group);
1616 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1617 }
1618
1619 #[api(
1620 input: {
1621 properties: {
1622 store: {
1623 schema: DATASTORE_SCHEMA,
1624 },
1625 "backup-type": {
1626 schema: BACKUP_TYPE_SCHEMA,
1627 },
1628 "backup-id": {
1629 schema: BACKUP_ID_SCHEMA,
1630 },
1631 notes: {
1632 description: "A multiline text.",
1633 },
1634 },
1635 },
1636 access: {
1637 permission: &Permission::Privilege(&["datastore", "{store}"],
1638 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1639 true),
1640 },
1641 )]
1642 /// Set "notes" for a backup group
1643 pub fn set_group_notes(
1644 store: String,
1645 backup_type: String,
1646 backup_id: String,
1647 notes: String,
1648 rpcenv: &mut dyn RpcEnvironment,
1649 ) -> Result<(), Error> {
1650 let datastore = DataStore::lookup_datastore(&store)?;
1651
1652 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1653 let backup_group = BackupGroup::new(backup_type, backup_id);
1654
1655 check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
1656
1657 let note_path = get_group_note_path(&datastore, &backup_group);
1658 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
1659
1660 Ok(())
1661 }
1662
1663 #[api(
1664 input: {
1665 properties: {
1666 store: {
1667 schema: DATASTORE_SCHEMA,
1668 },
1669 "backup-type": {
1670 schema: BACKUP_TYPE_SCHEMA,
1671 },
1672 "backup-id": {
1673 schema: BACKUP_ID_SCHEMA,
1674 },
1675 "backup-time": {
1676 schema: BACKUP_TIME_SCHEMA,
1677 },
1678 },
1679 },
1680 access: {
1681 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1682 },
1683 )]
1684 /// Get "notes" for a specific backup
1685 pub fn get_notes(
1686 store: String,
1687 backup_type: String,
1688 backup_id: String,
1689 backup_time: i64,
1690 rpcenv: &mut dyn RpcEnvironment,
1691 ) -> Result<String, Error> {
1692 let datastore = DataStore::lookup_datastore(&store)?;
1693
1694 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1695 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1696
1697 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
1698
1699 let (manifest, _) = datastore.load_manifest(&backup_dir)?;
1700
1701 let notes = manifest.unprotected["notes"]
1702 .as_str()
1703 .unwrap_or("");
1704
1705 Ok(String::from(notes))
1706 }
1707
1708 #[api(
1709 input: {
1710 properties: {
1711 store: {
1712 schema: DATASTORE_SCHEMA,
1713 },
1714 "backup-type": {
1715 schema: BACKUP_TYPE_SCHEMA,
1716 },
1717 "backup-id": {
1718 schema: BACKUP_ID_SCHEMA,
1719 },
1720 "backup-time": {
1721 schema: BACKUP_TIME_SCHEMA,
1722 },
1723 notes: {
1724 description: "A multiline text.",
1725 },
1726 },
1727 },
1728 access: {
1729 permission: &Permission::Privilege(&["datastore", "{store}"],
1730 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1731 true),
1732 },
1733 )]
1734 /// Set "notes" for a specific backup
1735 pub fn set_notes(
1736 store: String,
1737 backup_type: String,
1738 backup_id: String,
1739 backup_time: i64,
1740 notes: String,
1741 rpcenv: &mut dyn RpcEnvironment,
1742 ) -> Result<(), Error> {
1743 let datastore = DataStore::lookup_datastore(&store)?;
1744
1745 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1746 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1747
1748 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
1749
1750 datastore.update_manifest(&backup_dir,|manifest| {
1751 manifest.unprotected["notes"] = notes.into();
1752 }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
1753
1754 Ok(())
1755 }
1756
1757 #[api(
1758 input: {
1759 properties: {
1760 store: {
1761 schema: DATASTORE_SCHEMA,
1762 },
1763 "backup-type": {
1764 schema: BACKUP_TYPE_SCHEMA,
1765 },
1766 "backup-id": {
1767 schema: BACKUP_ID_SCHEMA,
1768 },
1769 "backup-time": {
1770 schema: BACKUP_TIME_SCHEMA,
1771 },
1772 },
1773 },
1774 access: {
1775 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1776 },
1777 )]
1778 /// Query protection for a specific backup
1779 pub fn get_protection(
1780 store: String,
1781 backup_type: String,
1782 backup_id: String,
1783 backup_time: i64,
1784 rpcenv: &mut dyn RpcEnvironment,
1785 ) -> Result<bool, Error> {
1786 let datastore = DataStore::lookup_datastore(&store)?;
1787
1788 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1789 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1790
1791 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
1792
1793 Ok(backup_dir.is_protected(datastore.base_path()))
1794 }
1795
1796 #[api(
1797 input: {
1798 properties: {
1799 store: {
1800 schema: DATASTORE_SCHEMA,
1801 },
1802 "backup-type": {
1803 schema: BACKUP_TYPE_SCHEMA,
1804 },
1805 "backup-id": {
1806 schema: BACKUP_ID_SCHEMA,
1807 },
1808 "backup-time": {
1809 schema: BACKUP_TIME_SCHEMA,
1810 },
1811 protected: {
1812 description: "Enable/disable protection.",
1813 },
1814 },
1815 },
1816 access: {
1817 permission: &Permission::Privilege(&["datastore", "{store}"],
1818 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1819 true),
1820 },
1821 )]
1822 /// En- or disable protection for a specific backup
1823 pub fn set_protection(
1824 store: String,
1825 backup_type: String,
1826 backup_id: String,
1827 backup_time: i64,
1828 protected: bool,
1829 rpcenv: &mut dyn RpcEnvironment,
1830 ) -> Result<(), Error> {
1831 let datastore = DataStore::lookup_datastore(&store)?;
1832
1833 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1834 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1835
1836 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
1837
1838 datastore.update_protection(&backup_dir, protected)
1839 }
1840
1841 #[api(
1842 input: {
1843 properties: {
1844 store: {
1845 schema: DATASTORE_SCHEMA,
1846 },
1847 "backup-type": {
1848 schema: BACKUP_TYPE_SCHEMA,
1849 },
1850 "backup-id": {
1851 schema: BACKUP_ID_SCHEMA,
1852 },
1853 "new-owner": {
1854 type: Authid,
1855 },
1856 },
1857 },
1858 access: {
1859 permission: &Permission::Anybody,
1860 description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
1861 },
1862 )]
1863 /// Change owner of a backup group
1864 pub fn set_backup_owner(
1865 store: String,
1866 backup_type: String,
1867 backup_id: String,
1868 new_owner: Authid,
1869 rpcenv: &mut dyn RpcEnvironment,
1870 ) -> Result<(), Error> {
1871
1872 let datastore = DataStore::lookup_datastore(&store)?;
1873
1874 let backup_group = BackupGroup::new(backup_type, backup_id);
1875
1876 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1877
1878 let user_info = CachedUserInfo::new()?;
1879
1880 let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
1881
1882 let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
1883 // High-privilege user/token
1884 true
1885 } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
1886 let owner = datastore.get_owner(&backup_group)?;
1887
1888 match (owner.is_token(), new_owner.is_token()) {
1889 (true, true) => {
1890 // API token to API token, owned by same user
1891 let owner = owner.user();
1892 let new_owner = new_owner.user();
1893 owner == new_owner && Authid::from(owner.clone()) == auth_id
1894 },
1895 (true, false) => {
1896 // API token to API token owner
1897 Authid::from(owner.user().clone()) == auth_id
1898 && new_owner == auth_id
1899 },
1900 (false, true) => {
1901 // API token owner to API token
1902 owner == auth_id
1903 && Authid::from(new_owner.user().clone()) == auth_id
1904 },
1905 (false, false) => {
1906 // User to User, not allowed for unprivileged users
1907 false
1908 },
1909 }
1910 } else {
1911 false
1912 };
1913
1914 if !allowed {
1915 return Err(http_err!(UNAUTHORIZED,
1916 "{} does not have permission to change owner of backup group '{}' to {}",
1917 auth_id,
1918 backup_group,
1919 new_owner,
1920 ));
1921 }
1922
1923 if !user_info.is_active_auth_id(&new_owner) {
1924 bail!("{} '{}' is inactive or non-existent",
1925 if new_owner.is_token() {
1926 "API token".to_string()
1927 } else {
1928 "user".to_string()
1929 },
1930 new_owner);
1931 }
1932
1933 datastore.set_owner(&backup_group, &new_owner, true)?;
1934
1935 Ok(())
1936 }
1937
// Sub-directory routes available below a single datastore
// (`/admin/datastore/{store}/...`), listed by sub-directory name.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "prune-datastore",
        &Router::new()
            .post(&API_METHOD_PRUNE_DATASTORE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
2037
// Router for a single datastore: GET lists the available sub-directories,
// and each sub-directory dispatches to its handler above.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
2041
2042
// Top-level datastore router: GET lists all datastores, and everything
// below `{store}` is handled by DATASTORE_INFO_ROUTER.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);