//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
    Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
    KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    can_access_any_namespace, check_ns_privs_full, verify_all_backups, verify_backup_dir,
    verify_backup_group, verify_filter, ListAccessibleBackupGroups, NS_PRIVS_OK,
};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

// helper to unify a common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(store, operation)?;

    if limited {
        let owner = datastore.get_owner(ns, backup_group)?;
        check_backup_owner(&owner, &auth_id)?;
    }

    Ok(datastore)
}

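// Illustrative sketch (hypothetical caller, not part of this module): an
// endpoint that grants full access via Datastore.Modify, or limited access via
// Datastore.Prune plus ownership of the group, would use the helper like this:
//
//     let datastore = check_privs_and_load_store(
//         &store,
//         &ns,
//         &auth_id,
//         PRIV_DATASTORE_MODIFY, // full access: the owner check is skipped
//         PRIV_DATASTORE_PRUNE,  // limited access: the owner check is enforced
//         Some(Operation::Write),
//         &group,
//     )?;
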
fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    datastore
        .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            let owner = match datastore.get_owner(&ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        print_store_and_ns(&store, &ns),
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}

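// Illustrative sketch (hypothetical values): each entry pushed above roughly
// corresponds to
//
//     GroupListItem {
//         backup: group.into(),       // e.g. the "vm/100" group
//         last_backup: 1_670_000_000, // epoch seconds of the newest finished snapshot
//         owner: Some(owner),         // e.g. "backup@pbs"
//         backup_count: 3,
//         files: last_backup.files,   // file names of that last snapshot
//         comment: Some("first line of the group notes file".to_string()),
//     }
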
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    if !datastore.remove_backup_group(&ns, &group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(ns, backup_dir)?;

    let info = BackupInfo::new(snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(ns, backup_dir)?;

    snapshot.destroy(false)?;

    Ok(Value::Null)
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    // FIXME: also filter by owner before collecting; to do that nicely, the owner should move
    // into the backup group and provide an error-free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_groups_ok(ns.clone())?
            .filter(|group| group.backup_type() == backup_type)
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => datastore
            .iter_backup_groups_ok(ns.clone())?
            .filter(|group| group.backup_id() == backup_id)
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(ns.clone())?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state: '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}' in {} - {}",
                    group.group(),
                    print_store_and_ns(&store, &ns),
                    err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

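// Illustrative sketch (hypothetical values): on success, the closure above
// yields entries roughly like
//
//     SnapshotListItem {
//         backup,            // group plus snapshot time, e.g. vm/100 @ 1670000000
//         comment: Some("first line of the manifest notes".to_string()),
//         verification,      // last verify state parsed from the manifest
//         fingerprint: None, // encryption key fingerprint, if any
//         files,             // per-file size and crypt mode
//         size: Some(12345), // sum of all file sizes
//         owner: Some(owner),
//         protected: false,
//     }
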
fn get_snapshots_count(store: &Arc<DataStore>, owner: Option<&Authid>) -> Result<Counts, Error> {
    let root_ns = Default::default();
    ListAccessibleBackupGroups::new_with_privs(
        store,
        root_ns,
        MAX_NAMESPACE_DEPTH,
        Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
        None,
        owner,
    )?
    .try_fold(Counts::default(), |mut counts, group| {
        let group = match group {
            Ok(group) => group,
            Err(_) => return Ok(counts), // TODO: add this as error counts?
        };
        let snapshot_count = group.list_backups()?.len() as u64;

        // only include groups with snapshots, counting/displaying empty groups can confuse
        if snapshot_count > 0 {
            let type_count = match group.backup_type() {
                BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                BackupType::Host => counts.host.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;
        }

        Ok(counts)
    })
}

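// Illustrative sketch (hypothetical numbers): the returned counts aggregate per
// backup type, only over groups that have at least one snapshot, e.g.
//
//     counts.vm   => Some(.. groups: 2, snapshots: 5 ..)
//     counts.host => Some(.. groups: 1, snapshots: 2 ..)
//     counts.ct   => None (no accessible CT group with snapshots)
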
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
            the full statistics. Counts of accessible groups are always returned, if any",
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read));

    let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
        true
    } else if store_privs & PRIV_DATASTORE_READ != 0 {
        false // allow at least counts, the user can read the groups anyway
    } else if let Ok(ref datastore) = datastore {
        if !can_access_any_namespace(Arc::clone(datastore), &auth_id, &user_info) {
            return Err(http_err!(FORBIDDEN, "permission check failed"));
        }
        false
    } else {
        return Err(http_err!(FORBIDDEN, "permission check failed")); // avoid leaking existence info
    };
    let datastore = datastore?; // only unwrap now to avoid leaking existence info

    let (counts, gc_status) = if verbose {
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = if store_stats {
            Some(datastore.last_gc_status())
        } else {
            None
        };

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(if store_stats {
        let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
        DataStoreStatus {
            total: storage.total,
            used: storage.used,
            avail: storage.avail,
            gc_status,
            counts,
        }
    } else {
        DataStoreStatus {
            total: 0,
            used: 0,
            avail: 0,
            gc_status,
            counts,
        }
    })
}

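// Privilege-to-response matrix for `status` (summary of the checks above):
//
//     AUDIT or BACKUP on the store => real total/used/avail; with `verbose` also counts + GC status
//     READ on the store            => zeroed stats; with `verbose` counts only
//     namespace-level access only  => zeroed stats; with `verbose` counts only
//     no access at all             => 403, without revealing whether the store exists
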
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let owner_check_required = check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if ns.is_root() {
                store
            } else {
                format!("{}:{}", store, ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?;
                failed_dirs
            } else {
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

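// Illustrative worker ids built above (hypothetical values; the snapshot time
// is rendered as zero-padded uppercase hex epoch seconds via `{:08X}`):
//
//     verify_snapshot => "tank:dev/team/vm/100/63A1B2C3"
//     verify_group    => "tank:dev/team/vm/100"
//     verify (all)    => "tank", or "tank:dev/team" for a non-root namespace
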
#[api(
    input: {
        properties: {
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "keep-options": {
                type: KeepOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    keep_options: KeepOptions,
    store: String,
    ns: Option<BackupNamespace>,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    let worker_id = format!("{}:{}:{}", store, ns, group);
    let group = datastore.backup_group(ns.clone(), group);

    let mut prune_result = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &keep_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !keep_options.keeps_something();

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let mut result = json!({
                "backup-type": info.backup_dir.backup_type(),
                "backup-id": info.backup_dir.backup_id(),
                "backup-time": info.backup_dir.backup_time(),
                "keep": keep,
                "protected": mark.protected(),
            });
            let prune_ns = info.backup_dir.backup_ns();
            if !prune_ns.is_root() {
                result["ns"] = serde_json::to_value(prune_ns)?;
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        let mut opts = Vec::new();
        if !ns.is_root() {
            opts.push(format!("--ns {ns}"));
        }
        crate::server::cli_keep_options(&mut opts, &keep_options);

        task_log!(worker, "retention options: {}", opts.join(" "));
        task_log!(
            worker,
            "Starting prune on {} group \"{}\"",
            print_store_and_ns(&store, &ns),
            group.group(),
        );
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();

        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.ty,
            "backup-id": group.id,
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = info.backup_dir.destroy(false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

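// Illustrative sketch (hypothetical values): each element of the returned prune
// result array uses the literal keys built above, e.g.
//
//     {
//         "backup-type": "vm",
//         "backup-id": "100",
//         "backup-time": 1670000000,
//         "keep": false,
//         "protected": false
//     }
//
// In dry-run mode an additional "ns" field is set for non-root namespaces.
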
#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneJobOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneJobOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let user_info = CachedUserInfo::new()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    user_info.check_privs(
        &auth_id,
        &prune_options.acl_path(&store),
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let ns = prune_options.ns.clone().unwrap_or_default();
    let worker_id = format!("{}:{}", store, ns);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let acl_path = &["datastore", store];
        let user_privs = user_info.lookup_privs(&auth_id, acl_path);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut allow_id = false;
        if !allowed {
            if let Ok(any_privs) = user_info.any_privs_below(&auth_id, acl_path, NS_PRIVS_OK) {
                allow_id = any_privs;
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

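// Visibility rules implemented above:
//
//     AUDIT or BACKUP on /datastore/{store}     => listed, including the comment
//     some privilege further down the ACL tree
//     (e.g. on a namespace)                     => listed by name, comment omitted
//     neither                                   => not listed at all
//
// The maintenance mode is included whenever the store is listed.
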
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(&store, &backup_ns),
            backup_dir,
            file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir_api.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(&store, &backup_ns),
            backup_dir_api,
            file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            0,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir_api.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let mut path = backup_dir.full_path();
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!(
            "Upload backup log to {} {backup_dir_api}/{file_name}",
            print_store_and_ns(&store, &backup_ns),
        );

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}

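// Illustrative sketch: `filepath` is the base64-encoded catalog path; the magic
// values "root" and "/" select the root directory. Listing a hypothetical
// "/etc" directory would therefore pass
//
//     filepath=L2V0Yw==    // base64::encode("/etc") == "L2V0Yw=="
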
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
            ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
        ]),
    )
).access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let ns = optional_ns_param(&param)?;

        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path)
            .await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                if tar {
                    proxmox_rest_server::spawn_internal_task(create_tar(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
                    Body::wrap_stream(zstdstream.map_err(move |err| {
                        eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
                        err
                    }))
                } else {
                    proxmox_rest_server::spawn_internal_task(create_zip(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                        eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                        err
                    }))
                }
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

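// Illustrative walk-through of the `filepath` handling above (hypothetical
// values): if base64::decode(filepath) yields "/root.pxar.didx/etc/hostname",
// the leading '/' is stripped and the rest is split at the first '/':
//
//     pxar_name == "root.pxar.didx" // the archive; must not be encrypted
//     file_path == b"etc/hostname"  // the entry inside the pxar archive
//
// Directory entries are streamed as zip, or as tar.zst when `tar` is set.
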
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrame,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrame,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let disk_manager = crate::tools::disks::DiskManage::new();

    let mut rrd_fields = vec![
        "total",
        "used",
        "read_ios",
        "read_bytes",
        "write_ios",
        "write_bytes",
    ];

    // we do not have io_ticks for zpools, so don't include them
    match disk_manager.find_mounted_device(&datastore.base_path()) {
        Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
        _ => rrd_fields.push("io_ticks"),
    };

    create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
    },
)]
/// Read the counts of active read/write operations on the datastore
pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
    let active_operations = task_tracking::get_active_operations(&store)?;
    Ok(json!({
        "read": active_operations.read,
        "write": active_operations.write,
    }))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Get "notes" for a backup group
pub fn get_group_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_group,
    )?;

    let note_path = get_group_note_path(&datastore, &ns, &backup_group);
    Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_group,
    )?;

    let note_path = get_group_note_path(&datastore, &ns, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    let (manifest, _) = backup_dir.load_manifest()?;

    let notes = manifest.unprotected["notes"].as_str().unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    backup_dir
        .update_manifest(|manifest| {
            manifest.unprotected["notes"] = notes.into();
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    Ok(backup_dir.is_protected())
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// En- or disable protection for a specific backup
pub fn set_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    datastore.update_protection(&backup_dir, protected)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and \
            a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let owner_check_required = check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_group = datastore.backup_group(ns, backup_group);

    if owner_check_required {
        let owner = backup_group.get_owner()?;

        let allowed = match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            }
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
            }
            (false, true) => {
                // API token owner to API token
                owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
            }
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            }
        };

        if !allowed {
            return Err(http_err!(
                UNAUTHORIZED,
                "{} does not have permission to change owner of backup group '{}' to {}",
                auth_id,
                backup_group.group(),
                new_owner,
            ));
        }
    }

    let user_info = CachedUserInfo::new()?;

    if !user_info.is_active_auth_id(&new_owner) {
        bail!(
            "{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner
        );
    }

    backup_group.set_owner(&new_owner, true)?;

    Ok(())
}

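// Owner-change rules enforced above for callers without Datastore.Modify
// (i.e. with Datastore.Backup plus the owner check):
//
//     token -> token : both tokens belong to the caller, who is a user
//     token -> user  : the token belongs to the caller, who becomes the owner
//     user  -> token : the caller is the current owner and the token is their own
//     user  -> user  : never allowed without Datastore.Modify
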
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "namespace",
        // FIXME: move into datastore:: sub-module?!
        &crate::api2::admin::namespace::ROUTER,
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);