1 //! Datastore Management
2
3 use std::collections::HashSet;
4 use std::ffi::OsStr;
5 use std::os::unix::ffi::OsStrExt;
6 use std::path::PathBuf;
7 use std::sync::Arc;
8
9 use anyhow::{bail, format_err, Error};
10 use futures::*;
11 use hyper::http::request::Parts;
12 use hyper::{header, Body, Response, StatusCode};
13 use serde::Deserialize;
14 use serde_json::{json, Value};
15 use tokio_stream::wrappers::ReceiverStream;
16
17 use proxmox_async::blocking::WrappedReaderStream;
18 use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
19 use proxmox_compression::zstd::ZstdEncoder;
20 use proxmox_router::{
21 http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
22 Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
23 };
24 use proxmox_schema::*;
25 use proxmox_sys::fs::{
26 file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
27 };
28 use proxmox_sys::sortable;
29 use proxmox_sys::{task_log, task_warn};
30
31 use pxar::accessor::aio::Accessor;
32 use pxar::EntryKind;
33
34 use pbs_api_types::{
35 print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
36 Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
37 KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
38 SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
39 BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
40 MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
41 PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
42 UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
43 };
44 use pbs_client::pxar::{create_tar, create_zip};
45 use pbs_config::CachedUserInfo;
46 use pbs_datastore::backup_info::BackupInfo;
47 use pbs_datastore::cached_chunk_reader::CachedChunkReader;
48 use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
49 use pbs_datastore::data_blob::DataBlob;
50 use pbs_datastore::data_blob_reader::DataBlobReader;
51 use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
52 use pbs_datastore::fixed_index::FixedIndexReader;
53 use pbs_datastore::index::IndexFile;
54 use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
55 use pbs_datastore::prune::compute_prune_info;
56 use pbs_datastore::{
57 check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
58 StoreProgress, CATALOG_NAME,
59 };
60 use pbs_tools::json::required_string_param;
61 use proxmox_rest_server::{formatter, WorkerTask};
62
63 use crate::api2::backup::optional_ns_param;
64 use crate::api2::node::rrd::create_value_from_rrd;
65 use crate::backup::{
66 check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
67 ListAccessibleBackupGroups, NS_PRIVS_OK,
68 };
69
70 use crate::server::jobstate::Job;
71
72 const GROUP_NOTES_FILE_NAME: &str = "notes";
73
74 fn get_group_note_path(
75 store: &DataStore,
76 ns: &BackupNamespace,
77 group: &pbs_api_types::BackupGroup,
78 ) -> PathBuf {
79 let mut note_path = store.group_path(ns, group);
80 note_path.push(GROUP_NOTES_FILE_NAME);
81 note_path
82 }
83
84 // Helper to unify the common sequence of checks:
85 // 1. check privileges on the namespace (full or limited access)
86 // 2. load the datastore
87 // 3. if needed (limited access only), check the owner of the group
88 fn check_privs_and_load_store(
89 store: &str,
90 ns: &BackupNamespace,
91 auth_id: &Authid,
92 full_access_privs: u64,
93 partial_access_privs: u64,
94 operation: Option<Operation>,
95 backup_group: &pbs_api_types::BackupGroup,
96 ) -> Result<Arc<DataStore>, Error> {
97 let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;
98
99 let datastore = DataStore::lookup_datastore(store, operation)?;
100
101 if limited {
102 let owner = datastore.get_owner(ns, backup_group)?;
103 check_backup_owner(&owner, auth_id)?;
104 }
105
106 Ok(datastore)
107 }
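// Usage sketch (illustrative only, mirroring call sites such as delete_group
// below): a handler that needs write access to a group passes its full/partial
// privilege pair and lets the helper enforce the owner check:
//
//     let datastore = check_privs_and_load_store(
//         &store,
//         &ns,
//         &auth_id,
//         PRIV_DATASTORE_MODIFY, // full access: no owner check
//         PRIV_DATASTORE_PRUNE,  // limited access: owner check enforced
//         Some(Operation::Write),
//         &group,
//     )?;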
108
109 fn read_backup_index(
110 backup_dir: &BackupDir,
111 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
112 let (manifest, index_size) = backup_dir.load_manifest()?;
113
114 let mut result = Vec::new();
115 for item in manifest.files() {
116 result.push(BackupContent {
117 filename: item.filename.clone(),
118 crypt_mode: Some(item.crypt_mode),
119 size: Some(item.size),
120 });
121 }
122
123 result.push(BackupContent {
124 filename: MANIFEST_BLOB_NAME.to_string(),
125 crypt_mode: match manifest.signature {
126 Some(_) => Some(CryptMode::SignOnly),
127 None => Some(CryptMode::None),
128 },
129 size: Some(index_size),
130 });
131
132 Ok((manifest, result))
133 }
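// The returned list mirrors the manifest plus one synthetic entry for the
// manifest blob itself. Illustrative shape (sizes and crypt modes made up):
//
//     [
//         BackupContent { filename: "root.pxar.didx".into(), crypt_mode: Some(CryptMode::Encrypt), size: Some(123) },
//         BackupContent { filename: MANIFEST_BLOB_NAME.to_string(), crypt_mode: Some(CryptMode::SignOnly), size: Some(456) },
//     ]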
134
135 fn get_all_snapshot_files(
136 info: &BackupInfo,
137 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
138 let (manifest, mut files) = read_backup_index(&info.backup_dir)?;
139
140 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
141 acc.insert(item.filename.clone());
142 acc
143 });
144
145 for file in &info.files {
146 if file_set.contains(file) {
147 continue;
148 }
149 files.push(BackupContent {
150 filename: file.to_string(),
151 size: None,
152 crypt_mode: None,
153 });
154 }
155
156 Ok((manifest, files))
157 }
158
159 #[api(
160 input: {
161 properties: {
162 store: {
163 schema: DATASTORE_SCHEMA,
164 },
165 ns: {
166 type: BackupNamespace,
167 optional: true,
168 },
169 },
170 },
171 returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
172 access: {
173 permission: &Permission::Anybody,
174 description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
175 /datastore/{store}[/{namespace}]",
176 },
177 )]
178 /// List backup groups.
179 pub fn list_groups(
180 store: String,
181 ns: Option<BackupNamespace>,
182 rpcenv: &mut dyn RpcEnvironment,
183 ) -> Result<Vec<GroupListItem>, Error> {
184 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
185 let ns = ns.unwrap_or_default();
186
187 let list_all = !check_ns_privs_full(
188 &store,
189 &ns,
190 &auth_id,
191 PRIV_DATASTORE_AUDIT,
192 PRIV_DATASTORE_BACKUP,
193 )?;
194
195 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
196
197 datastore
198 .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
199 .try_fold(Vec::new(), |mut group_info, group| {
200 let group = group?;
201
202 let owner = match datastore.get_owner(&ns, group.as_ref()) {
203 Ok(auth_id) => auth_id,
204 Err(err) => {
205 eprintln!(
206 "Failed to get owner of group '{}' in {} - {}",
207 group.group(),
208 print_store_and_ns(&store, &ns),
209 err
210 );
211 return Ok(group_info);
212 }
213 };
214 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
215 return Ok(group_info);
216 }
217
218 let snapshots = match group.list_backups() {
219 Ok(snapshots) => snapshots,
220 Err(_) => return Ok(group_info),
221 };
222
223 let backup_count: u64 = snapshots.len() as u64;
224 if backup_count == 0 {
225 return Ok(group_info);
226 }
227
228 let last_backup = snapshots
229 .iter()
230 .fold(&snapshots[0], |a, b| {
231 if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
232 a
233 } else {
234 b
235 }
236 })
237 .to_owned();
238
239 let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
240 let comment = file_read_firstline(&note_path).ok();
241
242 group_info.push(GroupListItem {
243 backup: group.into(),
244 last_backup: last_backup.backup_dir.backup_time(),
245 owner: Some(owner),
246 backup_count,
247 files: last_backup.files,
248 comment,
249 });
250
251 Ok(group_info)
252 })
253 }
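// Illustrative JSON for one returned group (a sketch only; the exact field
// layout is defined by the GroupListItem serde derives in pbs_api_types, and
// all values here are made up):
//
//     {
//         "backup-type": "vm", "backup-id": "100",
//         "last-backup": 1660000000, "backup-count": 4,
//         "owner": "root@pam", "files": ["root.pxar.didx"], "comment": "nightly"
//     }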
254
255 #[api(
256 input: {
257 properties: {
258 store: { schema: DATASTORE_SCHEMA },
259 ns: {
260 type: BackupNamespace,
261 optional: true,
262 },
263 group: {
264 type: pbs_api_types::BackupGroup,
265 flatten: true,
266 },
267 },
268 },
269 access: {
270 permission: &Permission::Anybody,
271 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
272 or DATASTORE_PRUNE and being the owner of the group",
273 },
274 )]
275 /// Delete backup group including all snapshots.
276 pub async fn delete_group(
277 store: String,
278 ns: Option<BackupNamespace>,
279 group: pbs_api_types::BackupGroup,
280 rpcenv: &mut dyn RpcEnvironment,
281 ) -> Result<Value, Error> {
282 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
283
284 tokio::task::spawn_blocking(move || {
285 let ns = ns.unwrap_or_default();
286
287 let datastore = check_privs_and_load_store(
288 &store,
289 &ns,
290 &auth_id,
291 PRIV_DATASTORE_MODIFY,
292 PRIV_DATASTORE_PRUNE,
293 Some(Operation::Write),
294 &group,
295 )?;
296
297 if !datastore.remove_backup_group(&ns, &group)? {
298 bail!("group only partially deleted due to protected snapshots");
299 }
300
301 Ok(Value::Null)
302 })
303 .await?
304 }
305
306 #[api(
307 input: {
308 properties: {
309 store: { schema: DATASTORE_SCHEMA },
310 ns: {
311 type: BackupNamespace,
312 optional: true,
313 },
314 backup_dir: {
315 type: pbs_api_types::BackupDir,
316 flatten: true,
317 },
318 },
319 },
320 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
321 access: {
322 permission: &Permission::Anybody,
323 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
324 DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
325 },
326 )]
327 /// List snapshot files.
328 pub async fn list_snapshot_files(
329 store: String,
330 ns: Option<BackupNamespace>,
331 backup_dir: pbs_api_types::BackupDir,
332 _info: &ApiMethod,
333 rpcenv: &mut dyn RpcEnvironment,
334 ) -> Result<Vec<BackupContent>, Error> {
335 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
336
337 tokio::task::spawn_blocking(move || {
338 let ns = ns.unwrap_or_default();
339
340 let datastore = check_privs_and_load_store(
341 &store,
342 &ns,
343 &auth_id,
344 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
345 PRIV_DATASTORE_BACKUP,
346 Some(Operation::Read),
347 &backup_dir.group,
348 )?;
349
350 let snapshot = datastore.backup_dir(ns, backup_dir)?;
351
352 let info = BackupInfo::new(snapshot)?;
353
354 let (_manifest, files) = get_all_snapshot_files(&info)?;
355
356 Ok(files)
357 })
358 .await?
359 }
360
361 #[api(
362 input: {
363 properties: {
364 store: { schema: DATASTORE_SCHEMA },
365 ns: {
366 type: BackupNamespace,
367 optional: true,
368 },
369 backup_dir: {
370 type: pbs_api_types::BackupDir,
371 flatten: true,
372 },
373 },
374 },
375 access: {
376 permission: &Permission::Anybody,
377 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
378 or DATASTORE_PRUNE and being the owner of the group",
379 },
380 )]
381 /// Delete backup snapshot.
382 pub async fn delete_snapshot(
383 store: String,
384 ns: Option<BackupNamespace>,
385 backup_dir: pbs_api_types::BackupDir,
386 _info: &ApiMethod,
387 rpcenv: &mut dyn RpcEnvironment,
388 ) -> Result<Value, Error> {
389 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
390
391 tokio::task::spawn_blocking(move || {
392 let ns = ns.unwrap_or_default();
393
394 let datastore = check_privs_and_load_store(
395 &store,
396 &ns,
397 &auth_id,
398 PRIV_DATASTORE_MODIFY,
399 PRIV_DATASTORE_PRUNE,
400 Some(Operation::Write),
401 &backup_dir.group,
402 )?;
403
404 let snapshot = datastore.backup_dir(ns, backup_dir)?;
405
406 snapshot.destroy(false)?;
407
408 Ok(Value::Null)
409 })
410 .await?
411 }
412
413 #[api(
414 streaming: true,
415 input: {
416 properties: {
417 store: { schema: DATASTORE_SCHEMA },
418 ns: {
419 type: BackupNamespace,
420 optional: true,
421 },
422 "backup-type": {
423 optional: true,
424 type: BackupType,
425 },
426 "backup-id": {
427 optional: true,
428 schema: BACKUP_ID_SCHEMA,
429 },
430 },
431 },
432 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
433 access: {
434 permission: &Permission::Anybody,
435 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
436 or DATASTORE_BACKUP and being the owner of the group",
437 },
438 )]
439 /// List backup snapshots.
440 pub async fn list_snapshots(
441 store: String,
442 ns: Option<BackupNamespace>,
443 backup_type: Option<BackupType>,
444 backup_id: Option<String>,
445 _param: Value,
446 _info: &ApiMethod,
447 rpcenv: &mut dyn RpcEnvironment,
448 ) -> Result<Vec<SnapshotListItem>, Error> {
449 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
450
451 tokio::task::spawn_blocking(move || unsafe {
452 list_snapshots_blocking(store, ns, backup_type, backup_id, auth_id)
453 })
454 .await
455 .map_err(|err| format_err!("failed to await blocking task: {err}"))?
456 }
457
458 /// This must not run in a main worker thread as it potentially does tons of I/O.
459 unsafe fn list_snapshots_blocking(
460 store: String,
461 ns: Option<BackupNamespace>,
462 backup_type: Option<BackupType>,
463 backup_id: Option<String>,
464 auth_id: Authid,
465 ) -> Result<Vec<SnapshotListItem>, Error> {
466 let ns = ns.unwrap_or_default();
467
468 let list_all = !check_ns_privs_full(
469 &store,
470 &ns,
471 &auth_id,
472 PRIV_DATASTORE_AUDIT,
473 PRIV_DATASTORE_BACKUP,
474 )?;
475
476 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
477
478 // FIXME: also filter by owner before collecting; to do that nicely, the owner should move into
479 // the backup group and provide an error-free (Err -> None) accessor
480 let groups = match (backup_type, backup_id) {
481 (Some(backup_type), Some(backup_id)) => {
482 vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
483 }
484 // FIXME: Recursion
485 (Some(backup_type), None) => datastore
486 .iter_backup_type_ok(ns.clone(), backup_type)?
487 .collect(),
488 // FIXME: Recursion
489 (None, Some(backup_id)) => BackupType::iter()
490 .filter_map(|backup_type| {
491 let group =
492 datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id.clone());
493 group.exists().then_some(group)
494 })
495 .collect(),
496 // FIXME: Recursion
497 (None, None) => datastore.list_backup_groups(ns.clone())?,
498 };
499
500 let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
501 let backup = pbs_api_types::BackupDir {
502 group: group.into(),
503 time: info.backup_dir.backup_time(),
504 };
505 let protected = info.backup_dir.is_protected();
506
507 match get_all_snapshot_files(&info) {
508 Ok((manifest, files)) => {
509 // extract the first line from notes
510 let comment: Option<String> = manifest.unprotected["notes"]
511 .as_str()
512 .and_then(|notes| notes.lines().next())
513 .map(String::from);
514
515 let fingerprint = match manifest.fingerprint() {
516 Ok(fp) => fp,
517 Err(err) => {
518 eprintln!("error parsing fingerprint: '{}'", err);
519 None
520 }
521 };
522
523 let verification = manifest.unprotected["verify_state"].clone();
524 let verification: Option<SnapshotVerifyState> =
525 match serde_json::from_value(verification) {
526 Ok(verify) => verify,
527 Err(err) => {
528 eprintln!("error parsing verification state : '{}'", err);
529 None
530 }
531 };
532
533 let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
534
535 SnapshotListItem {
536 backup,
537 comment,
538 verification,
539 fingerprint,
540 files,
541 size,
542 owner,
543 protected,
544 }
545 }
546 Err(err) => {
547 eprintln!("error during snapshot file listing: '{}'", err);
548 let files = info
549 .files
550 .into_iter()
551 .map(|filename| BackupContent {
552 filename,
553 size: None,
554 crypt_mode: None,
555 })
556 .collect();
557
558 SnapshotListItem {
559 backup,
560 comment: None,
561 verification: None,
562 fingerprint: None,
563 files,
564 size: None,
565 owner,
566 protected,
567 }
568 }
569 }
570 };
571
572 groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
573 let owner = match group.get_owner() {
574 Ok(auth_id) => auth_id,
575 Err(err) => {
576 eprintln!(
577 "Failed to get owner of group '{}' in {} - {}",
578 group.group(),
579 print_store_and_ns(&store, &ns),
580 err
581 );
582 return Ok(snapshots);
583 }
584 };
585
586 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
587 return Ok(snapshots);
588 }
589
590 let group_backups = group.list_backups()?;
591
592 snapshots.extend(
593 group_backups
594 .into_iter()
595 .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
596 );
597
598 Ok(snapshots)
599 })
600 }
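// The group selection above maps the optional parameters as follows:
//
//     (Some(ty), Some(id)) -> exactly that one group
//     (Some(ty), None)     -> all groups of that backup type
//     (None, Some(id))     -> that id across all backup types where it exists
//     (None, None)         -> every group in the namespace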
601
602 async fn get_snapshots_count(
603 store: &Arc<DataStore>,
604 owner: Option<&Authid>,
605 ) -> Result<Counts, Error> {
606 let store = Arc::clone(store);
607 let owner = owner.cloned();
608 tokio::task::spawn_blocking(move || {
609 let root_ns = Default::default();
610 ListAccessibleBackupGroups::new_with_privs(
611 &store,
612 root_ns,
613 MAX_NAMESPACE_DEPTH,
614 Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
615 None,
616 owner.as_ref(),
617 )?
618 .try_fold(Counts::default(), |mut counts, group| {
619 let group = match group {
620 Ok(group) => group,
621 Err(_) => return Ok(counts), // TODO: add this as error counts?
622 };
623 let snapshot_count = group.list_backups()?.len() as u64;
624
625 // only include groups with snapshots; counting/displaying empty groups can be confusing
626 if snapshot_count > 0 {
627 let type_count = match group.backup_type() {
628 BackupType::Ct => counts.ct.get_or_insert(Default::default()),
629 BackupType::Vm => counts.vm.get_or_insert(Default::default()),
630 BackupType::Host => counts.host.get_or_insert(Default::default()),
631 };
632
633 type_count.groups += 1;
634 type_count.snapshots += snapshot_count;
635 }
636
637 Ok(counts)
638 })
639 })
640 .await?
641 }
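// Illustrative result: a datastore with two CT groups (10 snapshots total)
// and no VM or host backups yields roughly
//
//     Counts { ct: Some({ groups: 2, snapshots: 10 }), vm: None, host: None }
//
// (pseudo-notation; the inner per-type counter struct is defined in
// pbs_api_types, and types without snapshot-bearing groups stay None).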
642
643 #[api(
644 input: {
645 properties: {
646 store: {
647 schema: DATASTORE_SCHEMA,
648 },
649 verbose: {
650 type: bool,
651 default: false,
652 optional: true,
653 description: "Include additional information like snapshot counts and GC status.",
654 },
655 },
656
657 },
658 returns: {
659 type: DataStoreStatus,
660 },
661 access: {
662 permission: &Permission::Anybody,
663 description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
664 the full statistics. Counts of accessible groups are always returned, if any exist",
665 },
666 )]
667 /// Get datastore status.
668 pub async fn status(
669 store: String,
670 verbose: bool,
671 _info: &ApiMethod,
672 rpcenv: &mut dyn RpcEnvironment,
673 ) -> Result<DataStoreStatus, Error> {
674 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
675 let user_info = CachedUserInfo::new()?;
676 let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
677
678 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read));
679
680 let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
681 true
682 } else if store_privs & PRIV_DATASTORE_READ != 0 {
683 false // allow at least counts, the user can read groups anyway
684 } else {
685 match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) {
686 // avoid leaking existence info if the user doesn't have at least some privilege below
687 Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")),
688 _ => false,
689 }
690 };
691 let datastore = datastore?; // only unwrap now to avoid leaking existence info
692
693 let (counts, gc_status) = if verbose {
694 let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
695 None
696 } else {
697 Some(&auth_id)
698 };
699
700 let counts = Some(get_snapshots_count(&datastore, filter_owner).await?);
701 let gc_status = if store_stats {
702 Some(datastore.last_gc_status())
703 } else {
704 None
705 };
706
707 (counts, gc_status)
708 } else {
709 (None, None)
710 };
711
712 Ok(if store_stats {
713 let storage = crate::tools::fs::fs_info(datastore.base_path()).await?;
714 DataStoreStatus {
715 total: storage.total,
716 used: storage.used,
717 avail: storage.available,
718 gc_status,
719 counts,
720 }
721 } else {
722 DataStoreStatus {
723 total: 0,
724 used: 0,
725 avail: 0,
726 gc_status,
727 counts,
728 }
729 })
730 }
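// Privilege outcome summary, as implemented above:
//
//     AUDIT or BACKUP on the store  -> usage + GC status (+ counts if verbose)
//     READ on the store             -> zeroed usage; counts if verbose
//     any privilege below the store -> zeroed usage; counts if verbose
//     no privilege at all           -> FORBIDDEN, to avoid leaking existence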
731
732 #[api(
733 input: {
734 properties: {
735 store: {
736 schema: DATASTORE_SCHEMA,
737 },
738 ns: {
739 type: BackupNamespace,
740 optional: true,
741 },
742 "backup-type": {
743 type: BackupType,
744 optional: true,
745 },
746 "backup-id": {
747 schema: BACKUP_ID_SCHEMA,
748 optional: true,
749 },
750 "ignore-verified": {
751 schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
752 optional: true,
753 },
754 "outdated-after": {
755 schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
756 optional: true,
757 },
758 "backup-time": {
759 schema: BACKUP_TIME_SCHEMA,
760 optional: true,
761 },
762 "max-depth": {
763 schema: NS_MAX_DEPTH_SCHEMA,
764 optional: true,
765 },
766 },
767 },
768 returns: {
769 schema: UPID_SCHEMA,
770 },
771 access: {
772 permission: &Permission::Anybody,
773 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
774 or DATASTORE_BACKUP and being the owner of the group",
775 },
776 )]
777 /// Verify backups.
778 ///
779 /// This function can verify a single backup snapshot, all backups from a backup group,
780 /// or all backups in the datastore.
781 #[allow(clippy::too_many_arguments)]
782 pub fn verify(
783 store: String,
784 ns: Option<BackupNamespace>,
785 backup_type: Option<BackupType>,
786 backup_id: Option<String>,
787 backup_time: Option<i64>,
788 ignore_verified: Option<bool>,
789 outdated_after: Option<i64>,
790 max_depth: Option<usize>,
791 rpcenv: &mut dyn RpcEnvironment,
792 ) -> Result<Value, Error> {
793 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
794 let ns = ns.unwrap_or_default();
795
796 let owner_check_required = check_ns_privs_full(
797 &store,
798 &ns,
799 &auth_id,
800 PRIV_DATASTORE_VERIFY,
801 PRIV_DATASTORE_BACKUP,
802 )?;
803
804 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
805 let ignore_verified = ignore_verified.unwrap_or(true);
806
807 let worker_id;
808
809 let mut backup_dir = None;
810 let mut backup_group = None;
811 let mut worker_type = "verify";
812
813 match (backup_type, backup_id, backup_time) {
814 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
815 worker_id = format!(
816 "{}:{}/{}/{}/{:08X}",
817 store,
818 ns.display_as_path(),
819 backup_type,
820 backup_id,
821 backup_time
822 );
823 let dir =
824 datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;
825
826 if owner_check_required {
827 let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
828 check_backup_owner(&owner, &auth_id)?;
829 }
830
831 backup_dir = Some(dir);
832 worker_type = "verify_snapshot";
833 }
834 (Some(backup_type), Some(backup_id), None) => {
835 worker_id = format!(
836 "{}:{}/{}/{}",
837 store,
838 ns.display_as_path(),
839 backup_type,
840 backup_id
841 );
842 let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
843
844 if owner_check_required {
845 let owner = datastore.get_owner(&ns, &group)?;
846 check_backup_owner(&owner, &auth_id)?;
847 }
848
849 backup_group = Some(datastore.backup_group(ns.clone(), group));
850 worker_type = "verify_group";
851 }
852 (None, None, None) => {
853 worker_id = if ns.is_root() {
854 store
855 } else {
856 format!("{}:{}", store, ns.display_as_path())
857 };
858 }
859 _ => bail!("parameters do not specify a backup group or snapshot"),
860 }
861
862 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
863
864 let upid_str = WorkerTask::new_thread(
865 worker_type,
866 Some(worker_id),
867 auth_id.to_string(),
868 to_stdout,
869 move |worker| {
870 let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
871 let failed_dirs = if let Some(backup_dir) = backup_dir {
872 let mut res = Vec::new();
873 if !verify_backup_dir(
874 &verify_worker,
875 &backup_dir,
876 worker.upid().clone(),
877 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
878 )? {
879 res.push(print_ns_and_snapshot(
880 backup_dir.backup_ns(),
881 backup_dir.as_ref(),
882 ));
883 }
884 res
885 } else if let Some(backup_group) = backup_group {
886 verify_backup_group(
887 &verify_worker,
888 &backup_group,
889 &mut StoreProgress::new(1),
890 worker.upid(),
891 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
892 )?
893 } else {
894 let owner = if owner_check_required {
895 Some(&auth_id)
896 } else {
897 None
898 };
899
900 verify_all_backups(
901 &verify_worker,
902 worker.upid(),
903 ns,
904 max_depth,
905 owner,
906 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
907 )?
908 };
909 if !failed_dirs.is_empty() {
910 task_log!(worker, "Failed to verify the following snapshots/groups:");
911 for dir in failed_dirs {
912 task_log!(worker, "\t{}", dir);
913 }
914 bail!("verification failed - please check the log for details");
915 }
916 Ok(())
917 },
918 )?;
919
920 Ok(json!(upid_str))
921 }
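// The worker_id encodes the verification scope (illustrative values; the
// snapshot time is formatted as 8 hex digits per the {:08X} above):
//
//     "store1:ns1/vm/100/61764AB3" -> one snapshot    (worker_type "verify_snapshot")
//     "store1:ns1/vm/100"          -> one group       (worker_type "verify_group")
//     "store1:ns1" or "store1"     -> whole (sub)tree (worker_type "verify")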
922
923 #[api(
924 input: {
925 properties: {
926 group: {
927 type: pbs_api_types::BackupGroup,
928 flatten: true,
929 },
930 "dry-run": {
931 optional: true,
932 type: bool,
933 default: false,
934 description: "Just show what prune would do, but do not delete anything.",
935 },
936 "keep-options": {
937 type: KeepOptions,
938 flatten: true,
939 },
940 store: {
941 schema: DATASTORE_SCHEMA,
942 },
943 ns: {
944 type: BackupNamespace,
945 optional: true,
946 },
947 },
948 },
949 returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
950 access: {
951 permission: &Permission::Anybody,
952 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
953 or DATASTORE_PRUNE and being the owner of the group",
954 },
955 )]
956 /// Prune a group on the datastore
957 pub fn prune(
958 group: pbs_api_types::BackupGroup,
959 dry_run: bool,
960 keep_options: KeepOptions,
961 store: String,
962 ns: Option<BackupNamespace>,
963 _param: Value,
964 rpcenv: &mut dyn RpcEnvironment,
965 ) -> Result<Value, Error> {
966 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
967 let ns = ns.unwrap_or_default();
968 let datastore = check_privs_and_load_store(
969 &store,
970 &ns,
971 &auth_id,
972 PRIV_DATASTORE_MODIFY,
973 PRIV_DATASTORE_PRUNE,
974 Some(Operation::Write),
975 &group,
976 )?;
977
978 let worker_id = format!("{}:{}:{}", store, ns, group);
979 let group = datastore.backup_group(ns.clone(), group);
980
981 let mut prune_result = Vec::new();
982
983 let list = group.list_backups()?;
984
985 let mut prune_info = compute_prune_info(list, &keep_options)?;
986
987 prune_info.reverse(); // delete older snapshots first
988
989 let keep_all = !keep_options.keeps_something();
990
991 if dry_run {
992 for (info, mark) in prune_info {
993 let keep = keep_all || mark.keep();
994
995 let mut result = json!({
996 "backup-type": info.backup_dir.backup_type(),
997 "backup-id": info.backup_dir.backup_id(),
998 "backup-time": info.backup_dir.backup_time(),
999 "keep": keep,
1000 "protected": mark.protected(),
1001 });
1002 let prune_ns = info.backup_dir.backup_ns();
1003 if !prune_ns.is_root() {
1004 result["ns"] = serde_json::to_value(prune_ns)?;
1005 }
1006 prune_result.push(result);
1007 }
1008 return Ok(json!(prune_result));
1009 }
1010
1011 // We use a WorkerTask just to have a task log, but run synchronously
1012 let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
1013
1014 if keep_all {
1015 task_log!(worker, "No prune selection - keeping all files.");
1016 } else {
1017 let mut opts = Vec::new();
1018 if !ns.is_root() {
1019 opts.push(format!("--ns {ns}"));
1020 }
1021 crate::server::cli_keep_options(&mut opts, &keep_options);
1022
1023 task_log!(worker, "retention options: {}", opts.join(" "));
1024 task_log!(
1025 worker,
1026 "Starting prune on {} group \"{}\"",
1027 print_store_and_ns(&store, &ns),
1028 group.group(),
1029 );
1030 }
1031
1032 for (info, mark) in prune_info {
1033 let keep = keep_all || mark.keep();
1034
1035 let backup_time = info.backup_dir.backup_time();
1036 let timestamp = info.backup_dir.backup_time_string();
1037 let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();
1038
1039 let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark);
1040
1041 task_log!(worker, "{}", msg);
1042
1043 prune_result.push(json!({
1044 "backup-type": group.ty,
1045 "backup-id": group.id,
1046 "backup-time": backup_time,
1047 "keep": keep,
1048 "protected": mark.protected(),
1049 }));
1050
1051 if !(dry_run || keep) {
1052 if let Err(err) = info.backup_dir.destroy(false) {
1053 task_warn!(
1054 worker,
1055 "failed to remove dir {:?}: {}",
1056 info.backup_dir.relative_path(),
1057 err,
1058 );
1059 }
1060 }
1061 }
1062
1063 worker.log_result(&Ok(()));
1064
1065 Ok(json!(prune_result))
1066 }
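// Each prune_result entry has the shape built above (illustrative values):
//
//     {
//         "backup-type": "vm", "backup-id": "100", "backup-time": 1660000000,
//         "keep": true, "protected": false
//     }
//
// Dry runs additionally set "ns" for snapshots outside the root namespace.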
1067
1068 #[api(
1069 input: {
1070 properties: {
1071 "dry-run": {
1072 optional: true,
1073 type: bool,
1074 default: false,
1075 description: "Just show what prune would do, but do not delete anything.",
1076 },
1077 "prune-options": {
1078 type: PruneJobOptions,
1079 flatten: true,
1080 },
1081 store: {
1082 schema: DATASTORE_SCHEMA,
1083 },
1084 },
1085 },
1086 returns: {
1087 schema: UPID_SCHEMA,
1088 },
1089 access: {
1090 permission: &Permission::Anybody,
1091 description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
1092 },
1093 )]
1094 /// Prune the datastore
1095 pub fn prune_datastore(
1096 dry_run: bool,
1097 prune_options: PruneJobOptions,
1098 store: String,
1099 _param: Value,
1100 rpcenv: &mut dyn RpcEnvironment,
1101 ) -> Result<String, Error> {
1102 let user_info = CachedUserInfo::new()?;
1103
1104 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1105
1106 user_info.check_privs(
1107 &auth_id,
1108 &prune_options.acl_path(&store),
1109 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
1110 true,
1111 )?;
1112
1113 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
1114 let ns = prune_options.ns.clone().unwrap_or_default();
1115 let worker_id = format!("{}:{}", store, ns);
1116
1117 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
1118
1119 let upid_str = WorkerTask::new_thread(
1120 "prune",
1121 Some(worker_id),
1122 auth_id.to_string(),
1123 to_stdout,
1124 move |worker| {
1125 crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
1126 },
1127 )?;
1128
1129 Ok(upid_str)
1130 }
1131
1132 #[api(
1133 input: {
1134 properties: {
1135 store: {
1136 schema: DATASTORE_SCHEMA,
1137 },
1138 },
1139 },
1140 returns: {
1141 schema: UPID_SCHEMA,
1142 },
1143 access: {
1144 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
1145 },
1146 )]
1147 /// Start garbage collection.
1148 pub fn start_garbage_collection(
1149 store: String,
1150 _info: &ApiMethod,
1151 rpcenv: &mut dyn RpcEnvironment,
1152 ) -> Result<Value, Error> {
1153 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
1154 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1155
1156 let job = Job::new("garbage_collection", &store)
1157 .map_err(|_| format_err!("garbage collection already running"))?;
1158
1159 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
1160
1161 let upid_str =
1162 crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
1163 .map_err(|err| {
1164 format_err!(
1165 "unable to start garbage collection job on datastore {} - {}",
1166 store,
1167 err
1168 )
1169 })?;
1170
1171 Ok(json!(upid_str))
1172 }
1173
1174 #[api(
1175 input: {
1176 properties: {
1177 store: {
1178 schema: DATASTORE_SCHEMA,
1179 },
1180 },
1181 },
1182 returns: {
1183 type: GarbageCollectionStatus,
1184 },
1185 access: {
1186 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
1187 },
1188 )]
1189 /// Garbage collection status.
1190 pub fn garbage_collection_status(
1191 store: String,
1192 _info: &ApiMethod,
1193 _rpcenv: &mut dyn RpcEnvironment,
1194 ) -> Result<GarbageCollectionStatus, Error> {
1195 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
1196
1197 let status = datastore.last_gc_status();
1198
1199 Ok(status)
1200 }
1201
1202 #[api(
1203 returns: {
1204 description: "List the accessible datastores.",
1205 type: Array,
1206 items: { type: DataStoreListItem },
1207 },
1208 access: {
1209 permission: &Permission::Anybody,
1210 },
1211 )]
1212 /// Datastore list
1213 pub fn get_datastore_list(
1214 _param: Value,
1215 _info: &ApiMethod,
1216 rpcenv: &mut dyn RpcEnvironment,
1217 ) -> Result<Vec<DataStoreListItem>, Error> {
1218 let (config, _digest) = pbs_config::datastore::config()?;
1219
1220 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1221 let user_info = CachedUserInfo::new()?;
1222
1223 let mut list = Vec::new();
1224
1225 for (store, (_, data)) in &config.sections {
1226 let acl_path = &["datastore", store];
1227 let user_privs = user_info.lookup_privs(&auth_id, acl_path);
1228 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
1229
1230 let mut allow_id = false;
1231 if !allowed {
1232 if let Ok(any_privs) = user_info.any_privs_below(&auth_id, acl_path, NS_PRIVS_OK) {
1233 allow_id = any_privs;
1234 }
1235 }
1236
1237 if allowed || allow_id {
1238 list.push(DataStoreListItem {
1239 store: store.clone(),
1240 comment: if !allowed {
1241 None
1242 } else {
1243 data["comment"].as_str().map(String::from)
1244 },
1245 maintenance: data["maintenance-mode"].as_str().map(String::from),
1246 });
1247 }
1248 }
1249
1250 Ok(list)
1251 }
1252
1253 #[sortable]
1254 pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
1255 &ApiHandler::AsyncHttp(&download_file),
1256 &ObjectSchema::new(
1257 "Download single raw file from backup snapshot.",
1258 &sorted!([
1259 ("store", false, &DATASTORE_SCHEMA),
1260 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
1261 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1262 ("backup-id", false, &BACKUP_ID_SCHEMA),
1263 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1264 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
1265 ]),
1266 ),
1267 )
1268 .access(
1269 Some(
1270 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1271 DATASTORE_BACKUP and being the owner of the group",
1272 ),
1273 &Permission::Anybody,
1274 );
1275
1276 pub fn download_file(
1277 _parts: Parts,
1278 _req_body: Body,
1279 param: Value,
1280 _info: &ApiMethod,
1281 rpcenv: Box<dyn RpcEnvironment>,
1282 ) -> ApiResponseFuture {
1283 async move {
1284 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1285 let store = required_string_param(&param, "store")?;
1286 let backup_ns = optional_ns_param(&param)?;
1287
1288 let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
1289 let datastore = check_privs_and_load_store(
1290 store,
1291 &backup_ns,
1292 &auth_id,
1293 PRIV_DATASTORE_READ,
1294 PRIV_DATASTORE_BACKUP,
1295 Some(Operation::Read),
1296 &backup_dir.group,
1297 )?;
1298
1299 let file_name = required_string_param(&param, "file-name")?.to_owned();
1300
1301 println!(
1302 "Download {} from {} ({}/{})",
1303 file_name,
1304 print_store_and_ns(store, &backup_ns),
1305 backup_dir,
1306 file_name
1307 );
1308
1309 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
1310
1311 let mut path = datastore.base_path();
1312 path.push(backup_dir.relative_path());
1313 path.push(&file_name);
1314
1315 let file = tokio::fs::File::open(&path)
1316 .await
1317 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
1318
1319 let payload =
1320 tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
1321 .map_ok(|bytes| bytes.freeze())
1322 .map_err(move |err| {
1323 eprintln!("error during streaming of '{:?}' - {}", &path, err);
1324 err
1325 });
1326 let body = Body::wrap_stream(payload);
1327
1328 // fixme: set other headers ?
1329 Ok(Response::builder()
1330 .status(StatusCode::OK)
1331 .header(header::CONTENT_TYPE, "application/octet-stream")
1332 .body(body)
1333 .unwrap())
1334 }
1335 .boxed()
1336 }
1337
1338 #[sortable]
1339 pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
1340 &ApiHandler::AsyncHttp(&download_file_decoded),
1341 &ObjectSchema::new(
1342 "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
1343 &sorted!([
1344 ("store", false, &DATASTORE_SCHEMA),
1345 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
1346 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1347 ("backup-id", false, &BACKUP_ID_SCHEMA),
1348 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1349 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
1350 ]),
1351 ),
1352 )
1353 .access(
1354 Some(
1355 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1356 DATASTORE_BACKUP and being the owner of the group",
1357 ),
1358 &Permission::Anybody,
1359 );
1360
1361 pub fn download_file_decoded(
1362 _parts: Parts,
1363 _req_body: Body,
1364 param: Value,
1365 _info: &ApiMethod,
1366 rpcenv: Box<dyn RpcEnvironment>,
1367 ) -> ApiResponseFuture {
1368 async move {
1369 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1370 let store = required_string_param(&param, "store")?;
1371 let backup_ns = optional_ns_param(&param)?;
1372
1373 let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
1374 let datastore = check_privs_and_load_store(
1375 store,
1376 &backup_ns,
1377 &auth_id,
1378 PRIV_DATASTORE_READ,
1379 PRIV_DATASTORE_BACKUP,
1380 Some(Operation::Read),
1381 &backup_dir_api.group,
1382 )?;
1383
1384 let file_name = required_string_param(&param, "file-name")?.to_owned();
1385 let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
1386
1387 let (manifest, files) = read_backup_index(&backup_dir)?;
1388 for file in files {
1389 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1390 bail!("cannot decode '{}' - is encrypted", file_name);
1391 }
1392 }
1393
1394 println!(
1395 "Download {} from {} ({}/{})",
1396 file_name,
1397 print_store_and_ns(store, &backup_ns),
1398 backup_dir_api,
1399 file_name
1400 );
1401
1402 let mut path = datastore.base_path();
1403 path.push(backup_dir.relative_path());
1404 path.push(&file_name);
1405
1406 let (_, extension) = file_name.rsplit_once('.').unwrap();
1407
1408 let body = match extension {
1409 "didx" => {
1410 let index = DynamicIndexReader::open(&path).map_err(|err| {
1411 format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
1412 })?;
1413 let (csum, size) = index.compute_csum();
1414 manifest.verify_file(&file_name, &csum, size)?;
1415
1416 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1417 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
1418 Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
1419 eprintln!("error during streaming of '{:?}' - {}", path, err);
1420 err
1421 }))
1422 }
1423 "fidx" => {
1424 let index = FixedIndexReader::open(&path).map_err(|err| {
1425 format_err!("unable to read fixed index '{:?}' - {}", &path, err)
1426 })?;
1427
1428 let (csum, size) = index.compute_csum();
1429 manifest.verify_file(&file_name, &csum, size)?;
1430
1431 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1432 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
1433 Body::wrap_stream(
1434 AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
1435 move |err| {
1436 eprintln!("error during streaming of '{:?}' - {}", path, err);
1437 err
1438 },
1439 ),
1440 )
1441 }
1442 "blob" => {
1443 let file = std::fs::File::open(&path)
1444 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
1445
1446 // FIXME: load full blob to verify index checksum?
1447
1448 Body::wrap_stream(
1449 WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
1450 move |err| {
1451 eprintln!("error during streaming of '{:?}' - {}", path, err);
1452 err
1453 },
1454 ),
1455 )
1456 }
1457 extension => {
1458 bail!("cannot download '{}' files", extension);
1459 }
1460 };
1461
1462 // fixme: set other headers ?
1463 Ok(Response::builder()
1464 .status(StatusCode::OK)
1465 .header(header::CONTENT_TYPE, "application/octet-stream")
1466 .body(body)
1467 .unwrap())
1468 }
1469 .boxed()
1470 }
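// Extension dispatch used above: didx and fidx archives are verified against
// the manifest checksum before streaming, while blobs are streamed as-is (see
// the FIXME about blob checksum verification):
//
//     "didx" -> DynamicIndexReader + seekable CachedChunkReader
//     "fidx" -> FixedIndexReader + 4 MiB buffered AsyncReaderStream
//     "blob" -> raw DataBlobReader stream
//     other  -> rejected with an error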
1471
1472 #[sortable]
1473 pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
1474 &ApiHandler::AsyncHttp(&upload_backup_log),
1475 &ObjectSchema::new(
1476 "Upload the client backup log file into a backup snapshot ('client.log.blob').",
1477 &sorted!([
1478 ("store", false, &DATASTORE_SCHEMA),
1479 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
1480 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1481 ("backup-id", false, &BACKUP_ID_SCHEMA),
1482 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1483 ]),
1484 ),
1485 )
1486 .access(
1487 Some("Only the backup creator/owner is allowed to do this."),
1488 &Permission::Anybody,
1489 );
1490
1491 pub fn upload_backup_log(
1492 _parts: Parts,
1493 req_body: Body,
1494 param: Value,
1495 _info: &ApiMethod,
1496 rpcenv: Box<dyn RpcEnvironment>,
1497 ) -> ApiResponseFuture {
1498 async move {
1499 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1500 let store = required_string_param(&param, "store")?;
1501 let backup_ns = optional_ns_param(&param)?;
1502
1503 let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
1504
1505 let datastore = check_privs_and_load_store(
1506 store,
1507 &backup_ns,
1508 &auth_id,
1509 0,
1510 PRIV_DATASTORE_BACKUP,
1511 Some(Operation::Write),
1512 &backup_dir_api.group,
1513 )?;
1514 let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
1515
1516 let file_name = CLIENT_LOG_BLOB_NAME;
1517
1518 let mut path = backup_dir.full_path();
1519 path.push(&file_name);
1520
1521 if path.exists() {
1522 bail!("backup already contains a log.");
1523 }
1524
1525 println!(
1526 "Upload backup log to {} {backup_dir_api}/{file_name}",
1527 print_store_and_ns(store, &backup_ns),
1528 );
1529
1530 let data = req_body
1531 .map_err(Error::from)
1532 .try_fold(Vec::new(), |mut acc, chunk| {
1533 acc.extend_from_slice(&chunk);
1534 future::ok::<_, Error>(acc)
1535 })
1536 .await?;
1537
1538 // always verify blob/CRC at server side
1539 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1540
1541 replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
1542
1543 // fixme: use correct formatter
1544 Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
1545 }
1546 .boxed()
1547 }
1548
1549 #[api(
1550 input: {
1551 properties: {
1552 store: { schema: DATASTORE_SCHEMA },
1553 ns: {
1554 type: BackupNamespace,
1555 optional: true,
1556 },
1557 backup_dir: {
1558 type: pbs_api_types::BackupDir,
1559 flatten: true,
1560 },
1561 "filepath": {
1562 description: "Base64 encoded path.",
1563 type: String,
1564 }
1565 },
1566 },
1567 access: {
1568 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1569 DATASTORE_BACKUP and being the owner of the group",
1570 permission: &Permission::Anybody,
1571 },
1572 )]
1573 /// Get the entries of the given path of the catalog
1574 pub async fn catalog(
1575 store: String,
1576 ns: Option<BackupNamespace>,
1577 backup_dir: pbs_api_types::BackupDir,
1578 filepath: String,
1579 rpcenv: &mut dyn RpcEnvironment,
1580 ) -> Result<Vec<ArchiveEntry>, Error> {
1581 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1582
1583 tokio::task::spawn_blocking(move || {
1584 let ns = ns.unwrap_or_default();
1585
1586 let datastore = check_privs_and_load_store(
1587 &store,
1588 &ns,
1589 &auth_id,
1590 PRIV_DATASTORE_READ,
1591 PRIV_DATASTORE_BACKUP,
1592 Some(Operation::Read),
1593 &backup_dir.group,
1594 )?;
1595
1596 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
1597
1598 let file_name = CATALOG_NAME;
1599
1600 let (manifest, files) = read_backup_index(&backup_dir)?;
1601 for file in files {
1602 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1603 bail!("cannot decode '{}' - is encrypted", file_name);
1604 }
1605 }
1606
1607 let mut path = datastore.base_path();
1608 path.push(backup_dir.relative_path());
1609 path.push(file_name);
1610
1611 let index = DynamicIndexReader::open(&path)
1612 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1613
1614 let (csum, size) = index.compute_csum();
1615 manifest.verify_file(file_name, &csum, size)?;
1616
1617 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1618 let reader = BufferedDynamicReader::new(index, chunk_reader);
1619
1620 let mut catalog_reader = CatalogReader::new(reader);
1621
1622 let path = if filepath != "root" && filepath != "/" {
1623 base64::decode(filepath)?
1624 } else {
1625 vec![b'/']
1626 };
1627
1628 catalog_reader.list_dir_contents(&path)
1629 })
1630 .await?
1631 }
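// The "filepath" parameter is the base64-encoded catalog path; "root" and "/"
// both select the catalog root. A client-side sketch (assuming the same
// `base64` crate used for decoding above):
//
//     let filepath = base64::encode("/etc/hostname");
//     // pass `filepath` as the "filepath" parameter of this endpoint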
1632
1633 #[sortable]
1634 pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1635 &ApiHandler::AsyncHttp(&pxar_file_download),
1636 &ObjectSchema::new(
1637 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
1638 &sorted!([
1639 ("store", false, &DATASTORE_SCHEMA),
1640 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
1641 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1642 ("backup-id", false, &BACKUP_ID_SCHEMA),
1643 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1644 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
1645 ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
1646 ]),
1647 )
1648 ).access(
1649 Some(
1650 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1651 DATASTORE_BACKUP and being the owner of the group",
1652 ),
1653 &Permission::Anybody,
1654 );
1655
1656 pub fn pxar_file_download(
1657 _parts: Parts,
1658 _req_body: Body,
1659 param: Value,
1660 _info: &ApiMethod,
1661 rpcenv: Box<dyn RpcEnvironment>,
1662 ) -> ApiResponseFuture {
1663 async move {
1664 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1665 let store = required_string_param(&param, "store")?;
1666 let ns = optional_ns_param(&param)?;
1667
1668 let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
1669 let datastore = check_privs_and_load_store(
1670 store,
1671 &ns,
1672 &auth_id,
1673 PRIV_DATASTORE_READ,
1674 PRIV_DATASTORE_BACKUP,
1675 Some(Operation::Read),
1676 &backup_dir.group,
1677 )?;
1678
1679 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
1680
1681 let filepath = required_string_param(&param, "filepath")?.to_owned();
1682
1683 let tar = param["tar"].as_bool().unwrap_or(false);
1684
1685 let mut components = base64::decode(&filepath)?;
1686 if !components.is_empty() && components[0] == b'/' {
1687 components.remove(0);
1688 }
1689
1690 let mut split = components.splitn(2, |c| *c == b'/');
1691 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
1692 let file_path = split.next().unwrap_or(b"/");
1693 let (manifest, files) = read_backup_index(&backup_dir)?;
1694 for file in files {
1695 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1696 bail!("cannot decode '{}' - is encrypted", pxar_name);
1697 }
1698 }
1699
1700 let mut path = datastore.base_path();
1701 path.push(backup_dir.relative_path());
1702 path.push(pxar_name);
1703
1704 let index = DynamicIndexReader::open(&path)
1705 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1706
1707 let (csum, size) = index.compute_csum();
1708 manifest.verify_file(pxar_name, &csum, size)?;
1709
1710 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1711 let reader = BufferedDynamicReader::new(index, chunk_reader);
1712 let archive_size = reader.archive_size();
1713 let reader = LocalDynamicReadAt::new(reader);
1714
1715 let decoder = Accessor::new(reader, archive_size).await?;
1716 let root = decoder.open_root().await?;
1717 let path = OsStr::from_bytes(file_path).to_os_string();
1718 let file = root
1719 .lookup(&path)
1720 .await?
1721 .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
1722
1723 let body = match file.kind() {
1724 EntryKind::File { .. } => Body::wrap_stream(
1725 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1726 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1727 err
1728 }),
1729 ),
1730 EntryKind::Hardlink(_) => Body::wrap_stream(
1731 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1732 .map_err(move |err| {
1733 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
1734 err
1735 }),
1736 ),
1737 EntryKind::Directory => {
1738 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
1739 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
1740 if tar {
1741 proxmox_rest_server::spawn_internal_task(create_tar(
1742 channelwriter,
1743 decoder,
1744 path.clone(),
1745 ));
1746 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1747 Body::wrap_stream(zstdstream.map_err(move |err| {
1748 log::error!("error during streaming of tar.zst '{:?}' - {}", path, err);
1749 err
1750 }))
1751 } else {
1752 proxmox_rest_server::spawn_internal_task(create_zip(
1753 channelwriter,
1754 decoder,
1755 path.clone(),
1756 ));
1757 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
1758 log::error!("error during streaming of zip '{:?}' - {}", path, err);
1759 err
1760 }))
1761 }
1762 }
1763 other => bail!("cannot download file of type {:?}", other),
1764 };
1765
1766 // fixme: set other headers ?
1767 Ok(Response::builder()
1768 .status(StatusCode::OK)
1769 .header(header::CONTENT_TYPE, "application/octet-stream")
1770 .body(body)
1771 .unwrap())
1772 }
1773 .boxed()
1774 }
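// The decoded "filepath" starts with the archive name; the remainder is the
// path inside that pxar archive (illustrative):
//
//     base64("/root.pxar.didx/etc/hostname")
//         -> pxar_name = "root.pxar.didx"
//         -> file_path = b"etc/hostname"
//
// A missing remainder defaults to b"/", i.e. the archive root; directories
// are downloaded as .zip, or as .tar.zst when the "tar" flag is set.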
1775
1776 #[api(
1777 input: {
1778 properties: {
1779 store: {
1780 schema: DATASTORE_SCHEMA,
1781 },
1782 timeframe: {
1783 type: RRDTimeFrame,
1784 },
1785 cf: {
1786 type: RRDMode,
1787 },
1788 },
1789 },
1790 access: {
1791 permission: &Permission::Privilege(
1792 &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1793 },
1794 )]
1795 /// Read datastore stats
1796 pub fn get_rrd_stats(
1797 store: String,
1798 timeframe: RRDTimeFrame,
1799 cf: RRDMode,
1800 _param: Value,
1801 ) -> Result<Value, Error> {
1802 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
1803 let disk_manager = crate::tools::disks::DiskManage::new();
1804
1805 let mut rrd_fields = vec![
1806 "total",
1807 "available",
1808 "used",
1809 "read_ios",
1810 "read_bytes",
1811 "write_ios",
1812 "write_bytes",
1813 ];
1814
1815 // we do not have io_ticks for zpools, so don't include them
1816 match disk_manager.find_mounted_device(&datastore.base_path()) {
1817 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
1818 _ => rrd_fields.push("io_ticks"),
1819 };
1820
1821 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1822 }
1823
1824 #[api(
1825 input: {
1826 properties: {
1827 store: {
1828 schema: DATASTORE_SCHEMA,
1829 },
1830 },
1831 },
1832 access: {
1833 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1834 },
1835 )]
1836 /// Read datastore stats
1837 pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
1838 let active_operations = task_tracking::get_active_operations(&store)?;
1839 Ok(json!({
1840 "read": active_operations.read,
1841 "write": active_operations.write,
1842 }))
1843 }
1844
1845 #[api(
1846 input: {
1847 properties: {
1848 store: { schema: DATASTORE_SCHEMA },
1849 ns: {
1850 type: BackupNamespace,
1851 optional: true,
1852 },
1853 backup_group: {
1854 type: pbs_api_types::BackupGroup,
1855 flatten: true,
1856 },
1857 },
1858 },
1859 access: {
1860 permission: &Permission::Anybody,
1861 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1862 or DATASTORE_BACKUP and being the owner of the group",
1863 },
1864 )]
1865 /// Get "notes" for a backup group
1866 pub fn get_group_notes(
1867 store: String,
1868 ns: Option<BackupNamespace>,
1869 backup_group: pbs_api_types::BackupGroup,
1870 rpcenv: &mut dyn RpcEnvironment,
1871 ) -> Result<String, Error> {
1872 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1873 let ns = ns.unwrap_or_default();
1874
1875 let datastore = check_privs_and_load_store(
1876 &store,
1877 &ns,
1878 &auth_id,
1879 PRIV_DATASTORE_AUDIT,
1880 PRIV_DATASTORE_BACKUP,
1881 Some(Operation::Read),
1882 &backup_group,
1883 )?;
1884
1885 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
1886 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1887 }
1888
1889 #[api(
1890 input: {
1891 properties: {
1892 store: { schema: DATASTORE_SCHEMA },
1893 ns: {
1894 type: BackupNamespace,
1895 optional: true,
1896 },
1897 backup_group: {
1898 type: pbs_api_types::BackupGroup,
1899 flatten: true,
1900 },
1901 notes: {
1902 description: "A multiline text.",
1903 },
1904 },
1905 },
1906 access: {
1907 permission: &Permission::Anybody,
1908 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
1909 or DATASTORE_BACKUP and being the owner of the group",
1910 },
1911 )]
1912 /// Set "notes" for a backup group
1913 pub fn set_group_notes(
1914 store: String,
1915 ns: Option<BackupNamespace>,
1916 backup_group: pbs_api_types::BackupGroup,
1917 notes: String,
1918 rpcenv: &mut dyn RpcEnvironment,
1919 ) -> Result<(), Error> {
1920 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1921 let ns = ns.unwrap_or_default();
1922
1923 let datastore = check_privs_and_load_store(
1924 &store,
1925 &ns,
1926 &auth_id,
1927 PRIV_DATASTORE_MODIFY,
1928 PRIV_DATASTORE_BACKUP,
1929 Some(Operation::Write),
1930 &backup_group,
1931 )?;
1932
1933 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
1934 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
1935
1936 Ok(())
1937 }
1938
1939 #[api(
1940 input: {
1941 properties: {
1942 store: { schema: DATASTORE_SCHEMA },
1943 ns: {
1944 type: BackupNamespace,
1945 optional: true,
1946 },
1947 backup_dir: {
1948 type: pbs_api_types::BackupDir,
1949 flatten: true,
1950 },
1951 },
1952 },
1953 access: {
1954 permission: &Permission::Anybody,
1955 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1956 or DATASTORE_BACKUP and being the owner of the group",
1957 },
1958 )]
1959 /// Get "notes" for a specific backup
1960 pub fn get_notes(
1961 store: String,
1962 ns: Option<BackupNamespace>,
1963 backup_dir: pbs_api_types::BackupDir,
1964 rpcenv: &mut dyn RpcEnvironment,
1965 ) -> Result<String, Error> {
1966 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1967 let ns = ns.unwrap_or_default();
1968
1969 let datastore = check_privs_and_load_store(
1970 &store,
1971 &ns,
1972 &auth_id,
1973 PRIV_DATASTORE_AUDIT,
1974 PRIV_DATASTORE_BACKUP,
1975 Some(Operation::Read),
1976 &backup_dir.group,
1977 )?;
1978
1979 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
1980
1981 let (manifest, _) = backup_dir.load_manifest()?;
1982
1983 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
1984
1985 Ok(String::from(notes))
1986 }
1987
1988 #[api(
1989 input: {
1990 properties: {
1991 store: { schema: DATASTORE_SCHEMA },
1992 ns: {
1993 type: BackupNamespace,
1994 optional: true,
1995 },
1996 backup_dir: {
1997 type: pbs_api_types::BackupDir,
1998 flatten: true,
1999 },
2000 notes: {
2001 description: "A multiline text.",
2002 },
2003 },
2004 },
2005 access: {
2006 permission: &Permission::Anybody,
2007 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2008 or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    backup_dir
        .update_manifest(|manifest| {
            manifest.unprotected["notes"] = notes.into();
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}
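
// The closure-based update_manifest() keeps the read-modify-write of the
// manifest blob in one place. A sketch of batching several unprotected
// fields in one update (field names are made up for illustration):
//
//     backup_dir.update_manifest(|manifest| {
//         manifest.unprotected["notes"] = "rotated".into();
//         manifest.unprotected["custom-tag"] = serde_json::json!(["a", "b"]);
//     })?;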

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT on /datastore/{store}[/{namespace}] for any group, \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    Ok(backup_dir.is_protected())
}
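
// Semantics reminder (not code from this module): a protected snapshot is
// skipped by prune and cannot be removed until protection is cleared again.
// A caller-side check before a manual removal might look like this sketch:
//
//     if backup_dir.is_protected() {
//         bail!("refusing to remove a protected snapshot");
//     }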

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_MODIFY on /datastore/{store}[/{namespace}] for any group, \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Enable or disable protection for a specific backup
pub async fn set_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();
        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        datastore.update_protection(&backup_dir, protected)
    })
    .await?
}
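
// Pattern note: the handler is async only so the blocking datastore work
// (permission lookup, locking, filesystem access) can be shifted onto the
// tokio blocking pool instead of stalling the API executor. The shape, as a
// generic self-contained sketch:
//
//     async fn blocking_api_call() -> Result<(), Error> {
//         tokio::task::spawn_blocking(move || {
//             // synchronous, filesystem-heavy work goes here
//             Ok(())
//         })
//         .await? // `?` surfaces a panicked/cancelled task;
//                 // the inner Result is returned unchanged
//     }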

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify on the whole datastore, or Datastore.Backup on owned \
            backups when changing ownership between a user and that user's own API tokens",
    },
)]
/// Change owner of a backup group
pub async fn set_backup_owner(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();
        let owner_check_required = check_ns_privs_full(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_BACKUP,
        )?;

        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

        let backup_group = datastore.backup_group(ns, backup_group);

        if owner_check_required {
            let owner = backup_group.get_owner()?;

            let allowed = match (owner.is_token(), new_owner.is_token()) {
                (true, true) => {
                    // API token to API token, owned by same user
                    let owner = owner.user();
                    let new_owner = new_owner.user();
                    owner == new_owner && Authid::from(owner.clone()) == auth_id
                }
                (true, false) => {
                    // API token to API token owner
                    Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
                }
                (false, true) => {
                    // API token owner to API token
                    owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
                }
                (false, false) => {
                    // User to User, not allowed for unprivileged users
                    false
                }
            };

            if !allowed {
                return Err(http_err!(
                    UNAUTHORIZED,
                    "{} does not have permission to change owner of backup group '{}' to {}",
                    auth_id,
                    backup_group.group(),
                    new_owner,
                ));
            }
        }

        let user_info = CachedUserInfo::new()?;

        if !user_info.is_active_auth_id(&new_owner) {
            bail!(
                "{} '{}' is inactive or non-existent",
                if new_owner.is_token() {
                    "API token".to_string()
                } else {
                    "user".to_string()
                },
                new_owner
            );
        }

        backup_group.set_owner(&new_owner, true)?;

        Ok(())
    })
    .await?
}
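
// Permission matrix illustrated (sketch with made-up example ids, using only
// Authid operations already exercised above): an unprivileged owner may shift
// ownership between their own identity and their own tokens, never to a
// different user.
//
//     let auth_id: Authid = "alice@pbs".parse()?;
//     let owner: Authid = "alice@pbs!sync".parse()?; // token owned by alice
//     let new_owner: Authid = "alice@pbs!prune".parse()?;
//     // token -> token: allowed, both tokens belong to the acting user
//     assert!(owner.user() == new_owner.user()
//         && Authid::from(owner.user().clone()) == auth_id);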

#[sortable]
// `#[sortable]` sorts the `sorted!` list at compile time; subdir dispatch
// relies on that order for its binary search.
const DATASTORE_INFO_SUBDIRS: SubdirMap = &sorted!([
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "namespace",
        // FIXME: move into datastore:: sub-module?!
        &crate::api2::admin::namespace::ROUTER,
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
]);
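
// Extending the map (sketch): `sorted!` orders entries at compile time for
// the router's binary-search lookup, so a new entry works anywhere in the
// list, though keeping it alphabetical matches the existing convention. A
// hypothetical "summary" endpoint (handler name made up) would read:
//
//     ("summary", &Router::new().get(&API_METHOD_GET_SUMMARY)),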

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);
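
// Resulting URL shape (illustrative): a GET on /api2/json/admin/datastore
// returns the datastore list, while the match_all("store", ...) segment
// routes e.g. /api2/json/admin/datastore/store1/notes into the subdir map
// above, binding "store1" to the {store} parameter.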