//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    print_ns_and_snapshot, Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode,
    DataStoreListItem, DataStoreStatus, DatastoreWithNamespace, GarbageCollectionStatus,
    GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
    ListAccessibleBackupGroups,
};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

// TODO: move somewhere reusable (the namespace API has its own copy atm.)
fn get_ns_privs(store_with_ns: &DatastoreWithNamespace, auth_id: &Authid) -> Result<u64, Error> {
    let user_info = CachedUserInfo::new()?;

    Ok(user_info.lookup_privs(auth_id, &store_with_ns.acl_path()))
}

// asserts that either `full_access_privs` or `partial_access_privs` is fulfilled; the returned
// value indicates whether further checks like group ownership are required
fn check_ns_privs(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
) -> Result<bool, Error> {
    let store_with_ns = DatastoreWithNamespace {
        store: store.to_string(),
        ns: ns.clone(),
    };
    let privs = get_ns_privs(&store_with_ns, auth_id)?;

    if full_access_privs != 0 && (privs & full_access_privs) != 0 {
        return Ok(false);
    }
    if partial_access_privs != 0 && (privs & partial_access_privs) != 0 {
        return Ok(true);
    }

    proxmox_router::http_bail!(FORBIDDEN, "permission check failed");
}
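
// Illustrative sketch (not part of the original source): callers read the boolean
// from `check_ns_privs` as "owner check still required". `Ok(false)` means a
// full-access privilege matched; `Ok(true)` means only a partial-access one did,
// so the handler must additionally verify group ownership:
//
//     let limited = check_ns_privs(&store, &ns, &auth_id,
//         PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE)?;
//     if limited {
//         let owner = datastore.get_owner(&ns, &group)?;
//         check_backup_owner(&owner, &auth_id)?;
//     }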

// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(&store, operation)?;

    if limited {
        let owner = datastore.get_owner(&ns, backup_group)?;
        check_backup_owner(&owner, &auth_id)?;
    }

    Ok(datastore)
}
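
// Illustrative sketch (mirrors the handlers below; `backup_dir` stands for any
// flattened pbs_api_types::BackupDir parameter): a write-style endpoint loads its
// datastore with the MODIFY/PRUNE privilege pair and lets the helper perform the
// ownership check when only the partial privilege matched:
//
//     let datastore = check_privs_and_load_store(
//         &store,
//         &ns,
//         &auth_id,
//         PRIV_DATASTORE_MODIFY,
//         PRIV_DATASTORE_PRUNE,
//         Some(Operation::Write),
//         &backup_dir.group,
//     )?;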

fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}
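
// Note (illustrative, not from the original source): files found on disk but
// missing from the manifest are appended with `size: None` and `crypt_mode: None`,
// so consumers can separate manifest-backed entries from merely-discovered ones:
//
//     let (_manifest, files) = get_all_snapshot_files(&info)?;
//     let not_in_manifest: Vec<_> = files.iter().filter(|f| f.size.is_none()).collect();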

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();
    let list_all = !check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let store_with_ns = DatastoreWithNamespace {
        store: store.to_owned(),
        ns: ns.clone(),
    };

    datastore
        .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            let owner = match datastore.get_owner(&ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        store_with_ns,
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}
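
// Note (assumption about routing; the SubdirMap wiring sits further down in this
// file and is not shown here): the handler above is typically reached via
//
//     GET /api2/json/admin/datastore/{store}/groups?ns=<namespace>
//
// returning one GroupListItem per group the caller may audit or owns.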

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    if !datastore.remove_backup_group(&ns, &group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(ns, backup_dir)?;

    let info = BackupInfo::new(snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(ns, backup_dir)?;

    snapshot.destroy(false)?;

    Ok(Value::Null)
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let store_with_ns = DatastoreWithNamespace {
        store: store.to_owned(),
        ns: ns.clone(),
    };

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(ns, backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_groups_ok(ns)?
            .filter(|group| group.backup_type() == backup_type)
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => datastore
            .iter_backup_groups_ok(ns)?
            .filter(|group| group.backup_id() == backup_id)
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(ns)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state: '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}' in {} - {}",
                    group.group(),
                    &store_with_ns,
                    err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

fn get_snapshots_count(store: &Arc<DataStore>, owner: Option<&Authid>) -> Result<Counts, Error> {
    let root_ns = Default::default();
    ListAccessibleBackupGroups::new_with_privs(
        store,
        root_ns,
        MAX_NAMESPACE_DEPTH,
        Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
        None,
        owner,
    )?
    .try_fold(Counts::default(), |mut counts, group| {
        let group = match group {
            Ok(group) => group,
            Err(_) => return Ok(counts), // TODO: add this as error counts?
        };
        let snapshot_count = group.list_backups()?.len() as u64;

        // only include groups with snapshots; counting/displaying empty groups can confuse users
        if snapshot_count > 0 {
            let type_count = match group.backup_type() {
                BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                BackupType::Host => counts.host.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;
        }

        Ok(counts)
    })
}
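
// Illustrative sketch (assumption): consuming the resulting `Counts`, whose `ct`,
// `vm` and `host` fields stay `None` unless a matching group has snapshots:
//
//     let counts = get_snapshots_count(&datastore, None)?;
//     if let Some(vm) = counts.vm {
//         println!("vm: {} groups / {} snapshots", vm.groups, vm.snapshots);
//     }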

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let owner_check_required = check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if ns.is_root() {
                store.clone()
            } else {
                format!("{store}:{}", ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?;
                failed_dirs
            } else {
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
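
// Note (illustrative, derived from the match above): the worker type/id pair
// encodes the verification scope; assuming store "store1", the root namespace and
// group vm/100, the three cases produce roughly:
//
//     snapshot  -> worker_type "verify_snapshot", worker_id "store1:/vm/100/{backup_time:08X}"
//     group     -> worker_type "verify_group",    worker_id "store1:/vm/100"
//     datastore -> worker_type "verify",          worker_id "store1"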

#[api(
    input: {
        properties: {
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;
    let store_with_ns = DatastoreWithNamespace {
        store: store.to_owned(),
        ns: ns.clone(),
    };

    let worker_id = format!("{}:{}:{}", store, ns, group);
    let group = datastore.backup_group(ns, group);

    let mut prune_result = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let mut result = json!({
                "backup-type": info.backup_dir.backup_type(),
                "backup-id": info.backup_dir.backup_id(),
                "backup-time": info.backup_dir.backup_time(),
                "keep": keep,
                "protected": mark.protected(),
            });
            let prune_ns = info.backup_dir.backup_ns();
            if !prune_ns.is_root() {
                result["ns"] = serde_json::to_value(prune_ns)?;
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(
            worker,
            "retention options: {}",
            pbs_datastore::prune::cli_options_string(&prune_options)
        );
        task_log!(
            worker,
            "Starting prune on {} group \"{}\"",
            store_with_ns,
            group.group(),
        );
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();

        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark);

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.ty,
            "backup-id": group.id,
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = info.backup_dir.destroy(false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
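
// Illustrative sketch (shape inferred from the json! calls above): every prune
// result entry looks roughly like
//
//     {
//         "backup-type": "vm",
//         "backup-id": "100",
//         "backup-time": 1650000000,
//         "keep": true,
//         "protected": false
//     }
//
// with an extra "ns" field in dry-run mode when the namespace is not the root.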

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    ns: Option<BackupNamespace>,
    max_depth: Option<usize>,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let ns = ns.unwrap_or_default();
    let worker_id = format!("{}:{}", store, ns);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(
                worker,
                auth_id,
                prune_options,
                datastore,
                ns,
                max_depth.unwrap_or(MAX_NAMESPACE_DEPTH), // cannot rely on schema default
                dry_run,
            )
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

fn can_access_any_ns(store: Arc<DataStore>, auth_id: &Authid, user_info: &CachedUserInfo) -> bool {
    // NOTE: traversing the datastore could be avoided if we had an "ACL tree: is there any priv
    // below /datastore/{store}" helper
    let mut iter =
        if let Ok(iter) = store.recursive_iter_backup_ns_ok(BackupNamespace::root(), None) {
            iter
        } else {
            return false;
        };
    let wanted =
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
    let name = store.name();
    iter.any(|ns| -> bool {
        let store_with_ns = DatastoreWithNamespace {
            store: name.to_string(),
            ns,
        };
        let user_privs = user_info.lookup_privs(&auth_id, &store_with_ns.acl_path());
        user_privs & wanted != 0
    })
}
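
// Note (illustrative): `can_access_any_ns` is the fallback used by
// get_datastore_list below when the caller lacks privileges on the datastore
// root; it returns true as soon as any namespace grants one of the wanted
// privileges, so the datastore id (but not its comment) can still be listed:
//
//     if let Ok(datastore) = unsafe { DataStore::open_from_config(scfg, None) } {
//         allow_id = can_access_any_ns(datastore, &auth_id, &user_info);
//     }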

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut allow_id = false;
        if !allowed {
            let scfg: pbs_api_types::DataStoreConfig = serde_json::from_value(data.to_owned())?;
            // safety: we just cannot go through lookup as we must avoid an operation check
            if let Ok(datastore) = unsafe { DataStore::open_from_config(scfg, None) } {
                allow_id = can_access_any_ns(datastore, &auth_id, &user_info);
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name, store_with_ns, backup_dir, file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir_api.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name, store_with_ns, backup_dir_api, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            0,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir_api.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let mut path = backup_dir.full_path();
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {store_with_ns} {backup_dir_api}/{file_name}");

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}
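
// Illustrative sketch (assumption about the client side): "filepath" is the base64
// encoding of an absolute catalog path, with "root" and "/" accepted verbatim for
// the top level:
//
//     let filepath = base64::encode("/etc/hostname");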

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
            ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
        ]),
    )
).access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path)
            .await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                if tar {
                    proxmox_rest_server::spawn_internal_task(create_tar(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
                    Body::wrap_stream(zstdstream.map_err(move |err| {
                        eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
                        err
                    }))
                } else {
                    proxmox_rest_server::spawn_internal_task(create_zip(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                        eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                        err
                    }))
                }
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}
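
// Illustrative sketch (derived from the split above): here the decoded "filepath"
// is "<archive name>/<path inside the archive>"; assuming an archive named
// "root.pxar.didx", requesting /etc/hostname from it would be encoded as:
//
//     let filepath = base64::encode("/root.pxar.didx/etc/hostname");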

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrame,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrame,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let disk_manager = crate::tools::disks::DiskManage::new();

    let mut rrd_fields = vec![
        "total",
        "used",
        "read_ios",
        "read_bytes",
        "write_ios",
        "write_bytes",
    ];

    // we do not have io_ticks for zpools, so don't include them
    match disk_manager.find_mounted_device(&datastore.base_path()) {
        Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
        _ => rrd_fields.push("io_ticks"),
    };

    create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
    },
)]
/// Read active operations on a datastore
1843 pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
1844 let active_operations = task_tracking::get_active_operations(&store)?;
1845 Ok(json!({
1846 "read": active_operations.read,
1847 "write": active_operations.write,
1848 }))
1849 }
1850
1851 #[api(
1852 input: {
1853 properties: {
1854 store: { schema: DATASTORE_SCHEMA },
1855 ns: {
1856 type: BackupNamespace,
1857 optional: true,
1858 },
1859 backup_group: {
1860 type: pbs_api_types::BackupGroup,
1861 flatten: true,
1862 },
1863 },
1864 },
1865 access: {
1866 permission: &Permission::Anybody,
1867 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1868 or DATASTORE_BACKUP and being the owner of the group",
1869 },
1870 )]
1871 /// Get "notes" for a backup group
1872 pub fn get_group_notes(
1873 store: String,
1874 ns: Option<BackupNamespace>,
1875 backup_group: pbs_api_types::BackupGroup,
1876 rpcenv: &mut dyn RpcEnvironment,
1877 ) -> Result<String, Error> {
1878 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1879 let ns = ns.unwrap_or_default();
1880 let datastore = check_privs_and_load_store(
1881 &store,
1882 &ns,
1883 &auth_id,
1884 PRIV_DATASTORE_AUDIT,
1885 PRIV_DATASTORE_BACKUP,
1886 Some(Operation::Read),
1887 &backup_group,
1888 )?;
1889
1890 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
1891 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1892 }
1893
1894 #[api(
1895 input: {
1896 properties: {
1897 store: { schema: DATASTORE_SCHEMA },
1898 ns: {
1899 type: BackupNamespace,
1900 optional: true,
1901 },
1902 backup_group: {
1903 type: pbs_api_types::BackupGroup,
1904 flatten: true,
1905 },
1906 notes: {
1907 description: "A multiline text.",
1908 },
1909 },
1910 },
1911 access: {
1912 permission: &Permission::Anybody,
1913 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
1914 or DATASTORE_BACKUP and being the owner of the group",
1915 },
1916 )]
1917 /// Set "notes" for a backup group
1918 pub fn set_group_notes(
1919 store: String,
1920 ns: Option<BackupNamespace>,
1921 backup_group: pbs_api_types::BackupGroup,
1922 notes: String,
1923 rpcenv: &mut dyn RpcEnvironment,
1924 ) -> Result<(), Error> {
1925 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1926 let ns = ns.unwrap_or_default();
1927 let datastore = check_privs_and_load_store(
1928 &store,
1929 &ns,
1930 &auth_id,
1931 PRIV_DATASTORE_MODIFY,
1932 PRIV_DATASTORE_BACKUP,
1933 Some(Operation::Write),
1934 &backup_group,
1935 )?;
1936
1937 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
1938 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
1939
1940 Ok(())
1941 }
1942
1943 #[api(
1944 input: {
1945 properties: {
1946 store: { schema: DATASTORE_SCHEMA },
1947 ns: {
1948 type: BackupNamespace,
1949 optional: true,
1950 },
1951 backup_dir: {
1952 type: pbs_api_types::BackupDir,
1953 flatten: true,
1954 },
1955 },
1956 },
1957 access: {
1958 permission: &Permission::Anybody,
1959 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1960 or DATASTORE_BACKUP and being the owner of the group",
1961 },
1962 )]
1963 /// Get "notes" for a specific backup
1964 pub fn get_notes(
1965 store: String,
1966 ns: Option<BackupNamespace>,
1967 backup_dir: pbs_api_types::BackupDir,
1968 rpcenv: &mut dyn RpcEnvironment,
1969 ) -> Result<String, Error> {
1970 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1971 let ns = ns.unwrap_or_default();
1972 let datastore = check_privs_and_load_store(
1973 &store,
1974 &ns,
1975 &auth_id,
1976 PRIV_DATASTORE_AUDIT,
1977 PRIV_DATASTORE_BACKUP,
1978 Some(Operation::Read),
1979 &backup_dir.group,
1980 )?;
1981
1982 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
1983
1984 let (manifest, _) = backup_dir.load_manifest()?;
1985
1986 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
1987
1988 Ok(String::from(notes))
1989 }

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_MODIFY on /datastore/{store}[/{namespace}] for any \
            group, or DATASTORE_BACKUP and ownership of the group",
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    backup_dir
        .update_manifest(|manifest| {
            manifest.unprotected["notes"] = notes.into();
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}
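
// update_manifest() serializes the read-modify-write cycle by taking the
// snapshot's manifest lock before applying the closure and writing the
// result back, so it cannot race with other writers of the same manifest.
// A usage sketch with hypothetical values:
//
//   PUT /api2/json/admin/datastore/store1/notes
//       { "backup-type": "vm", "backup-id": "100",
//         "backup-time": 1657010000, "notes": "before upgrade" }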

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT on /datastore/{store}[/{namespace}] for any \
            group, or DATASTORE_BACKUP and ownership of the group",
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    Ok(backup_dir.is_protected())
}
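
// Protection is tracked as a marker file (".protected") inside the snapshot
// directory; is_protected() only checks for its existence. While the marker
// is present, prune and manual deletion refuse to remove the snapshot.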

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_MODIFY on /datastore/{store}[/{namespace}] for any \
            group, or DATASTORE_BACKUP and ownership of the group",
    },
)]
/// Enable or disable protection for a specific backup
pub fn set_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    datastore.update_protection(&backup_dir, protected)
}
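
// Usage sketch (hypothetical values); "protected" is a plain boolean:
//
//   PUT /api2/json/admin/datastore/store1/protected
//       { "backup-type": "vm", "backup-id": "100",
//         "backup-time": 1657010000, "protected": true }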

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify on the whole datastore, or Datastore.Backup to \
            change ownership between a user and that user's own API tokens for owned backups",
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let owner_check_required = check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_group = datastore.backup_group(ns, backup_group);

    if owner_check_required {
        let owner = backup_group.get_owner()?;

        let allowed = match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, both owned by the authenticated user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            }
            (true, false) => {
                // API token back to its owning user
                Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
            }
            (false, true) => {
                // user to one of their own API tokens
                owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
            }
            (false, false) => {
                // user to user - not allowed for unprivileged users
                false
            }
        };

        if !allowed {
            return Err(http_err!(
                UNAUTHORIZED,
                "{} does not have permission to change owner of backup group '{}' to {}",
                auth_id,
                backup_group.group(),
                new_owner,
            ));
        }
    }

    let user_info = CachedUserInfo::new()?;

    if !user_info.is_active_auth_id(&new_owner) {
        bail!(
            "{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner
        );
    }

    backup_group.set_owner(&new_owner, true)?;

    Ok(())
}
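
// A minimal sketch (not part of the original module) illustrating the Authid
// relations the ownership matrix above relies on; the user and token names
// are hypothetical:
#[cfg(test)]
mod backup_owner_tests {
    use super::*;

    #[test]
    fn token_maps_back_to_its_user() -> Result<(), Error> {
        let user: Authid = "user@pbs".parse()?;
        let token: Authid = "user@pbs!token1".parse()?;

        assert!(!user.is_token());
        assert!(token.is_token());
        // this is the check behind the (true, false) and (false, true) arms:
        // a token's Userid part, lifted back into an Authid, equals its owner
        assert_eq!(Authid::from(token.user().clone()), user);
        Ok(())
    }
}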

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "namespace",
        // FIXME: move into datastore:: sub-module?!
        &crate::api2::admin::namespace::ROUTER,
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];
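
// The #[sortable] attribute sorts this slice at compile time; subdir lookup
// and the list_subdirs_api_method! index both assume the entries are ordered
// by name. Each entry becomes one URL segment below the per-store router,
// e.g. "notes" is served at /api2/json/admin/datastore/{store}/notes.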

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);
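
// Resulting layout (hypothetical store name):
//
//   GET /api2/json/admin/datastore              -> datastore list
//   GET /api2/json/admin/datastore/store1       -> subdir index for "store1"
//   GET /api2/json/admin/datastore/store1/notes -> get_notes, etc.
//
// match_all("store", ...) binds the free path segment to the "store"
// parameter that the handlers above take as their first argument.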