1 //! Datastore Management
2
3 use std::collections::HashSet;
4 use std::ffi::OsStr;
5 use std::os::unix::ffi::OsStrExt;
6 use std::path::PathBuf;
7 use std::sync::Arc;
8
9 use anyhow::{bail, format_err, Error};
10 use futures::*;
11 use hyper::http::request::Parts;
12 use hyper::{header, Body, Response, StatusCode};
13 use serde::Deserialize;
14 use serde_json::{json, Value};
15 use tokio_stream::wrappers::ReceiverStream;
16
17 use proxmox_async::blocking::WrappedReaderStream;
18 use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
19 use proxmox_compression::zstd::ZstdEncoder;
20 use proxmox_router::{
21 http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
22 Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
23 };
24 use proxmox_schema::*;
25 use proxmox_sortable_macro::sortable;
26 use proxmox_sys::fs::{
27 file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
28 };
29 use proxmox_sys::{task_log, task_warn};
30 use proxmox_time::CalendarEvent;
31
32 use pxar::accessor::aio::Accessor;
33 use pxar::EntryKind;
34
35 use pbs_api_types::{
36 print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
37 Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus,
38 GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation,
39 PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
40 BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
41 BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
42 NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
43 PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
44 VERIFICATION_OUTDATED_AFTER_SCHEMA,
45 };
46 use pbs_client::pxar::{create_tar, create_zip};
47 use pbs_config::CachedUserInfo;
48 use pbs_datastore::backup_info::BackupInfo;
49 use pbs_datastore::cached_chunk_reader::CachedChunkReader;
50 use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
51 use pbs_datastore::data_blob::DataBlob;
52 use pbs_datastore::data_blob_reader::DataBlobReader;
53 use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
54 use pbs_datastore::fixed_index::FixedIndexReader;
55 use pbs_datastore::index::IndexFile;
56 use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
57 use pbs_datastore::prune::compute_prune_info;
58 use pbs_datastore::{
59 check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
60 StoreProgress, CATALOG_NAME,
61 };
62 use pbs_tools::json::required_string_param;
63 use proxmox_rest_server::{formatter, WorkerTask};
64
65 use crate::api2::backup::optional_ns_param;
66 use crate::api2::node::rrd::create_value_from_rrd;
67 use crate::backup::{
68 check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
69 ListAccessibleBackupGroups, NS_PRIVS_OK,
70 };
71
72 use crate::server::jobstate::{compute_schedule_status, Job, JobState};
73
74 const GROUP_NOTES_FILE_NAME: &str = "notes";
75
76 fn get_group_note_path(
77 store: &DataStore,
78 ns: &BackupNamespace,
79 group: &pbs_api_types::BackupGroup,
80 ) -> PathBuf {
81 let mut note_path = store.group_path(ns, group);
82 note_path.push(GROUP_NOTES_FILE_NAME);
83 note_path
84 }
85
86 // helper to unify common sequence of checks:
87 // 1. check privs on NS (full or limited access)
88 // 2. load datastore
89 // 3. if needed (only limited access), check owner of group
90 fn check_privs_and_load_store(
91 store: &str,
92 ns: &BackupNamespace,
93 auth_id: &Authid,
94 full_access_privs: u64,
95 partial_access_privs: u64,
96 operation: Option<Operation>,
97 backup_group: &pbs_api_types::BackupGroup,
98 ) -> Result<Arc<DataStore>, Error> {
99 let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;
100
101 let datastore = DataStore::lookup_datastore(store, operation)?;
102
103 if limited {
104 let owner = datastore.get_owner(ns, backup_group)?;
105 check_backup_owner(&owner, auth_id)?;
106 }
107
108 Ok(datastore)
109 }
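// Usage sketch (mirrors the call sites below, e.g. delete_group/prune): pass the
// full-access privilege first and the owner-bound one second; names are from this
// file, values are illustrative.
//
//     let datastore = check_privs_and_load_store(
//         &store,
//         &ns,
//         &auth_id,
//         PRIV_DATASTORE_MODIFY, // full access: no owner check
//         PRIV_DATASTORE_PRUNE,  // limited access: owner check enforced
//         Some(Operation::Write),
//         &group,
//     )?;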
110
111 fn read_backup_index(
112 backup_dir: &BackupDir,
113 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
114 let (manifest, index_size) = backup_dir.load_manifest()?;
115
116 let mut result = Vec::new();
117 for item in manifest.files() {
118 result.push(BackupContent {
119 filename: item.filename.clone(),
120 crypt_mode: Some(item.crypt_mode),
121 size: Some(item.size),
122 });
123 }
124
125 result.push(BackupContent {
126 filename: MANIFEST_BLOB_NAME.to_string(),
127 crypt_mode: match manifest.signature {
128 Some(_) => Some(CryptMode::SignOnly),
129 None => Some(CryptMode::None),
130 },
131 size: Some(index_size),
132 });
133
134 Ok((manifest, result))
135 }
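// Illustrative shape of the returned file list (values are made up): the manifest
// itself is appended as a pseudo-entry, SignOnly when the manifest is signed and
// None otherwise.
//
//     [
//         BackupContent { filename: "root.pxar.didx".into(), crypt_mode: Some(CryptMode::Encrypt), size: Some(1234) },
//         BackupContent { filename: MANIFEST_BLOB_NAME.to_string(), crypt_mode: Some(CryptMode::SignOnly), size: Some(512) },
//     ]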
136
137 fn get_all_snapshot_files(
138 info: &BackupInfo,
139 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
140 let (manifest, mut files) = read_backup_index(&info.backup_dir)?;
141
142 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
143 acc.insert(item.filename.clone());
144 acc
145 });
146
147 for file in &info.files {
148 if file_set.contains(file) {
149 continue;
150 }
151 files.push(BackupContent {
152 filename: file.to_string(),
153 size: None,
154 crypt_mode: None,
155 });
156 }
157
158 Ok((manifest, files))
159 }
160
161 #[api(
162 input: {
163 properties: {
164 store: {
165 schema: DATASTORE_SCHEMA,
166 },
167 ns: {
168 type: BackupNamespace,
169 optional: true,
170 },
171 },
172 },
173 returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
174 access: {
175 permission: &Permission::Anybody,
176 description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
177 /datastore/{store}[/{namespace}]",
178 },
179 )]
180 /// List backup groups.
181 pub fn list_groups(
182 store: String,
183 ns: Option<BackupNamespace>,
184 rpcenv: &mut dyn RpcEnvironment,
185 ) -> Result<Vec<GroupListItem>, Error> {
186 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
187 let ns = ns.unwrap_or_default();
188
189 let list_all = !check_ns_privs_full(
190 &store,
191 &ns,
192 &auth_id,
193 PRIV_DATASTORE_AUDIT,
194 PRIV_DATASTORE_BACKUP,
195 )?;
196
197 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
198
199 datastore
200 .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
201 .try_fold(Vec::new(), |mut group_info, group| {
202 let group = group?;
203
204 let owner = match datastore.get_owner(&ns, group.as_ref()) {
205 Ok(auth_id) => auth_id,
206 Err(err) => {
207 eprintln!(
208 "Failed to get owner of group '{}' in {} - {}",
209 group.group(),
210 print_store_and_ns(&store, &ns),
211 err
212 );
213 return Ok(group_info);
214 }
215 };
216 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
217 return Ok(group_info);
218 }
219
220 let snapshots = match group.list_backups() {
221 Ok(snapshots) => snapshots,
222 Err(_) => return Ok(group_info),
223 };
224
225 let backup_count: u64 = snapshots.len() as u64;
226 if backup_count == 0 {
227 return Ok(group_info);
228 }
229
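// The fold below picks the most recent *finished* backup as last_backup; if no
// snapshot is finished yet, it falls back to the newest one (the closure keeps
// `b` whenever `a` is unfinished or older).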
230 let last_backup = snapshots
231 .iter()
232 .fold(&snapshots[0], |a, b| {
233 if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
234 a
235 } else {
236 b
237 }
238 })
239 .to_owned();
240
241 let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
242 let comment = file_read_firstline(note_path).ok();
243
244 group_info.push(GroupListItem {
245 backup: group.into(),
246 last_backup: last_backup.backup_dir.backup_time(),
247 owner: Some(owner),
248 backup_count,
249 files: last_backup.files,
250 comment,
251 });
252
253 Ok(group_info)
254 })
255 }
256
257 #[api(
258 input: {
259 properties: {
260 store: { schema: DATASTORE_SCHEMA },
261 ns: {
262 type: BackupNamespace,
263 optional: true,
264 },
265 group: {
266 type: pbs_api_types::BackupGroup,
267 flatten: true,
268 },
269 },
270 },
271 access: {
272 permission: &Permission::Anybody,
273 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
274 or DATASTORE_PRUNE and being the owner of the group",
275 },
276 )]
277 /// Delete backup group including all snapshots.
278 pub async fn delete_group(
279 store: String,
280 ns: Option<BackupNamespace>,
281 group: pbs_api_types::BackupGroup,
282 rpcenv: &mut dyn RpcEnvironment,
283 ) -> Result<Value, Error> {
284 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
285
286 tokio::task::spawn_blocking(move || {
287 let ns = ns.unwrap_or_default();
288
289 let datastore = check_privs_and_load_store(
290 &store,
291 &ns,
292 &auth_id,
293 PRIV_DATASTORE_MODIFY,
294 PRIV_DATASTORE_PRUNE,
295 Some(Operation::Write),
296 &group,
297 )?;
298
299 let delete_stats = datastore.remove_backup_group(&ns, &group)?;
300 if !delete_stats.all_removed() {
301 bail!("group only partially deleted due to protected snapshots");
302 }
303
304 Ok(Value::Null)
305 })
306 .await?
307 }
308
309 #[api(
310 input: {
311 properties: {
312 store: { schema: DATASTORE_SCHEMA },
313 ns: {
314 type: BackupNamespace,
315 optional: true,
316 },
317 backup_dir: {
318 type: pbs_api_types::BackupDir,
319 flatten: true,
320 },
321 },
322 },
323 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
324 access: {
325 permission: &Permission::Anybody,
326 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
327 DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
328 },
329 )]
330 /// List snapshot files.
331 pub async fn list_snapshot_files(
332 store: String,
333 ns: Option<BackupNamespace>,
334 backup_dir: pbs_api_types::BackupDir,
335 _info: &ApiMethod,
336 rpcenv: &mut dyn RpcEnvironment,
337 ) -> Result<Vec<BackupContent>, Error> {
338 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
339
340 tokio::task::spawn_blocking(move || {
341 let ns = ns.unwrap_or_default();
342
343 let datastore = check_privs_and_load_store(
344 &store,
345 &ns,
346 &auth_id,
347 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
348 PRIV_DATASTORE_BACKUP,
349 Some(Operation::Read),
350 &backup_dir.group,
351 )?;
352
353 let snapshot = datastore.backup_dir(ns, backup_dir)?;
354
355 let info = BackupInfo::new(snapshot)?;
356
357 let (_manifest, files) = get_all_snapshot_files(&info)?;
358
359 Ok(files)
360 })
361 .await?
362 }
363
364 #[api(
365 input: {
366 properties: {
367 store: { schema: DATASTORE_SCHEMA },
368 ns: {
369 type: BackupNamespace,
370 optional: true,
371 },
372 backup_dir: {
373 type: pbs_api_types::BackupDir,
374 flatten: true,
375 },
376 },
377 },
378 access: {
379 permission: &Permission::Anybody,
380 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
381 or DATASTORE_PRUNE and being the owner of the group",
382 },
383 )]
384 /// Delete backup snapshot.
385 pub async fn delete_snapshot(
386 store: String,
387 ns: Option<BackupNamespace>,
388 backup_dir: pbs_api_types::BackupDir,
389 _info: &ApiMethod,
390 rpcenv: &mut dyn RpcEnvironment,
391 ) -> Result<Value, Error> {
392 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
393
394 tokio::task::spawn_blocking(move || {
395 let ns = ns.unwrap_or_default();
396
397 let datastore = check_privs_and_load_store(
398 &store,
399 &ns,
400 &auth_id,
401 PRIV_DATASTORE_MODIFY,
402 PRIV_DATASTORE_PRUNE,
403 Some(Operation::Write),
404 &backup_dir.group,
405 )?;
406
407 let snapshot = datastore.backup_dir(ns, backup_dir)?;
408
409 snapshot.destroy(false)?;
410
411 Ok(Value::Null)
412 })
413 .await?
414 }
415
416 #[api(
417 streaming: true,
418 input: {
419 properties: {
420 store: { schema: DATASTORE_SCHEMA },
421 ns: {
422 type: BackupNamespace,
423 optional: true,
424 },
425 "backup-type": {
426 optional: true,
427 type: BackupType,
428 },
429 "backup-id": {
430 optional: true,
431 schema: BACKUP_ID_SCHEMA,
432 },
433 },
434 },
435 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
436 access: {
437 permission: &Permission::Anybody,
438 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
439 or DATASTORE_BACKUP and being the owner of the group",
440 },
441 )]
442 /// List backup snapshots.
443 pub async fn list_snapshots(
444 store: String,
445 ns: Option<BackupNamespace>,
446 backup_type: Option<BackupType>,
447 backup_id: Option<String>,
448 _param: Value,
449 _info: &ApiMethod,
450 rpcenv: &mut dyn RpcEnvironment,
451 ) -> Result<Vec<SnapshotListItem>, Error> {
452 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
453
454 tokio::task::spawn_blocking(move || unsafe {
455 list_snapshots_blocking(store, ns, backup_type, backup_id, auth_id)
456 })
457 .await
458 .map_err(|err| format_err!("failed to await blocking task: {err}"))?
459 }
460
461 /// This must not run in a main worker thread as it potentially does tons of I/O.
462 unsafe fn list_snapshots_blocking(
463 store: String,
464 ns: Option<BackupNamespace>,
465 backup_type: Option<BackupType>,
466 backup_id: Option<String>,
467 auth_id: Authid,
468 ) -> Result<Vec<SnapshotListItem>, Error> {
469 let ns = ns.unwrap_or_default();
470
471 let list_all = !check_ns_privs_full(
472 &store,
473 &ns,
474 &auth_id,
475 PRIV_DATASTORE_AUDIT,
476 PRIV_DATASTORE_BACKUP,
477 )?;
478
479 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
480
481 // FIXME: also filter by owner before collecting; to do that nicely, the owner should move into
482 // the backup group and provide an error-free (Err -> None) accessor
483 let groups = match (backup_type, backup_id) {
484 (Some(backup_type), Some(backup_id)) => {
485 vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
486 }
487 // FIXME: Recursion
488 (Some(backup_type), None) => datastore
489 .iter_backup_type_ok(ns.clone(), backup_type)?
490 .collect(),
491 // FIXME: Recursion
492 (None, Some(backup_id)) => BackupType::iter()
493 .filter_map(|backup_type| {
494 let group =
495 datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id.clone());
496 group.exists().then_some(group)
497 })
498 .collect(),
499 // FIXME: Recursion
500 (None, None) => datastore.list_backup_groups(ns.clone())?,
501 };
502
503 let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
504 let backup = pbs_api_types::BackupDir {
505 group: group.into(),
506 time: info.backup_dir.backup_time(),
507 };
508 let protected = info.backup_dir.is_protected();
509
510 match get_all_snapshot_files(&info) {
511 Ok((manifest, files)) => {
512 // extract the first line from notes
513 let comment: Option<String> = manifest.unprotected["notes"]
514 .as_str()
515 .and_then(|notes| notes.lines().next())
516 .map(String::from);
517
518 let fingerprint = match manifest.fingerprint() {
519 Ok(fp) => fp,
520 Err(err) => {
521 eprintln!("error parsing fingerprint: '{}'", err);
522 None
523 }
524 };
525
526 let verification = manifest.unprotected["verify_state"].clone();
527 let verification: Option<SnapshotVerifyState> =
528 match serde_json::from_value(verification) {
529 Ok(verify) => verify,
530 Err(err) => {
531 eprintln!("error parsing verification state: '{}'", err);
532 None
533 }
534 };
535
536 let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
537
538 SnapshotListItem {
539 backup,
540 comment,
541 verification,
542 fingerprint,
543 files,
544 size,
545 owner,
546 protected,
547 }
548 }
549 Err(err) => {
550 eprintln!("error during snapshot file listing: '{}'", err);
551 let files = info
552 .files
553 .into_iter()
554 .map(|filename| BackupContent {
555 filename,
556 size: None,
557 crypt_mode: None,
558 })
559 .collect();
560
561 SnapshotListItem {
562 backup,
563 comment: None,
564 verification: None,
565 fingerprint: None,
566 files,
567 size: None,
568 owner,
569 protected,
570 }
571 }
572 }
573 };
574
575 groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
576 let owner = match group.get_owner() {
577 Ok(auth_id) => auth_id,
578 Err(err) => {
579 eprintln!(
580 "Failed to get owner of group '{}' in {} - {}",
581 group.group(),
582 print_store_and_ns(&store, &ns),
583 err
584 );
585 return Ok(snapshots);
586 }
587 };
588
589 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
590 return Ok(snapshots);
591 }
592
593 let group_backups = group.list_backups()?;
594
595 snapshots.extend(
596 group_backups
597 .into_iter()
598 .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
599 );
600
601 Ok(snapshots)
602 })
603 }
604
605 async fn get_snapshots_count(
606 store: &Arc<DataStore>,
607 owner: Option<&Authid>,
608 ) -> Result<Counts, Error> {
609 let store = Arc::clone(store);
610 let owner = owner.cloned();
611 tokio::task::spawn_blocking(move || {
612 let root_ns = Default::default();
613 ListAccessibleBackupGroups::new_with_privs(
614 &store,
615 root_ns,
616 MAX_NAMESPACE_DEPTH,
617 Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
618 None,
619 owner.as_ref(),
620 )?
621 .try_fold(Counts::default(), |mut counts, group| {
622 let group = match group {
623 Ok(group) => group,
624 Err(_) => return Ok(counts), // TODO: add this as error counts?
625 };
626 let snapshot_count = group.list_backups()?.len() as u64;
627
628 // only include groups with snapshots; counting/displaying empty groups can be confusing
629 if snapshot_count > 0 {
630 let type_count = match group.backup_type() {
631 BackupType::Ct => counts.ct.get_or_insert(Default::default()),
632 BackupType::Vm => counts.vm.get_or_insert(Default::default()),
633 BackupType::Host => counts.host.get_or_insert(Default::default()),
634 };
635
636 type_count.groups += 1;
637 type_count.snapshots += snapshot_count;
638 }
639
640 Ok(counts)
641 })
642 })
643 .await?
644 }
645
646 #[api(
647 input: {
648 properties: {
649 store: {
650 schema: DATASTORE_SCHEMA,
651 },
652 verbose: {
653 type: bool,
654 default: false,
655 optional: true,
656 description: "Include additional information like snapshot counts and GC status.",
657 },
658 },
659
660 },
661 returns: {
662 type: DataStoreStatus,
663 },
664 access: {
665 permission: &Permission::Anybody,
666 description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
667 the full statistics. Counts of accessible groups are always returned, if any",
668 },
669 )]
670 /// Get datastore status.
671 pub async fn status(
672 store: String,
673 verbose: bool,
674 _info: &ApiMethod,
675 rpcenv: &mut dyn RpcEnvironment,
676 ) -> Result<DataStoreStatus, Error> {
677 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
678 let user_info = CachedUserInfo::new()?;
679 let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
680
681 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read));
682
683 let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
684 true
685 } else if store_privs & PRIV_DATASTORE_READ != 0 {
686 false // allow at least counts, user can read groups anyway..
687 } else {
688 match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) {
689 // avoid leaking existence info if the user doesn't have at least some privilege below
690 Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")),
691 _ => false,
692 }
693 };
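// Privilege gating, summarized:
//   AUDIT or BACKUP on the store -> full stats and counts
//   READ on the store            -> counts only
//   some privilege below         -> counts only (datastore existence not leaked)
//   none of the above            -> 403 FORBIDDEN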
694 let datastore = datastore?; // only unwrap now to avoid leaking existence info
695
696 let (counts, gc_status) = if verbose {
697 let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
698 None
699 } else {
700 Some(&auth_id)
701 };
702
703 let counts = Some(get_snapshots_count(&datastore, filter_owner).await?);
704 let gc_status = if store_stats {
705 Some(datastore.last_gc_status())
706 } else {
707 None
708 };
709
710 (counts, gc_status)
711 } else {
712 (None, None)
713 };
714
715 Ok(if store_stats {
716 let storage = crate::tools::fs::fs_info(datastore.base_path()).await?;
717 DataStoreStatus {
718 total: storage.total,
719 used: storage.used,
720 avail: storage.available,
721 gc_status,
722 counts,
723 }
724 } else {
725 DataStoreStatus {
726 total: 0,
727 used: 0,
728 avail: 0,
729 gc_status,
730 counts,
731 }
732 })
733 }
734
735 #[api(
736 input: {
737 properties: {
738 store: {
739 schema: DATASTORE_SCHEMA,
740 },
741 ns: {
742 type: BackupNamespace,
743 optional: true,
744 },
745 "backup-type": {
746 type: BackupType,
747 optional: true,
748 },
749 "backup-id": {
750 schema: BACKUP_ID_SCHEMA,
751 optional: true,
752 },
753 "ignore-verified": {
754 schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
755 optional: true,
756 },
757 "outdated-after": {
758 schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
759 optional: true,
760 },
761 "backup-time": {
762 schema: BACKUP_TIME_SCHEMA,
763 optional: true,
764 },
765 "max-depth": {
766 schema: NS_MAX_DEPTH_SCHEMA,
767 optional: true,
768 },
769 },
770 },
771 returns: {
772 schema: UPID_SCHEMA,
773 },
774 access: {
775 permission: &Permission::Anybody,
776 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
777 or DATASTORE_BACKUP and being the owner of the group",
778 },
779 )]
780 /// Verify backups.
781 ///
782 /// This function can verify a single backup snapshot, all backups of a backup group,
783 /// or all backups in the datastore.
784 #[allow(clippy::too_many_arguments)]
785 pub fn verify(
786 store: String,
787 ns: Option<BackupNamespace>,
788 backup_type: Option<BackupType>,
789 backup_id: Option<String>,
790 backup_time: Option<i64>,
791 ignore_verified: Option<bool>,
792 outdated_after: Option<i64>,
793 max_depth: Option<usize>,
794 rpcenv: &mut dyn RpcEnvironment,
795 ) -> Result<Value, Error> {
796 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
797 let ns = ns.unwrap_or_default();
798
799 let owner_check_required = check_ns_privs_full(
800 &store,
801 &ns,
802 &auth_id,
803 PRIV_DATASTORE_VERIFY,
804 PRIV_DATASTORE_BACKUP,
805 )?;
806
807 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
808 let ignore_verified = ignore_verified.unwrap_or(true);
809
810 let worker_id;
811
812 let mut backup_dir = None;
813 let mut backup_group = None;
814 let mut worker_type = "verify";
815
816 match (backup_type, backup_id, backup_time) {
817 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
818 worker_id = format!(
819 "{}:{}/{}/{}/{:08X}",
820 store,
821 ns.display_as_path(),
822 backup_type,
823 backup_id,
824 backup_time
825 );
826 let dir =
827 datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;
828
829 if owner_check_required {
830 let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
831 check_backup_owner(&owner, &auth_id)?;
832 }
833
834 backup_dir = Some(dir);
835 worker_type = "verify_snapshot";
836 }
837 (Some(backup_type), Some(backup_id), None) => {
838 worker_id = format!(
839 "{}:{}/{}/{}",
840 store,
841 ns.display_as_path(),
842 backup_type,
843 backup_id
844 );
845 let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
846
847 if owner_check_required {
848 let owner = datastore.get_owner(&ns, &group)?;
849 check_backup_owner(&owner, &auth_id)?;
850 }
851
852 backup_group = Some(datastore.backup_group(ns.clone(), group));
853 worker_type = "verify_group";
854 }
855 (None, None, None) => {
856 worker_id = if ns.is_root() {
857 store
858 } else {
859 format!("{}:{}", store, ns.display_as_path())
860 };
861 }
862 _ => bail!("parameters do not specify a backup group or snapshot"),
863 }
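// The parameter combinations map to worker types as follows:
//   backup-type + backup-id + backup-time -> "verify_snapshot" (one snapshot)
//   backup-type + backup-id               -> "verify_group"    (one group)
//   none of the three                     -> "verify"          (datastore/namespace)
//   any other combination                 -> error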
864
865 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
866
867 let upid_str = WorkerTask::new_thread(
868 worker_type,
869 Some(worker_id),
870 auth_id.to_string(),
871 to_stdout,
872 move |worker| {
873 let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
874 let failed_dirs = if let Some(backup_dir) = backup_dir {
875 let mut res = Vec::new();
876 if !verify_backup_dir(
877 &verify_worker,
878 &backup_dir,
879 worker.upid().clone(),
880 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
881 )? {
882 res.push(print_ns_and_snapshot(
883 backup_dir.backup_ns(),
884 backup_dir.as_ref(),
885 ));
886 }
887 res
888 } else if let Some(backup_group) = backup_group {
889 verify_backup_group(
890 &verify_worker,
891 &backup_group,
892 &mut StoreProgress::new(1),
893 worker.upid(),
894 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
895 )?
896 } else {
897 let owner = if owner_check_required {
898 Some(&auth_id)
899 } else {
900 None
901 };
902
903 verify_all_backups(
904 &verify_worker,
905 worker.upid(),
906 ns,
907 max_depth,
908 owner,
909 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
910 )?
911 };
912 if !failed_dirs.is_empty() {
913 task_log!(worker, "Failed to verify the following snapshots/groups:");
914 for dir in failed_dirs {
915 task_log!(worker, "\t{}", dir);
916 }
917 bail!("verification failed - please check the log for details");
918 }
919 Ok(())
920 },
921 )?;
922
923 Ok(json!(upid_str))
924 }
925
926 #[api(
927 input: {
928 properties: {
929 group: {
930 type: pbs_api_types::BackupGroup,
931 flatten: true,
932 },
933 "dry-run": {
934 optional: true,
935 type: bool,
936 default: false,
937 description: "Just show what prune would do, but do not delete anything.",
938 },
939 "keep-options": {
940 type: KeepOptions,
941 flatten: true,
942 },
943 store: {
944 schema: DATASTORE_SCHEMA,
945 },
946 ns: {
947 type: BackupNamespace,
948 optional: true,
949 },
950 "use-task": {
951 type: bool,
952 default: false,
953 optional: true,
954 description: "Spins up an asynchronous task that does the work.",
955 },
956 },
957 },
958 returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
959 access: {
960 permission: &Permission::Anybody,
961 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
962 or DATASTORE_PRUNE and being the owner of the group",
963 },
964 )]
965 /// Prune a group on the datastore
966 pub fn prune(
967 group: pbs_api_types::BackupGroup,
968 dry_run: bool,
969 keep_options: KeepOptions,
970 store: String,
971 ns: Option<BackupNamespace>,
972 param: Value,
973 rpcenv: &mut dyn RpcEnvironment,
974 ) -> Result<Value, Error> {
975 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
976 let ns = ns.unwrap_or_default();
977 let datastore = check_privs_and_load_store(
978 &store,
979 &ns,
980 &auth_id,
981 PRIV_DATASTORE_MODIFY,
982 PRIV_DATASTORE_PRUNE,
983 Some(Operation::Write),
984 &group,
985 )?;
986
987 let worker_id = format!("{}:{}:{}", store, ns, group);
988 let group = datastore.backup_group(ns.clone(), group);
989
990 #[derive(Debug, serde::Serialize)]
991 struct PruneResult {
992 #[serde(rename = "backup-type")]
993 backup_type: BackupType,
994 #[serde(rename = "backup-id")]
995 backup_id: String,
996 #[serde(rename = "backup-time")]
997 backup_time: i64,
998 keep: bool,
999 protected: bool,
1000 #[serde(skip_serializing_if = "Option::is_none")]
1001 ns: Option<BackupNamespace>,
1002 }
1003 let mut prune_result: Vec<PruneResult> = Vec::new();
1004
1005 let list = group.list_backups()?;
1006
1007 let mut prune_info = compute_prune_info(list, &keep_options)?;
1008
1009 prune_info.reverse(); // delete older snapshots first
1010
1011 let keep_all = !keep_options.keeps_something();
1012
1013 if dry_run {
1014 for (info, mark) in prune_info {
1015 let keep = keep_all || mark.keep();
1016 let backup_dir = &info.backup_dir;
1017
1018 let mut result = PruneResult {
1019 backup_type: backup_dir.backup_type(),
1020 backup_id: backup_dir.backup_id().to_owned(),
1021 backup_time: backup_dir.backup_time(),
1022 keep,
1023 protected: mark.protected(),
1024 ns: None,
1025 };
1026 let prune_ns = backup_dir.backup_ns();
1027 if !prune_ns.is_root() {
1028 result.ns = Some(prune_ns.to_owned());
1029 }
1030 prune_result.push(result);
1031 }
1032 return Ok(json!(prune_result));
1033 }
1034
1035 let prune_group = move |worker: Arc<WorkerTask>| {
1036 if keep_all {
1037 task_log!(worker, "No prune selection - keeping all files.");
1038 } else {
1039 let mut opts = Vec::new();
1040 if !ns.is_root() {
1041 opts.push(format!("--ns {ns}"));
1042 }
1043 crate::server::cli_keep_options(&mut opts, &keep_options);
1044
1045 task_log!(worker, "retention options: {}", opts.join(" "));
1046 task_log!(
1047 worker,
1048 "Starting prune on {} group \"{}\"",
1049 print_store_and_ns(&store, &ns),
1050 group.group(),
1051 );
1052 }
1053
1054 for (info, mark) in prune_info {
1055 let keep = keep_all || mark.keep();
1056 let backup_dir = &info.backup_dir;
1057
1058 let backup_time = backup_dir.backup_time();
1059 let timestamp = backup_dir.backup_time_string();
1060 let group: &pbs_api_types::BackupGroup = backup_dir.as_ref();
1061
1062 let msg = format!("{}/{}/{timestamp} {mark}", group.ty, group.id);
1063
1064 task_log!(worker, "{msg}");
1065
1066 prune_result.push(PruneResult {
1067 backup_type: group.ty,
1068 backup_id: group.id.clone(),
1069 backup_time,
1070 keep,
1071 protected: mark.protected(),
1072 ns: None,
1073 });
1074
1075 if !keep {
1076 if let Err(err) = backup_dir.destroy(false) {
1077 task_warn!(
1078 worker,
1079 "failed to remove dir {:?}: {}",
1080 backup_dir.relative_path(),
1081 err,
1082 );
1083 }
1084 }
1085 }
1086 prune_result
1087 };
1088
1089 if param["use-task"].as_bool().unwrap_or(false) {
1090 let upid = WorkerTask::spawn(
1091 "prune",
1092 Some(worker_id),
1093 auth_id.to_string(),
1094 true,
1095 move |worker| async move {
1096 let _ = prune_group(worker.clone());
1097 Ok(())
1098 },
1099 )?;
1100 Ok(json!(upid))
1101 } else {
1102 let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
1103 let result = prune_group(worker.clone());
1104 worker.log_result(&Ok(()));
1105 Ok(json!(result))
1106 }
1107 }
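// Note on "use-task": when set, the handler returns a UPID immediately and the
// caller polls the task for the outcome; otherwise pruning runs synchronously
// within the request and the per-snapshot results are returned directly.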
1108
1109 #[api(
1110 input: {
1111 properties: {
1112 "dry-run": {
1113 optional: true,
1114 type: bool,
1115 default: false,
1116 description: "Just show what prune would do, but do not delete anything.",
1117 },
1118 "prune-options": {
1119 type: PruneJobOptions,
1120 flatten: true,
1121 },
1122 store: {
1123 schema: DATASTORE_SCHEMA,
1124 },
1125 },
1126 },
1127 returns: {
1128 schema: UPID_SCHEMA,
1129 },
1130 access: {
1131 permission: &Permission::Anybody,
1132 description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
1133 },
1134 )]
1135 /// Prune the datastore
1136 pub fn prune_datastore(
1137 dry_run: bool,
1138 prune_options: PruneJobOptions,
1139 store: String,
1140 _param: Value,
1141 rpcenv: &mut dyn RpcEnvironment,
1142 ) -> Result<String, Error> {
1143 let user_info = CachedUserInfo::new()?;
1144
1145 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1146
1147 user_info.check_privs(
1148 &auth_id,
1149 &prune_options.acl_path(&store),
1150 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
1151 true,
1152 )?;
1153
1154 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
1155 let ns = prune_options.ns.clone().unwrap_or_default();
1156 let worker_id = format!("{}:{}", store, ns);
1157
1158 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
1159
1160 let upid_str = WorkerTask::new_thread(
1161 "prune",
1162 Some(worker_id),
1163 auth_id.to_string(),
1164 to_stdout,
1165 move |worker| {
1166 crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
1167 },
1168 )?;
1169
1170 Ok(upid_str)
1171 }
1172
1173 #[api(
1174 input: {
1175 properties: {
1176 store: {
1177 schema: DATASTORE_SCHEMA,
1178 },
1179 },
1180 },
1181 returns: {
1182 schema: UPID_SCHEMA,
1183 },
1184 access: {
1185 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
1186 },
1187 )]
1188 /// Start garbage collection.
1189 pub fn start_garbage_collection(
1190 store: String,
1191 _info: &ApiMethod,
1192 rpcenv: &mut dyn RpcEnvironment,
1193 ) -> Result<Value, Error> {
1194 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
1195 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1196
1197 let job = Job::new("garbage_collection", &store)
1198 .map_err(|_| format_err!("garbage collection already running"))?;
1199
1200 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
1201
1202 let upid_str =
1203 crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
1204 .map_err(|err| {
1205 format_err!(
1206 "unable to start garbage collection job on datastore {} - {}",
1207 store,
1208 err
1209 )
1210 })?;
1211
1212 Ok(json!(upid_str))
1213 }
1214
1215 #[api(
1216 input: {
1217 properties: {
1218 store: {
1219 schema: DATASTORE_SCHEMA,
1220 },
1221 },
1222 },
1223 returns: {
1224 type: GarbageCollectionJobStatus,
1225 },
1226 access: {
1227 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
1228 },
1229 )]
1230 /// Garbage collection status.
1231 pub fn garbage_collection_status(
1232 store: String,
1233 _info: &ApiMethod,
1234 _rpcenv: &mut dyn RpcEnvironment,
1235 ) -> Result<GarbageCollectionJobStatus, Error> {
1236 let (config, _) = pbs_config::datastore::config()?;
1237 let store_config: DataStoreConfig = config.lookup("datastore", &store)?;
1238
1239 let mut info = GarbageCollectionJobStatus {
1240 store: store.clone(),
1241 schedule: store_config.gc_schedule,
1242 ..Default::default()
1243 };
1244
1245 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
1246 let status_in_memory = datastore.last_gc_status();
1247 let state_file = JobState::load("garbage_collection", &store)
1248 .map_err(|err| log::error!("could not open GC statefile for {store}: {err}"))
1249 .ok();
1250
1251 let mut last = proxmox_time::epoch_i64();
1252
1253 if let Some(ref upid) = status_in_memory.upid {
1254 let mut computed_schedule: JobScheduleStatus = JobScheduleStatus::default();
1255 if let Some(state) = state_file {
1256 if let Ok(cs) = compute_schedule_status(&state, Some(&upid)) {
1257 computed_schedule = cs;
1258 }
1259 }
1260
1261 if let Some(endtime) = computed_schedule.last_run_endtime {
1262 last = endtime;
1263 if let Ok(parsed_upid) = upid.parse::<UPID>() {
1264 info.duration = Some(endtime - parsed_upid.starttime);
1265 }
1266 }
1267
1268 info.next_run = computed_schedule.next_run;
1269 info.last_run_endtime = computed_schedule.last_run_endtime;
1270 info.last_run_state = computed_schedule.last_run_state;
1271 }
1272
1273 info.next_run = info
1274 .schedule
1275 .as_ref()
1276 .and_then(|s| {
1277 s.parse::<CalendarEvent>()
1278 .map_err(|err| log::error!("{err}"))
1279 .ok()
1280 })
1281 .and_then(|e| {
1282 e.compute_next_event(last)
1283 .map_err(|err| log::error!("{err}"))
1284 .ok()
1285 })
1286 .flatten();
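// compute_next_event returns Result<Option<i64>>, so after .ok() the chain
// yields Option<Option<i64>>; the final flatten() collapses it to Option<i64>.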
1287
1288 info.status = status_in_memory;
1289
1290 Ok(info)
1291 }
1292
1293 #[api(
1294 returns: {
1295 description: "List the accessible datastores.",
1296 type: Array,
1297 items: { type: DataStoreListItem },
1298 },
1299 access: {
1300 permission: &Permission::Anybody,
1301 },
1302 )]
1303 /// Datastore list
1304 pub fn get_datastore_list(
1305 _param: Value,
1306 _info: &ApiMethod,
1307 rpcenv: &mut dyn RpcEnvironment,
1308 ) -> Result<Vec<DataStoreListItem>, Error> {
1309 let (config, _digest) = pbs_config::datastore::config()?;
1310
1311 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1312 let user_info = CachedUserInfo::new()?;
1313
1314 let mut list = Vec::new();
1315
1316 for (store, (_, data)) in &config.sections {
1317 let acl_path = &["datastore", store];
1318 let user_privs = user_info.lookup_privs(&auth_id, acl_path);
1319 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
1320
1321 let mut allow_id = false;
1322 if !allowed {
1323 if let Ok(any_privs) = user_info.any_privs_below(&auth_id, acl_path, NS_PRIVS_OK) {
1324 allow_id = any_privs;
1325 }
1326 }
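// allow_id: the user has some privilege somewhere below this datastore, so the
// datastore ID is listed, but the comment is withheld (see below).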
1327
1328 if allowed || allow_id {
1329 list.push(DataStoreListItem {
1330 store: store.clone(),
1331 comment: if !allowed {
1332 None
1333 } else {
1334 data["comment"].as_str().map(String::from)
1335 },
1336 maintenance: data["maintenance-mode"].as_str().map(String::from),
1337 });
1338 }
1339 }
1340
1341 Ok(list)
1342 }
1343
1344 #[sortable]
1345 pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
1346 &ApiHandler::AsyncHttp(&download_file),
1347 &ObjectSchema::new(
1348 "Download single raw file from backup snapshot.",
1349 &sorted!([
1350 ("store", false, &DATASTORE_SCHEMA),
1351 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
1352 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1353 ("backup-id", false, &BACKUP_ID_SCHEMA),
1354 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1355 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
1356 ]),
1357 ),
1358 )
1359 .access(
1360 Some(
1361 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1362 DATASTORE_BACKUP and being the owner of the group",
1363 ),
1364 &Permission::Anybody,
1365 );
1366
1367 pub fn download_file(
1368 _parts: Parts,
1369 _req_body: Body,
1370 param: Value,
1371 _info: &ApiMethod,
1372 rpcenv: Box<dyn RpcEnvironment>,
1373 ) -> ApiResponseFuture {
1374 async move {
1375 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1376 let store = required_string_param(&param, "store")?;
1377 let backup_ns = optional_ns_param(&param)?;
1378
1379 let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
1380 let datastore = check_privs_and_load_store(
1381 store,
1382 &backup_ns,
1383 &auth_id,
1384 PRIV_DATASTORE_READ,
1385 PRIV_DATASTORE_BACKUP,
1386 Some(Operation::Read),
1387 &backup_dir.group,
1388 )?;
1389
1390 let file_name = required_string_param(&param, "file-name")?.to_owned();
1391
1392 println!(
1393 "Download {} from {} ({}/{})",
1394 file_name,
1395 print_store_and_ns(store, &backup_ns),
1396 backup_dir,
1397 file_name
1398 );
1399
1400 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
1401
1402 let mut path = datastore.base_path();
1403 path.push(backup_dir.relative_path());
1404 path.push(&file_name);
1405
1406 let file = tokio::fs::File::open(&path)
1407 .await
1408 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
1409
1410 let payload =
1411 tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
1412 .map_ok(|bytes| bytes.freeze())
1413 .map_err(move |err| {
1414 eprintln!("error during streaming of '{:?}' - {}", &path, err);
1415 err
1416 });
1417 let body = Body::wrap_stream(payload);
1418
1419 // fixme: set other headers ?
1420 Ok(Response::builder()
1421 .status(StatusCode::OK)
1422 .header(header::CONTENT_TYPE, "application/octet-stream")
1423 .body(body)
1424 .unwrap())
1425 }
1426 .boxed()
1427 }
1428
1429 #[sortable]
1430 pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
1431 &ApiHandler::AsyncHttp(&download_file_decoded),
1432 &ObjectSchema::new(
1433 "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
1434 &sorted!([
1435 ("store", false, &DATASTORE_SCHEMA),
1436 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
1437 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1438 ("backup-id", false, &BACKUP_ID_SCHEMA),
1439 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1440 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
1441 ]),
1442 ),
1443 )
1444 .access(
1445 Some(
1446 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1447 DATASTORE_BACKUP and being the owner of the group",
1448 ),
1449 &Permission::Anybody,
1450 );
1451
1452 pub fn download_file_decoded(
1453 _parts: Parts,
1454 _req_body: Body,
1455 param: Value,
1456 _info: &ApiMethod,
1457 rpcenv: Box<dyn RpcEnvironment>,
1458 ) -> ApiResponseFuture {
1459 async move {
1460 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1461 let store = required_string_param(&param, "store")?;
1462 let backup_ns = optional_ns_param(&param)?;
1463
1464 let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
1465 let datastore = check_privs_and_load_store(
1466 store,
1467 &backup_ns,
1468 &auth_id,
1469 PRIV_DATASTORE_READ,
1470 PRIV_DATASTORE_BACKUP,
1471 Some(Operation::Read),
1472 &backup_dir_api.group,
1473 )?;
1474
1475 let file_name = required_string_param(&param, "file-name")?.to_owned();
1476 let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
1477
1478 let (manifest, files) = read_backup_index(&backup_dir)?;
1479 for file in files {
1480 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1481 bail!("cannot decode '{}' - is encrypted", file_name);
1482 }
1483 }
1484
1485 println!(
1486 "Download {} from {} ({}/{})",
1487 file_name,
1488 print_store_and_ns(store, &backup_ns),
1489 backup_dir_api,
1490 file_name
1491 );
1492
1493 let mut path = datastore.base_path();
1494 path.push(backup_dir.relative_path());
1495 path.push(&file_name);
1496
1497 let (_, extension) = file_name.rsplit_once('.').unwrap();
1498
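// The archive kind is derived from the extension (matching the index/blob types
// imported above):
//   didx -> dynamic index, typically variable-sized chunks (file archives)
//   fidx -> fixed index, fixed-size chunks (e.g. VM/block images)
//   blob -> a single encoded blob (manifest, logs, small files)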
1499 let body = match extension {
1500 "didx" => {
1501 let index = DynamicIndexReader::open(&path).map_err(|err| {
1502 format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
1503 })?;
1504 let (csum, size) = index.compute_csum();
1505 manifest.verify_file(&file_name, &csum, size)?;
1506
1507 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1508 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
1509 Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
1510 eprintln!("error during streaming of '{:?}' - {}", path, err);
1511 err
1512 }))
1513 }
1514 "fidx" => {
1515 let index = FixedIndexReader::open(&path).map_err(|err| {
1516 format_err!("unable to read fixed index '{:?}' - {}", &path, err)
1517 })?;
1518
1519 let (csum, size) = index.compute_csum();
1520 manifest.verify_file(&file_name, &csum, size)?;
1521
1522 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1523 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
1524 Body::wrap_stream(
1525 AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
1526 move |err| {
1527 eprintln!("error during streaming of '{:?}' - {}", path, err);
1528 err
1529 },
1530 ),
1531 )
1532 }
1533 "blob" => {
1534 let file = std::fs::File::open(&path)
1535 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
1536
1537 // FIXME: load full blob to verify index checksum?
1538
1539 Body::wrap_stream(
1540 WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
1541 move |err| {
1542 eprintln!("error during streaming of '{:?}' - {}", path, err);
1543 err
1544 },
1545 ),
1546 )
1547 }
1548 extension => {
1549 bail!("cannot download '{}' files", extension);
1550 }
1551 };
1552
1553 // fixme: set other headers ?
1554 Ok(Response::builder()
1555 .status(StatusCode::OK)
1556 .header(header::CONTENT_TYPE, "application/octet-stream")
1557 .body(body)
1558 .unwrap())
1559 }
1560 .boxed()
1561 }
1562
1563 #[sortable]
1564 pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
1565 &ApiHandler::AsyncHttp(&upload_backup_log),
1566 &ObjectSchema::new(
1567 "Upload the client backup log file into a backup snapshot ('client.log.blob').",
1568 &sorted!([
1569 ("store", false, &DATASTORE_SCHEMA),
1570 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
1571 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1572 ("backup-id", false, &BACKUP_ID_SCHEMA),
1573 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1574 ]),
1575 ),
1576 )
1577 .access(
1578 Some("Only the backup creator/owner is allowed to do this."),
1579 &Permission::Anybody,
1580 );
1581
1582 pub fn upload_backup_log(
1583 _parts: Parts,
1584 req_body: Body,
1585 param: Value,
1586 _info: &ApiMethod,
1587 rpcenv: Box<dyn RpcEnvironment>,
1588 ) -> ApiResponseFuture {
1589 async move {
1590 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1591 let store = required_string_param(&param, "store")?;
1592 let backup_ns = optional_ns_param(&param)?;
1593
1594 let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
1595
1596 let datastore = check_privs_and_load_store(
1597 store,
1598 &backup_ns,
1599 &auth_id,
1600 0,
1601 PRIV_DATASTORE_BACKUP,
1602 Some(Operation::Write),
1603 &backup_dir_api.group,
1604 )?;
1605 let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
1606
1607 let file_name = CLIENT_LOG_BLOB_NAME;
1608
1609 let mut path = backup_dir.full_path();
1610 path.push(file_name);
1611
1612 if path.exists() {
1613 bail!("backup already contains a log.");
1614 }
1615
1616 println!(
1617 "Upload backup log to {} {backup_dir_api}/{file_name}",
1618 print_store_and_ns(store, &backup_ns),
1619 );
1620
1621 let data = req_body
1622 .map_err(Error::from)
1623 .try_fold(Vec::new(), |mut acc, chunk| {
1624 acc.extend_from_slice(&chunk);
1625 future::ok::<_, Error>(acc)
1626 })
1627 .await?;
1628
1629 // always verify blob/CRC at server side
1630 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1631
1632 replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
1633
1634 // fixme: use correct formatter
1635 Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
1636 }
1637 .boxed()
1638 }
1639
1640 #[api(
1641 input: {
1642 properties: {
1643 store: { schema: DATASTORE_SCHEMA },
1644 ns: {
1645 type: BackupNamespace,
1646 optional: true,
1647 },
1648 backup_dir: {
1649 type: pbs_api_types::BackupDir,
1650 flatten: true,
1651 },
1652 "filepath": {
1653 description: "Base64 encoded path.",
1654 type: String,
1655 }
1656 },
1657 },
1658 access: {
1659 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1660 DATASTORE_BACKUP and being the owner of the group",
1661 permission: &Permission::Anybody,
1662 },
1663 )]
1664 /// Get the entries of the given path of the catalog
1665 pub async fn catalog(
1666 store: String,
1667 ns: Option<BackupNamespace>,
1668 backup_dir: pbs_api_types::BackupDir,
1669 filepath: String,
1670 rpcenv: &mut dyn RpcEnvironment,
1671 ) -> Result<Vec<ArchiveEntry>, Error> {
1672 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1673
1674 tokio::task::spawn_blocking(move || {
1675 let ns = ns.unwrap_or_default();
1676
1677 let datastore = check_privs_and_load_store(
1678 &store,
1679 &ns,
1680 &auth_id,
1681 PRIV_DATASTORE_READ,
1682 PRIV_DATASTORE_BACKUP,
1683 Some(Operation::Read),
1684 &backup_dir.group,
1685 )?;
1686
1687 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
1688
1689 let file_name = CATALOG_NAME;
1690
1691 let (manifest, files) = read_backup_index(&backup_dir)?;
1692 for file in files {
1693 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1694 bail!("cannot decode '{}' - is encrypted", file_name);
1695 }
1696 }
1697
1698 let mut path = datastore.base_path();
1699 path.push(backup_dir.relative_path());
1700 path.push(file_name);
1701
1702 let index = DynamicIndexReader::open(&path)
1703 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1704
1705 let (csum, size) = index.compute_csum();
1706 manifest.verify_file(file_name, &csum, size)?;
1707
1708 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1709 let reader = BufferedDynamicReader::new(index, chunk_reader);
1710
1711 let mut catalog_reader = CatalogReader::new(reader);
1712
1713 let path = if filepath != "root" && filepath != "/" {
1714 base64::decode(filepath)?
1715 } else {
1716 vec![b'/']
1717 };
1718
1719 catalog_reader.list_dir_contents(&path)
1720 })
1721 .await?
1722 }
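// Usage sketch (illustrative values): "filepath" is base64 over a catalog path
// whose first component is the archive name, e.g. something like
// base64("/root.pxar.didx/etc") to list that sub-directory; "root" or "/" list
// the catalog root.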
1723
1724 #[sortable]
1725 pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1726 &ApiHandler::AsyncHttp(&pxar_file_download),
1727 &ObjectSchema::new(
1728 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
1729 &sorted!([
1730 ("store", false, &DATASTORE_SCHEMA),
1731 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
1732 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1733 ("backup-id", false, &BACKUP_ID_SCHEMA),
1734 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1735 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
1736 ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
1737 ]),
1738 )
1739 ).access(
1740 Some(
1741 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1742 DATASTORE_BACKUP and being the owner of the group",
1743 ),
1744 &Permission::Anybody,
1745 );
1746
1747 pub fn pxar_file_download(
1748 _parts: Parts,
1749 _req_body: Body,
1750 param: Value,
1751 _info: &ApiMethod,
1752 rpcenv: Box<dyn RpcEnvironment>,
1753 ) -> ApiResponseFuture {
1754 async move {
1755 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1756 let store = required_string_param(&param, "store")?;
1757 let ns = optional_ns_param(&param)?;
1758
1759 let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
1760 let datastore = check_privs_and_load_store(
1761 store,
1762 &ns,
1763 &auth_id,
1764 PRIV_DATASTORE_READ,
1765 PRIV_DATASTORE_BACKUP,
1766 Some(Operation::Read),
1767 &backup_dir.group,
1768 )?;
1769
1770 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
1771
1772 let filepath = required_string_param(&param, "filepath")?.to_owned();
1773
1774 let tar = param["tar"].as_bool().unwrap_or(false);
1775
1776 let mut components = base64::decode(&filepath)?;
1777 if !components.is_empty() && components[0] == b'/' {
1778 components.remove(0);
1779 }
1780
1781 let mut split = components.splitn(2, |c| *c == b'/');
1782 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
1783 let file_path = split.next().unwrap_or(b"/");
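// Example: filepath = base64("/root.pxar.didx/etc/hostname") leaves, after the
// leading '/' is stripped above, pxar_name = "root.pxar.didx" and
// file_path = b"etc/hostname".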
1784 let (manifest, files) = read_backup_index(&backup_dir)?;
1785 for file in files {
1786 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1787 bail!("cannot decode '{}' - is encrypted", pxar_name);
1788 }
1789 }
1790
1791 let mut path = datastore.base_path();
1792 path.push(backup_dir.relative_path());
1793 path.push(pxar_name);
1794
1795 let index = DynamicIndexReader::open(&path)
1796 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1797
1798 let (csum, size) = index.compute_csum();
1799 manifest.verify_file(pxar_name, &csum, size)?;
1800
1801 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1802 let reader = BufferedDynamicReader::new(index, chunk_reader);
1803 let archive_size = reader.archive_size();
1804 let reader = LocalDynamicReadAt::new(reader);
1805
1806 let decoder = Accessor::new(reader, archive_size).await?;
1807 let root = decoder.open_root().await?;
1808 let path = OsStr::from_bytes(file_path).to_os_string();
1809 let file = root
1810 .lookup(&path)
1811 .await?
1812 .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
1813
1814 let body = match file.kind() {
1815 EntryKind::File { .. } => Body::wrap_stream(
1816 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1817 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1818 err
1819 }),
1820 ),
1821 EntryKind::Hardlink(_) => Body::wrap_stream(
1822 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1823 .map_err(move |err| {
1824 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
1825 err
1826 }),
1827 ),
1828 EntryKind::Directory => {
1829 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
1830 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
1831 if tar {
1832 proxmox_rest_server::spawn_internal_task(create_tar(
1833 channelwriter,
1834 decoder,
1835 path.clone(),
1836 ));
1837 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1838 Body::wrap_stream(zstdstream.map_err(move |err| {
1839 log::error!("error during streaming of tar.zst '{:?}' - {}", path, err);
1840 err
1841 }))
1842 } else {
1843 proxmox_rest_server::spawn_internal_task(create_zip(
1844 channelwriter,
1845 decoder,
1846 path.clone(),
1847 ));
1848 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
1849 log::error!("error during streaming of zip '{:?}' - {}", path, err);
1850 err
1851 }))
1852 }
1853 }
1854 other => bail!("cannot download file of type {:?}", other),
1855 };
1856
1857 // fixme: set other headers ?
1858 Ok(Response::builder()
1859 .status(StatusCode::OK)
1860 .header(header::CONTENT_TYPE, "application/octet-stream")
1861 .body(body)
1862 .unwrap())
1863 }
1864 .boxed()
1865 }
1866
1867 #[api(
1868 input: {
1869 properties: {
1870 store: {
1871 schema: DATASTORE_SCHEMA,
1872 },
1873 timeframe: {
1874 type: RRDTimeFrame,
1875 },
1876 cf: {
1877 type: RRDMode,
1878 },
1879 },
1880 },
1881 access: {
1882 permission: &Permission::Privilege(
1883 &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1884 },
1885 )]
1886 /// Read datastore stats
1887 pub fn get_rrd_stats(
1888 store: String,
1889 timeframe: RRDTimeFrame,
1890 cf: RRDMode,
1891 _param: Value,
1892 ) -> Result<Value, Error> {
1893 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
1894 let disk_manager = crate::tools::disks::DiskManage::new();
1895
1896 let mut rrd_fields = vec![
1897 "total",
1898 "available",
1899 "used",
1900 "read_ios",
1901 "read_bytes",
1902 "write_ios",
1903 "write_bytes",
1904 ];
1905
1906 // we do not have io_ticks for zpools, so don't include them
1907 match disk_manager.find_mounted_device(&datastore.base_path()) {
1908 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
1909 _ => rrd_fields.push("io_ticks"),
1910 };
1911
1912 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1913 }
1914
1915 #[api(
1916 input: {
1917 properties: {
1918 store: {
1919 schema: DATASTORE_SCHEMA,
1920 },
1921 },
1922 },
1923 access: {
1924 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1925 },
1926 )]
1927 /// Read datastore stats
1928 pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
1929 let active_operations = task_tracking::get_active_operations(&store)?;
1930 Ok(json!({
1931 "read": active_operations.read,
1932 "write": active_operations.write,
1933 }))
1934 }
1935
1936 #[api(
1937 input: {
1938 properties: {
1939 store: { schema: DATASTORE_SCHEMA },
1940 ns: {
1941 type: BackupNamespace,
1942 optional: true,
1943 },
1944 backup_group: {
1945 type: pbs_api_types::BackupGroup,
1946 flatten: true,
1947 },
1948 },
1949 },
1950 access: {
1951 permission: &Permission::Anybody,
1952 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1953 or DATASTORE_BACKUP and being the owner of the group",
1954 },
1955 )]
1956 /// Get "notes" for a backup group
1957 pub fn get_group_notes(
1958 store: String,
1959 ns: Option<BackupNamespace>,
1960 backup_group: pbs_api_types::BackupGroup,
1961 rpcenv: &mut dyn RpcEnvironment,
1962 ) -> Result<String, Error> {
1963 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1964 let ns = ns.unwrap_or_default();
1965
1966 let datastore = check_privs_and_load_store(
1967 &store,
1968 &ns,
1969 &auth_id,
1970 PRIV_DATASTORE_AUDIT,
1971 PRIV_DATASTORE_BACKUP,
1972 Some(Operation::Read),
1973 &backup_group,
1974 )?;
1975
1976 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
1977 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1978 }
1979
1980 #[api(
1981 input: {
1982 properties: {
1983 store: { schema: DATASTORE_SCHEMA },
1984 ns: {
1985 type: BackupNamespace,
1986 optional: true,
1987 },
1988 backup_group: {
1989 type: pbs_api_types::BackupGroup,
1990 flatten: true,
1991 },
1992 notes: {
1993 description: "A multiline text.",
1994 },
1995 },
1996 },
1997 access: {
1998 permission: &Permission::Anybody,
1999 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2000 or DATASTORE_BACKUP and being the owner of the group",
2001 },
2002 )]
2003 /// Set "notes" for a backup group
2004 pub fn set_group_notes(
2005 store: String,
2006 ns: Option<BackupNamespace>,
2007 backup_group: pbs_api_types::BackupGroup,
2008 notes: String,
2009 rpcenv: &mut dyn RpcEnvironment,
2010 ) -> Result<(), Error> {
2011 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2012 let ns = ns.unwrap_or_default();
2013
2014 let datastore = check_privs_and_load_store(
2015 &store,
2016 &ns,
2017 &auth_id,
2018 PRIV_DATASTORE_MODIFY,
2019 PRIV_DATASTORE_BACKUP,
2020 Some(Operation::Write),
2021 &backup_group,
2022 )?;
2023
2024 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
2025 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
2026
2027 Ok(())
2028 }
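
// Illustrative sketch: replace_file (proxmox_sys) writes through a temporary
// file and renames it into place, so concurrent readers never see a
// half-written notes file. A simplified std-only version of the pattern
// (the exact sync behaviour of replace_file is not shown here):
fn replace_file_atomic(path: &std::path::Path, contents: &[u8]) -> std::io::Result<()> {
    use std::io::Write;

    let tmp = path.with_extension("tmp");
    let mut file = std::fs::File::create(&tmp)?;
    file.write_all(contents)?;
    file.sync_all()?; // flush data to disk before the rename
    std::fs::rename(&tmp, path) // atomic replace on POSIX filesystems
}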
2029
2030 #[api(
2031 input: {
2032 properties: {
2033 store: { schema: DATASTORE_SCHEMA },
2034 ns: {
2035 type: BackupNamespace,
2036 optional: true,
2037 },
2038 backup_dir: {
2039 type: pbs_api_types::BackupDir,
2040 flatten: true,
2041 },
2042 },
2043 },
2044 access: {
2045 permission: &Permission::Anybody,
2046 description: "Requires DATASTORE_AUDIT on /datastore/{store}[/{namespace}] for any group, \
2047 or DATASTORE_BACKUP and ownership of the group",
2048 },
2049 )]
2050 /// Get "notes" for a specific backup
2051 pub fn get_notes(
2052 store: String,
2053 ns: Option<BackupNamespace>,
2054 backup_dir: pbs_api_types::BackupDir,
2055 rpcenv: &mut dyn RpcEnvironment,
2056 ) -> Result<String, Error> {
2057 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2058 let ns = ns.unwrap_or_default();
2059
2060 let datastore = check_privs_and_load_store(
2061 &store,
2062 &ns,
2063 &auth_id,
2064 PRIV_DATASTORE_AUDIT,
2065 PRIV_DATASTORE_BACKUP,
2066 Some(Operation::Read),
2067 &backup_dir.group,
2068 )?;
2069
2070 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2071
2072 let (manifest, _) = backup_dir.load_manifest()?;
2073
2074 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
2075
2076 Ok(String::from(notes))
2077 }
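
// Illustrative sketch: snapshot notes live in the manifest's "unprotected"
// JSON object rather than in a separate file (compare get_group_notes above),
// and a missing field silently defaults to the empty string:
fn notes_from_unprotected(unprotected: &serde_json::Value) -> String {
    unprotected["notes"].as_str().unwrap_or("").to_owned()
}

#[test]
fn missing_notes_default_to_empty() {
    assert_eq!(notes_from_unprotected(&serde_json::json!({})), "");
    assert_eq!(
        notes_from_unprotected(&serde_json::json!({ "notes": "hi" })),
        "hi"
    );
}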
2078
2079 #[api(
2080 input: {
2081 properties: {
2082 store: { schema: DATASTORE_SCHEMA },
2083 ns: {
2084 type: BackupNamespace,
2085 optional: true,
2086 },
2087 backup_dir: {
2088 type: pbs_api_types::BackupDir,
2089 flatten: true,
2090 },
2091 notes: {
2092 description: "A multiline text.",
2093 },
2094 },
2095 },
2096 access: {
2097 permission: &Permission::Anybody,
2098 description: "Requires DATASTORE_MODIFY on /datastore/{store}[/{namespace}] for any group, \
2099 or DATASTORE_BACKUP and ownership of the group",
2100 },
2101 )]
2102 /// Set "notes" for a specific backup
2103 pub fn set_notes(
2104 store: String,
2105 ns: Option<BackupNamespace>,
2106 backup_dir: pbs_api_types::BackupDir,
2107 notes: String,
2108 rpcenv: &mut dyn RpcEnvironment,
2109 ) -> Result<(), Error> {
2110 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2111 let ns = ns.unwrap_or_default();
2112
2113 let datastore = check_privs_and_load_store(
2114 &store,
2115 &ns,
2116 &auth_id,
2117 PRIV_DATASTORE_MODIFY,
2118 PRIV_DATASTORE_BACKUP,
2119 Some(Operation::Write),
2120 &backup_dir.group,
2121 )?;
2122
2123 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2124
2125 backup_dir
2126 .update_manifest(|manifest| {
2127 manifest.unprotected["notes"] = notes.into();
2128 })
2129 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
2130
2131 Ok(())
2132 }
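
// Illustrative sketch: update_manifest takes a closure so the whole
// load/modify/store cycle stays in one place. The shape of that pattern,
// reduced to a plain JSON file (the helper name is hypothetical):
fn update_json_file<F>(path: &std::path::Path, update: F) -> Result<(), anyhow::Error>
where
    F: FnOnce(&mut serde_json::Value),
{
    let mut value: serde_json::Value = serde_json::from_slice(&std::fs::read(path)?)?;
    update(&mut value);
    std::fs::write(path, serde_json::to_vec_pretty(&value)?)?;
    Ok(())
}
// usage, mirroring the call above:
//     update_json_file(path, |manifest| {
//         manifest["unprotected"]["notes"] = "new text".into();
//     })?;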
2133
2134 #[api(
2135 input: {
2136 properties: {
2137 store: { schema: DATASTORE_SCHEMA },
2138 ns: {
2139 type: BackupNamespace,
2140 optional: true,
2141 },
2142 backup_dir: {
2143 type: pbs_api_types::BackupDir,
2144 flatten: true,
2145 },
2146 },
2147 },
2148 access: {
2149 permission: &Permission::Anybody,
2150 description: "Requires DATASTORE_AUDIT on /datastore/{store}[/{namespace}] for any group, \
2151 or DATASTORE_BACKUP and ownership of the group",
2152 },
2153 )]
2154 /// Query protection for a specific backup
2155 pub fn get_protection(
2156 store: String,
2157 ns: Option<BackupNamespace>,
2158 backup_dir: pbs_api_types::BackupDir,
2159 rpcenv: &mut dyn RpcEnvironment,
2160 ) -> Result<bool, Error> {
2161 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2162 let ns = ns.unwrap_or_default();
2163 let datastore = check_privs_and_load_store(
2164 &store,
2165 &ns,
2166 &auth_id,
2167 PRIV_DATASTORE_AUDIT,
2168 PRIV_DATASTORE_BACKUP,
2169 Some(Operation::Read),
2170 &backup_dir.group,
2171 )?;
2172
2173 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2174
2175 Ok(backup_dir.is_protected())
2176 }
2177
2178 #[api(
2179 input: {
2180 properties: {
2181 store: { schema: DATASTORE_SCHEMA },
2182 ns: {
2183 type: BackupNamespace,
2184 optional: true,
2185 },
2186 backup_dir: {
2187 type: pbs_api_types::BackupDir,
2188 flatten: true,
2189 },
2190 protected: {
2191 description: "Enable/disable protection.",
2192 },
2193 },
2194 },
2195 access: {
2196 permission: &Permission::Anybody,
2197 description: "Requires DATASTORE_MODIFY on /datastore/{store}[/{namespace}] for any group, \
2198 or DATASTORE_BACKUP and ownership of the group",
2199 },
2200 )]
2201 /// Enable or disable protection for a specific backup
2202 pub async fn set_protection(
2203 store: String,
2204 ns: Option<BackupNamespace>,
2205 backup_dir: pbs_api_types::BackupDir,
2206 protected: bool,
2207 rpcenv: &mut dyn RpcEnvironment,
2208 ) -> Result<(), Error> {
2209 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2210
2211 tokio::task::spawn_blocking(move || {
2212 let ns = ns.unwrap_or_default();
2213 let datastore = check_privs_and_load_store(
2214 &store,
2215 &ns,
2216 &auth_id,
2217 PRIV_DATASTORE_MODIFY,
2218 PRIV_DATASTORE_BACKUP,
2219 Some(Operation::Write),
2220 &backup_dir.group,
2221 )?;
2222
2223 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2224
2225 datastore.update_protection(&backup_dir, protected)
2226 })
2227 .await?
2228 }
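
// Illustrative sketch: the handler above is async, but protection updates do
// blocking filesystem work, so they run on tokio's blocking pool. Note the
// error flow: `.await` yields Result<Result<(), Error>, JoinError>, the
// trailing `?` converts a JoinError, and the inner Result becomes the
// function's return value. Reduced to a self-contained example:
async fn run_blocking_step() -> Result<(), anyhow::Error> {
    tokio::task::spawn_blocking(move || {
        std::fs::metadata("/")?; // stand-in for the blocking work
        Ok::<(), anyhow::Error>(())
    })
    .await? // `?` handles the JoinError; the inner Result is returned as-is
}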
2229
2230 #[api(
2231 input: {
2232 properties: {
2233 store: { schema: DATASTORE_SCHEMA },
2234 ns: {
2235 type: BackupNamespace,
2236 optional: true,
2237 },
2238 backup_group: {
2239 type: pbs_api_types::BackupGroup,
2240 flatten: true,
2241 },
2242 "new-owner": {
2243 type: Authid,
2244 },
2245 },
2246 },
2247 access: {
2248 permission: &Permission::Anybody,
2249 description: "Requires Datastore.Modify on the whole datastore, or Datastore.Backup for \
2250 owned backups when changing ownership between a user and that user's own API tokens"
2251 },
2252 )]
2253 /// Change owner of a backup group
2254 pub async fn set_backup_owner(
2255 store: String,
2256 ns: Option<BackupNamespace>,
2257 backup_group: pbs_api_types::BackupGroup,
2258 new_owner: Authid,
2259 rpcenv: &mut dyn RpcEnvironment,
2260 ) -> Result<(), Error> {
2261 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2262
2263 tokio::task::spawn_blocking(move || {
2264 let ns = ns.unwrap_or_default();
2265 let owner_check_required = check_ns_privs_full(
2266 &store,
2267 &ns,
2268 &auth_id,
2269 PRIV_DATASTORE_MODIFY,
2270 PRIV_DATASTORE_BACKUP,
2271 )?;
2272
2273 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
2274
2275 let backup_group = datastore.backup_group(ns, backup_group);
2276
2277 if owner_check_required {
2278 let owner = backup_group.get_owner()?;
2279
2280 let allowed = match (owner.is_token(), new_owner.is_token()) {
2281 (true, true) => {
2282 // API token to API token, owned by same user
2283 let owner = owner.user();
2284 let new_owner = new_owner.user();
2285 owner == new_owner && Authid::from(owner.clone()) == auth_id
2286 }
2287 (true, false) => {
2288 // API token to API token owner
2289 Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
2290 }
2291 (false, true) => {
2292 // API token owner to API token
2293 owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
2294 }
2295 (false, false) => {
2296 // User to User, not allowed for unprivileged users
2297 false
2298 }
2299 };
2300
2301 if !allowed {
2302 return Err(http_err!(
2303 UNAUTHORIZED,
2304 "{} does not have permission to change owner of backup group '{}' to {}",
2305 auth_id,
2306 backup_group.group(),
2307 new_owner,
2308 ));
2309 }
2310 }
2311
2312 let user_info = CachedUserInfo::new()?;
2313
2314 if !user_info.is_active_auth_id(&new_owner) {
2315 bail!(
2316 "{} '{}' is inactive or non-existent",
2317 if new_owner.is_token() {
2318 "API token".to_string()
2319 } else {
2320 "user".to_string()
2321 },
2322 new_owner
2323 );
2324 }
2325
2326 backup_group.set_owner(&new_owner, true)?;
2327
2328 Ok(())
2329 })
2330 .await?
2331 }
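
// Illustrative sketch: for callers without Datastore.Modify, the four
// (owner-is-token, new-owner-is-token) cases above reduce to "both sides must
// resolve to the calling user, and user-to-user transfers are never allowed".
// A hypothetical pure-function version, using plain strings in place of
// Authid and assuming the caller is a plain user rather than a token:
fn owner_change_allowed(
    caller: &str,
    owner: (&str, bool),     // (user, is_token)
    new_owner: (&str, bool), // (user, is_token)
) -> bool {
    match (owner.1, new_owner.1) {
        // user to user is never allowed for unprivileged callers
        (false, false) => false,
        // token<->token and token<->user: both sides must belong to the caller
        _ => owner.0 == caller && new_owner.0 == caller,
    }
}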
2332
2333 #[sortable]
2334 const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
2335 (
2336 "active-operations",
2337 &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
2338 ),
2339 ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
2340 (
2341 "change-owner",
2342 &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
2343 ),
2344 (
2345 "download",
2346 &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
2347 ),
2348 (
2349 "download-decoded",
2350 &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
2351 ),
2352 ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
2353 (
2354 "gc",
2355 &Router::new()
2356 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
2357 .post(&API_METHOD_START_GARBAGE_COLLECTION),
2358 ),
2359 (
2360 "group-notes",
2361 &Router::new()
2362 .get(&API_METHOD_GET_GROUP_NOTES)
2363 .put(&API_METHOD_SET_GROUP_NOTES),
2364 ),
2365 (
2366 "groups",
2367 &Router::new()
2368 .get(&API_METHOD_LIST_GROUPS)
2369 .delete(&API_METHOD_DELETE_GROUP),
2370 ),
2371 (
2372 "namespace",
2373 // FIXME: move into datastore:: sub-module?!
2374 &crate::api2::admin::namespace::ROUTER,
2375 ),
2376 (
2377 "notes",
2378 &Router::new()
2379 .get(&API_METHOD_GET_NOTES)
2380 .put(&API_METHOD_SET_NOTES),
2381 ),
2382 (
2383 "protected",
2384 &Router::new()
2385 .get(&API_METHOD_GET_PROTECTION)
2386 .put(&API_METHOD_SET_PROTECTION),
2387 ),
2388 ("prune", &Router::new().post(&API_METHOD_PRUNE)),
2389 (
2390 "prune-datastore",
2391 &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
2392 ),
2393 (
2394 "pxar-file-download",
2395 &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
2396 ),
2397 ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
2398 (
2399 "snapshots",
2400 &Router::new()
2401 .get(&API_METHOD_LIST_SNAPSHOTS)
2402 .delete(&API_METHOD_DELETE_SNAPSHOT),
2403 ),
2404 ("status", &Router::new().get(&API_METHOD_STATUS)),
2405 (
2406 "upload-backup-log",
2407 &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
2408 ),
2409 ("verify", &Router::new().post(&API_METHOD_VERIFY)),
2410 ];
2411
2412 const DATASTORE_INFO_ROUTER: Router = Router::new()
2413 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2414 .subdirs(DATASTORE_INFO_SUBDIRS);
2415
2416 pub const ROUTER: Router = Router::new()
2417 .get(&API_METHOD_GET_DATASTORE_LIST)
2418 .match_all("store", &DATASTORE_INFO_ROUTER);
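
// Illustrative sketch: #[sortable] keeps DATASTORE_INFO_SUBDIRS ordered by
// name, which lets subdir lookups use binary search, while
// match_all("store", ...) binds the next path component to the "store"
// parameter before descending. A toy lookup over a sorted (name, value)
// slice shows why the map must stay sorted:
fn lookup<'a, T>(subdirs: &'a [(&'a str, T)], name: &str) -> Option<&'a T> {
    subdirs
        .binary_search_by_key(&name, |(n, _)| *n)
        .ok()
        .map(|idx| &subdirs[idx].1)
}

#[test]
fn finds_subdir() {
    let map = [("gc", 1), ("notes", 2), ("rrd", 3)]; // must stay sorted by name
    assert_eq!(lookup(&map, "notes"), Some(&2));
    assert_eq!(lookup(&map, "missing"), None);
}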