// src/api2/admin/datastore.rs
//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sortable_macro::sortable;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::{task_log, task_warn};
use proxmox_time::CalendarEvent;

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
    Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus,
    GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation,
    PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
    NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
    VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
    ListAccessibleBackupGroups, NS_PRIVS_OK,
};

use crate::server::jobstate::{compute_schedule_status, Job, JobState};

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(store, operation)?;

    if limited {
        let owner = datastore.get_owner(ns, backup_group)?;
        check_backup_owner(&owner, auth_id)?;
    }

    Ok(datastore)
}
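// Illustrative call (a sketch only; the privilege pair mirrors how the handlers below
// use this helper, the variable names here are hypothetical):
//
//     let datastore = check_privs_and_load_store(
//         &store,
//         &ns,
//         &auth_id,
//         PRIV_DATASTORE_MODIFY, // full access: no owner check performed
//         PRIV_DATASTORE_PRUNE,  // limited access: owner check enforced
//         Some(Operation::Write),
//         &group,
//     )?;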

/// Load the manifest of a snapshot and return it together with the list of files it
/// references; an entry for the manifest blob itself is appended (`SignOnly` if the
/// manifest is signed, `None` otherwise).
fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

/// Merge the manifest's file list with the files actually present in the snapshot
/// directory; files not covered by the manifest are listed without size or crypt mode.
fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    datastore
        .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            let owner = match datastore.get_owner(&ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        print_store_and_ns(&store, &ns),
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
            let comment = file_read_firstline(note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}
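// Illustrative request (assuming the usual mounting of this handler as the "groups"
// subdir of the admin/datastore router, which is not part of this excerpt):
//
//     GET /api2/json/admin/datastore/{store}/groups?ns={namespace}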

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub async fn delete_group(
    store: String,
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &group,
        )?;

        let delete_stats = datastore.remove_backup_group(&ns, &group)?;
        if !delete_stats.all_removed() {
            bail!("group only partially deleted due to protected snapshots");
        }

        Ok(Value::Null)
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub async fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        let info = BackupInfo::new(snapshot)?;

        let (_manifest, files) = get_all_snapshot_files(&info)?;

        Ok(files)
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub async fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        snapshot.destroy(false)?;

        Ok(Value::Null)
    })
    .await?
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
pub async fn list_snapshots(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || unsafe {
        list_snapshots_blocking(store, ns, backup_type, backup_id, auth_id)
    })
    .await
    .map_err(|err| format_err!("failed to await blocking task: {err}"))?
}
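// The same offloading pattern (sketch) fits any handler that does heavy synchronous
// I/O: run the body via spawn_blocking and map the JoinError into an anyhow::Error
// (`heavy_sync_work` and `args` are placeholders):
//
//     tokio::task::spawn_blocking(move || heavy_sync_work(args))
//         .await
//         .map_err(|err| format_err!("failed to await blocking task: {err}"))?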

/// This must not run in a main worker thread as it potentially does tons of I/O.
unsafe fn list_snapshots_blocking(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    auth_id: Authid,
) -> Result<Vec<SnapshotListItem>, Error> {
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_type_ok(ns.clone(), backup_type)?
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => BackupType::iter()
            .filter_map(|backup_type| {
                let group =
                    datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id.clone());
                group.exists().then_some(group)
            })
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(ns.clone())?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state: '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}' in {} - {}",
                    group.group(),
                    print_store_and_ns(&store, &ns),
                    err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

/// Count accessible backup groups and snapshots per backup type across all namespaces,
/// skipping groups without snapshots.
async fn get_snapshots_count(
    store: &Arc<DataStore>,
    owner: Option<&Authid>,
) -> Result<Counts, Error> {
    let store = Arc::clone(store);
    let owner = owner.cloned();
    tokio::task::spawn_blocking(move || {
        let root_ns = Default::default();
        ListAccessibleBackupGroups::new_with_privs(
            &store,
            root_ns,
            MAX_NAMESPACE_DEPTH,
            Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
            None,
            owner.as_ref(),
        )?
        .try_fold(Counts::default(), |mut counts, group| {
            let group = match group {
                Ok(group) => group,
                Err(_) => return Ok(counts), // TODO: add this as error counts?
            };
            let snapshot_count = group.list_backups()?.len() as u64;

            // only include groups with snapshots; counting/displaying empty groups can be confusing
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                    BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                    BackupType::Host => counts.host.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
    })
    .await?
}
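// The resulting Counts serialize roughly as (illustrative values; empty groups are
// skipped by the check above):
//
//     { "vm": { "groups": 2, "snapshots": 10 }, "ct": { "groups": 1, "snapshots": 4 } }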

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
            the full statistics. Counts of accessible groups are always returned, if any",
    },
)]
/// Get datastore status.
pub async fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
        true
    } else if store_privs & PRIV_DATASTORE_READ != 0 {
        false // allow at least counts, user can read groups anyway
    } else {
        match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) {
            // avoid leaking existence info if the user has no privileges below at all
            Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")),
            _ => false,
        }
    };

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let (counts, gc_status) = if verbose {
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner).await?);
        let gc_status = if store_stats {
            Some(datastore.last_gc_status())
        } else {
            None
        };

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(if store_stats {
        let storage = crate::tools::fs::fs_info(datastore.base_path()).await?;
        DataStoreStatus {
            total: storage.total,
            used: storage.used,
            avail: storage.available,
            gc_status,
            counts,
        }
    } else {
        DataStoreStatus {
            total: 0,
            used: 0,
            avail: 0,
            gc_status,
            counts,
        }
    })
}
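// Without store_stats (counts-only access) total/used/avail are reported as 0; a full
// response looks roughly like (illustrative values, field names per DataStoreStatus):
//
//     { "total": 503316480000, "used": 125829120000, "avail": 377487360000,
//       "gc-status": { ... }, "counts": { ... } }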

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
#[allow(clippy::too_many_arguments)]
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let owner_check_required = check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if ns.is_root() {
                store
            } else {
                format!("{}:{}", store, ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }
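    // The worker_id formats produced above (illustrative; backup_time is rendered as
    // zero-padded hex via {:08X}):
    //
    //     verify_snapshot: "{store}:{ns-path}/{type}/{id}/{time:08X}"
    //     verify_group:    "{store}:{ns-path}/{type}/{id}"
    //     verify:          "{store}" or "{store}:{ns-path}"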

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            } else {
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "keep-options": {
                type: KeepOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "use-task": {
                type: bool,
                default: false,
                optional: true,
                description: "Spins up an asynchronous task that does the work.",
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    keep_options: KeepOptions,
    store: String,
    ns: Option<BackupNamespace>,
    param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    let worker_id = format!("{}:{}:{}", store, ns, group);
    let group = datastore.backup_group(ns.clone(), group);

    #[derive(Debug, serde::Serialize)]
    struct PruneResult {
        #[serde(rename = "backup-type")]
        backup_type: BackupType,
        #[serde(rename = "backup-id")]
        backup_id: String,
        #[serde(rename = "backup-time")]
        backup_time: i64,
        keep: bool,
        protected: bool,
        #[serde(skip_serializing_if = "Option::is_none")]
        ns: Option<BackupNamespace>,
    }
    let mut prune_result: Vec<PruneResult> = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &keep_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !keep_options.keeps_something();

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();
            let backup_dir = &info.backup_dir;

            let mut result = PruneResult {
                backup_type: backup_dir.backup_type(),
                backup_id: backup_dir.backup_id().to_owned(),
                backup_time: backup_dir.backup_time(),
                keep,
                protected: mark.protected(),
                ns: None,
            };
            let prune_ns = backup_dir.backup_ns();
            if !prune_ns.is_root() {
                result.ns = Some(prune_ns.to_owned());
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    let prune_group = move |worker: Arc<WorkerTask>| {
        if keep_all {
            task_log!(worker, "No prune selection - keeping all files.");
        } else {
            let mut opts = Vec::new();
            if !ns.is_root() {
                opts.push(format!("--ns {ns}"));
            }
            crate::server::cli_keep_options(&mut opts, &keep_options);

            task_log!(worker, "retention options: {}", opts.join(" "));
            task_log!(
                worker,
                "Starting prune on {} group \"{}\"",
                print_store_and_ns(&store, &ns),
                group.group(),
            );
        }

        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();
            let backup_dir = &info.backup_dir;

            let backup_time = backup_dir.backup_time();
            let timestamp = backup_dir.backup_time_string();
            let group: &pbs_api_types::BackupGroup = backup_dir.as_ref();

            let msg = format!("{}/{}/{timestamp} {mark}", group.ty, group.id);

            task_log!(worker, "{msg}");

            prune_result.push(PruneResult {
                backup_type: group.ty,
                backup_id: group.id.clone(),
                backup_time,
                keep,
                protected: mark.protected(),
                ns: None,
            });

            if !keep {
                if let Err(err) = backup_dir.destroy(false) {
                    task_warn!(
                        worker,
                        "failed to remove dir {:?}: {}",
                        backup_dir.relative_path(),
                        err,
                    );
                }
            }
        }
        prune_result
    };

    if param["use-task"].as_bool().unwrap_or(false) {
        let upid = WorkerTask::spawn(
            "prune",
            Some(worker_id),
            auth_id.to_string(),
            true,
            move |worker| async move {
                let _ = prune_group(worker.clone());
                Ok(())
            },
        )?;
        Ok(json!(upid))
    } else {
        let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
        let result = prune_group(worker.clone());
        worker.log_result(&Ok(()));
        Ok(json!(result))
    }
}
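// A single (dry-run) result entry serializes as (illustrative values; "ns" is only
// present for non-root namespaces):
//
//     { "backup-type": "vm", "backup-id": "100", "backup-time": 1672531200,
//       "keep": true, "protected": false }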

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneJobOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneJobOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let user_info = CachedUserInfo::new()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    user_info.check_privs(
        &auth_id,
        &prune_options.acl_path(&store),
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let ns = prune_options.ns.clone().unwrap_or_default();
    let worker_id = format!("{}:{}", store, ns);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionJobStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionJobStatus, Error> {
    let (config, _) = pbs_config::datastore::config()?;
    let store_config: DataStoreConfig = config.lookup("datastore", &store)?;

    let mut info = GarbageCollectionJobStatus {
        store: store.clone(),
        schedule: store_config.gc_schedule,
        ..Default::default()
    };

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let status_in_memory = datastore.last_gc_status();
    let state_file = JobState::load("garbage_collection", &store)
        .map_err(|err| log::error!("could not open GC statefile for {store}: {err}"))
        .ok();

    let mut last = proxmox_time::epoch_i64();

    if let Some(ref upid) = status_in_memory.upid {
        let mut computed_schedule: JobScheduleStatus = JobScheduleStatus::default();
        if let Some(state) = state_file {
            if let Ok(cs) = compute_schedule_status(&state, Some(upid)) {
                computed_schedule = cs;
            }
        }

        if let Some(endtime) = computed_schedule.last_run_endtime {
            last = endtime;
            if let Ok(parsed_upid) = upid.parse::<UPID>() {
                info.duration = Some(endtime - parsed_upid.starttime);
            }
        }

        info.next_run = computed_schedule.next_run;
        info.last_run_endtime = computed_schedule.last_run_endtime;
        info.last_run_state = computed_schedule.last_run_state;
    }

    info.next_run = info
        .schedule
        .as_ref()
        .and_then(|s| {
            s.parse::<CalendarEvent>()
                .map_err(|err| log::error!("{err}"))
                .ok()
        })
        .and_then(|e| {
            e.compute_next_event(last)
                .map_err(|err| log::error!("{err}"))
                .ok()
        })
        .flatten();

    info.status = status_in_memory;

    Ok(info)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let acl_path = &["datastore", store];
        let user_privs = user_info.lookup_privs(&auth_id, acl_path);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut allow_id = false;
        if !allowed {
            if let Ok(any_privs) = user_info.any_privs_below(&auth_id, acl_path, NS_PRIVS_OK) {
                allow_id = any_privs;
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}
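// Each entry exposes the store name, the comment (withheld without AUDIT/BACKUP
// privileges) and the raw "maintenance-mode" config value, roughly (illustrative):
//
//     { "store": "store1", "comment": "my datastore", "maintenance": "type=offline" }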

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(store, &backup_ns),
            backup_dir,
            file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir_api.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(store, &backup_ns),
            backup_dir_api,
            file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let (_, extension) = file_name.rsplit_once('.').unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            0,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir_api.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let mut path = backup_dir.full_path();
        path.push(file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!(
            "Upload backup log to {} {backup_dir_api}/{file_name}",
            print_store_and_ns(store, &backup_ns),
        );

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

/// Decode a base64-encoded path used by the catalog API; the literal values "root" and
/// "/" both map to the root path.
fn decode_path(path: &str) -> Result<Vec<u8>, Error> {
    if path != "root" && path != "/" {
        base64::decode(path).map_err(|err| format_err!("base64 decoding of path failed - {err}"))
    } else {
        Ok(vec![b'/'])
    }
}
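// e.g. decode_path("root")? and decode_path("/")? both yield b"/".to_vec(); anything
// else must be valid base64: decode_path("L2V0Yw==")? == b"/etc".to_vec()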

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            },
            "archive-name": {
                schema: BACKUP_ARCHIVE_NAME_SCHEMA,
                optional: true,
            },
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// List the entries at the given path of the catalog
pub async fn catalog(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    archive_name: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let file_name = archive_name
        .clone()
        .unwrap_or_else(|| CATALOG_NAME.to_string());

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    let (manifest, files) = read_backup_index(&backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{file_name}' - is encrypted");
        }
    }

    if archive_name.is_none() {
        tokio::task::spawn_blocking(move || {
            let mut path = datastore.base_path();
            path.push(backup_dir.relative_path());
            path.push(&file_name);

            let index = DynamicIndexReader::open(&path)
                .map_err(|err| format_err!("unable to read dynamic index '{path:?}' - {err}"))?;

            let (csum, size) = index.compute_csum();
            manifest.verify_file(&file_name, &csum, size)?;

            let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
            let reader = BufferedDynamicReader::new(index, chunk_reader);

            let mut catalog_reader = CatalogReader::new(reader);

            let path = decode_path(&filepath)?;
            catalog_reader.list_dir_contents(&path)
        })
        .await?
    } else {
        let (archive_name, payload_archive_name) =
            pbs_client::tools::get_pxar_archive_names(&file_name, &manifest)?;
        let (reader, archive_size) =
            get_local_pxar_reader(datastore.clone(), &manifest, &backup_dir, &archive_name)?;

        let reader = if let Some(payload_archive_name) = payload_archive_name {
            let payload_input =
                get_local_pxar_reader(datastore, &manifest, &backup_dir, &payload_archive_name)?;
            pxar::PxarVariant::Split(reader, payload_input)
        } else {
            pxar::PxarVariant::Unified(reader)
        };
        let accessor = Accessor::new(reader, archive_size).await?;

        let file_path = decode_path(&filepath)?;
        pbs_client::tools::pxar_metadata_catalog_lookup(
            accessor,
            OsStr::from_bytes(&file_path),
            None,
        )
        .await
    }
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
            ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
            ("archive-name", true, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

/// Open the dynamic index of a pxar archive inside a snapshot, verify its checksum
/// against the manifest, and return a random-access reader together with the archive size.
fn get_local_pxar_reader(
    datastore: Arc<DataStore>,
    manifest: &BackupManifest,
    backup_dir: &BackupDir,
    pxar_name: &str,
) -> Result<(LocalDynamicReadAt<LocalChunkReader>, u64), Error> {
    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(pxar_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(pxar_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let archive_size = reader.archive_size();

    Ok((LocalDynamicReadAt::new(reader), archive_size))
}

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let ns = optional_ns_param(&param)?;

        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let (pxar_name, file_path) = if let Some(archive_name) = param["archive-name"].as_str() {
            let archive_name = archive_name.as_bytes().to_owned();
            (archive_name, base64::decode(&filepath)?)
        } else {
            let mut split = components.splitn(2, |c| *c == b'/');
            let pxar_name = split.next().unwrap();
            let file_path = split.next().unwrap_or(b"/");
            (pxar_name.to_owned(), file_path.to_owned())
        };
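        // without an explicit archive-name parameter, the decoded filepath carries both
        // parts, e.g. base64("root.pxar.didx/etc/hostname") (archive name illustrative)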
        let pxar_name = std::str::from_utf8(&pxar_name)?;
        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let (pxar_name, payload_archive_name) =
            pbs_client::tools::get_pxar_archive_names(pxar_name, &manifest)?;
        let (reader, archive_size) =
            get_local_pxar_reader(datastore.clone(), &manifest, &backup_dir, &pxar_name)?;

        let reader = if let Some(payload_archive_name) = payload_archive_name {
            let payload_input =
                get_local_pxar_reader(datastore, &manifest, &backup_dir, &payload_archive_name)?;
            pxar::PxarVariant::Split(reader, payload_input)
        } else {
            pxar::PxarVariant::Unified(reader)
        };
        let decoder = Accessor::new(reader, archive_size).await?;

        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(&file_path).to_os_string();
        let file = root
            .lookup(&path)
            .await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                if tar {
                    proxmox_rest_server::spawn_internal_task(create_tar(
                        channelwriter,
                        decoder,
                        path.clone(),
                    ));
                    let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
                    Body::wrap_stream(zstdstream.map_err(move |err| {
                        log::error!("error during streaming of tar.zst '{:?}' - {}", path, err);
                        err
                    }))
                } else {
                    proxmox_rest_server::spawn_internal_task(create_zip(
                        channelwriter,
                        decoder,
                        path.clone(),
                    ));
                    Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                        log::error!("error during streaming of zip '{:?}' - {}", path, err);
                        err
                    }))
                }
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrame,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrame,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let disk_manager = crate::tools::disks::DiskManage::new();

    let mut rrd_fields = vec![
        "total",
        "available",
        "used",
        "read_ios",
        "read_bytes",
        "write_ios",
        "write_bytes",
    ];

    // we do not have io_ticks for zpools, so don't include them
    match disk_manager.find_mounted_device(&datastore.base_path()) {
        Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
        _ => rrd_fields.push("io_ticks"),
    };

    create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
    },
)]
/// Read the count of active read/write operations on the datastore
pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
    let active_operations = task_tracking::get_active_operations(&store)?;
    Ok(json!({
        "read": active_operations.read,
        "write": active_operations.write,
    }))
}
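// Response shape (illustrative values):
//
//     { "read": 0, "write": 1 }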
1997
1998#[api(
1999 input: {
2000 properties: {
2001 store: { schema: DATASTORE_SCHEMA },
2002 ns: {
2003 type: BackupNamespace,
2004 optional: true,
2005 },
2006 backup_group: {
2007 type: pbs_api_types::BackupGroup,
2008 flatten: true,
2009 },
2010 },
2011 },
2012 access: {
2013 permission: &Permission::Anybody,
2014 description: "Requires DATASTORE_AUDIT on /datastore/{store}[/{namespace}] for any \
2015 group, or DATASTORE_BACKUP and ownership of the group",
2016 },
2017)]
2018/// Get "notes" for a backup group
2019pub fn get_group_notes(
2020 store: String,
2021 ns: Option<BackupNamespace>,
2022 backup_group: pbs_api_types::BackupGroup,
2023 rpcenv: &mut dyn RpcEnvironment,
2024) -> Result<String, Error> {
2025 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2026 let ns = ns.unwrap_or_default();
2027
2028 let datastore = check_privs_and_load_store(
2029 &store,
2030 &ns,
2031 &auth_id,
2032 PRIV_DATASTORE_AUDIT,
2033 PRIV_DATASTORE_BACKUP,
2034 Some(Operation::Read),
2035 &backup_group,
2036 )?;
2037
2038 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
2039 Ok(file_read_optional_string(note_path)?.unwrap_or_default())
2040}
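// Hypothetical read sketch ("group-notes" is registered in the subdir map at
// the end of this file; group values are invented):
//
//   GET /api2/json/admin/datastore/store1/group-notes?backup-type=vm&backup-id=100
//
// A missing notes file yields an empty string rather than an error.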
2041
2042#[api(
2043 input: {
2044 properties: {
2045 store: { schema: DATASTORE_SCHEMA },
2046 ns: {
2047 type: BackupNamespace,
2048 optional: true,
2049 },
2050 backup_group: {
2051 type: pbs_api_types::BackupGroup,
2052 flatten: true,
2053 },
2054 notes: {
2055 description: "A multiline text.",
2056 },
2057 },
2058 },
2059 access: {
2060 permission: &Permission::Anybody,
2061 description: "Requires DATASTORE_MODIFY on /datastore/{store}[/{namespace}] for any \
2062 group, or DATASTORE_BACKUP and ownership of the group",
2063 },
2064)]
2065/// Set "notes" for a backup group
2066pub fn set_group_notes(
2067 store: String,
2068 ns: Option<BackupNamespace>,
2069 backup_group: pbs_api_types::BackupGroup,
2070 notes: String,
2071 rpcenv: &mut dyn RpcEnvironment,
2072) -> Result<(), Error> {
2073 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2074 let ns = ns.unwrap_or_default();
2075
2076 let datastore = check_privs_and_load_store(
2077 &store,
2078 &ns,
2079 &auth_id,
2080 PRIV_DATASTORE_MODIFY,
2081 PRIV_DATASTORE_BACKUP,
2082 Some(Operation::Write),
2083 &backup_group,
2084 )?;
2085
2086 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
2087 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
2088
2089 Ok(())
2090}
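// Matching write sketch (values invented); the notes file is rewritten via
// replace_file() rather than appended to:
//
//   PUT /api2/json/admin/datastore/store1/group-notes
//       { "backup-type": "vm", "backup-id": "100", "notes": "..." }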
2091
2092#[api(
2093 input: {
2094 properties: {
2095 store: { schema: DATASTORE_SCHEMA },
2096 ns: {
2097 type: BackupNamespace,
2098 optional: true,
2099 },
2100 backup_dir: {
2101 type: pbs_api_types::BackupDir,
2102 flatten: true,
2103 },
2104 },
2105 },
2106 access: {
2107 permission: &Permission::Anybody,
2108 description: "Requires DATASTORE_AUDIT on /datastore/{store}[/{namespace}] for any \
2109 group, or DATASTORE_BACKUP and ownership of the group",
2110 },
2111)]
2112/// Get "notes" for a specific backup
2113pub fn get_notes(
2114 store: String,
2115 ns: Option<BackupNamespace>,
2116 backup_dir: pbs_api_types::BackupDir,
2117 rpcenv: &mut dyn RpcEnvironment,
2118) -> Result<String, Error> {
2119 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2120 let ns = ns.unwrap_or_default();
2121
2122 let datastore = check_privs_and_load_store(
2123 &store,
2124 &ns,
2125 &auth_id,
2126 PRIV_DATASTORE_AUDIT,
2127 PRIV_DATASTORE_BACKUP,
2128 Some(Operation::Read),
2129 &backup_dir.group,
2130 )?;
2131
2132 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2133
2134 let (manifest, _) = backup_dir.load_manifest()?;
2135
2136 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
2137
2138 Ok(String::from(notes))
2139}
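// Snapshot notes live in the manifest's "unprotected" section, so a read has
// to load the manifest first; hypothetical sketch (values invented):
//
//   GET /api2/json/admin/datastore/store1/notes
//       ?backup-type=vm&backup-id=100&backup-time=1680000000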
2140
2141#[api(
2142 input: {
2143 properties: {
2144 store: { schema: DATASTORE_SCHEMA },
2145 ns: {
2146 type: BackupNamespace,
2147 optional: true,
2148 },
2149 backup_dir: {
2150 type: pbs_api_types::BackupDir,
2151 flatten: true,
2152 },
2153 notes: {
2154 description: "A multiline text.",
2155 },
2156 },
2157 },
2158 access: {
2159 permission: &Permission::Anybody,
2160 description: "Requires DATASTORE_MODIFY on /datastore/{store}[/{namespace}] for any \
2161 group, or DATASTORE_BACKUP and ownership of the group",
2162 },
2163)]
2164/// Set "notes" for a specific backup
2165pub fn set_notes(
2166 store: String,
2167 ns: Option<BackupNamespace>,
2168 backup_dir: pbs_api_types::BackupDir,
2169 notes: String,
2170 rpcenv: &mut dyn RpcEnvironment,
2171) -> Result<(), Error> {
2172 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2173 let ns = ns.unwrap_or_default();
2174
2175 let datastore = check_privs_and_load_store(
2176 &store,
2177 &ns,
2178 &auth_id,
2179 PRIV_DATASTORE_MODIFY,
2180 PRIV_DATASTORE_BACKUP,
2181 Some(Operation::Write),
2182 &backup_dir.group,
2183 )?;
2184
2185 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2186
2187 backup_dir
2188 .update_manifest(|manifest| {
2189 manifest.unprotected["notes"] = notes.into();
2190 })
2191 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
2192
2193 Ok(())
2194}
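// The write path goes through update_manifest(), which applies the closure
// and persists the manifest blob; hypothetical sketch (values invented):
//
//   PUT /api2/json/admin/datastore/store1/notes
//       { "backup-type": "vm", "backup-id": "100",
//         "backup-time": 1680000000, "notes": "..." }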
2195
2196#[api(
2197 input: {
2198 properties: {
2199 store: { schema: DATASTORE_SCHEMA },
2200 ns: {
2201 type: BackupNamespace,
2202 optional: true,
2203 },
2204 backup_dir: {
2205 type: pbs_api_types::BackupDir,
2206 flatten: true,
2207 },
2208 },
2209 },
2210 access: {
2211 permission: &Permission::Anybody,
2212 description: "Requires DATASTORE_AUDIT on /datastore/{store}[/{namespace}] for any \
2213 group, or DATASTORE_BACKUP and ownership of the group",
2214 },
2215)]
2216/// Query protection for a specific backup
2217pub fn get_protection(
2218 store: String,
2219 ns: Option<BackupNamespace>,
2220 backup_dir: pbs_api_types::BackupDir,
2221 rpcenv: &mut dyn RpcEnvironment,
2222) -> Result<bool, Error> {
2223 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2224 let ns = ns.unwrap_or_default();
2225 let datastore = check_privs_and_load_store(
2226 &store,
2227 &ns,
2228 &auth_id,
2229 PRIV_DATASTORE_AUDIT,
2230 PRIV_DATASTORE_BACKUP,
2231 Some(Operation::Read),
2232 &backup_dir.group,
2233 )?;
2234
2235 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2236
2237 Ok(backup_dir.is_protected())
2238}
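// Protection is a per-snapshot boolean; hypothetical query (values invented):
//
//   GET /api2/json/admin/datastore/store1/protected
//       ?backup-type=vm&backup-id=100&backup-time=1680000000
//   => true | false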
2239
2240#[api(
2241 input: {
2242 properties: {
2243 store: { schema: DATASTORE_SCHEMA },
2244 ns: {
2245 type: BackupNamespace,
2246 optional: true,
2247 },
2248 backup_dir: {
2249 type: pbs_api_types::BackupDir,
2250 flatten: true,
2251 },
2252 protected: {
2253 description: "Enable/disable protection.",
2254 },
2255 },
2256 },
2257 access: {
2258 permission: &Permission::Anybody,
2259 description: "Requires DATASTORE_MODIFY on /datastore/{store}[/{namespace}] for any \
2260 group, or DATASTORE_BACKUP and ownership of the group",
2261 },
2262)]
2263/// Enable or disable protection for a specific backup
2264pub async fn set_protection(
2265 store: String,
2266 ns: Option<BackupNamespace>,
2267 backup_dir: pbs_api_types::BackupDir,
2268 protected: bool,
2269 rpcenv: &mut dyn RpcEnvironment,
2270) -> Result<(), Error> {
2271 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2272
2273 tokio::task::spawn_blocking(move || {
2274 let ns = ns.unwrap_or_default();
2275 let datastore = check_privs_and_load_store(
2276 &store,
2277 &ns,
2278 &auth_id,
2279 PRIV_DATASTORE_MODIFY,
2280 PRIV_DATASTORE_BACKUP,
2281 Some(Operation::Write),
2282 &backup_dir.group,
2283 )?;
2284
2285 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2286
2287 datastore.update_protection(&backup_dir, protected)
2288 })
2289 .await?
2290}
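// Matching toggle sketch (values invented); note that the blocking datastore
// update runs on a worker thread via spawn_blocking() above:
//
//   PUT /api2/json/admin/datastore/store1/protected
//       { "backup-type": "vm", "backup-id": "100",
//         "backup-time": 1680000000, "protected": true }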
2291
2292#[api(
2293 input: {
2294 properties: {
2295 store: { schema: DATASTORE_SCHEMA },
2296 ns: {
2297 type: BackupNamespace,
2298 optional: true,
2299 },
2300 backup_group: {
2301 type: pbs_api_types::BackupGroup,
2302 flatten: true,
2303 },
2304 "new-owner": {
2305 type: Authid,
2306 },
2307 },
2308 },
2309 access: {
2310 permission: &Permission::Anybody,
2311 description: "Requires Datastore.Modify on the whole datastore; with only Datastore.Backup, \
2312 ownership of owned backups may only change between a user and that user's own API tokens"
2313 },
2314)]
2315/// Change owner of a backup group
2316pub async fn set_backup_owner(
2317 store: String,
2318 ns: Option<BackupNamespace>,
2319 backup_group: pbs_api_types::BackupGroup,
2320 new_owner: Authid,
2321 rpcenv: &mut dyn RpcEnvironment,
2322) -> Result<(), Error> {
2323 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2324
2325 tokio::task::spawn_blocking(move || {
2326 let ns = ns.unwrap_or_default();
2327 let owner_check_required = check_ns_privs_full(
2328 &store,
2329 &ns,
2330 &auth_id,
2331 PRIV_DATASTORE_MODIFY,
2332 PRIV_DATASTORE_BACKUP,
2333 )?;
2334
2335 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
2336
2337 let backup_group = datastore.backup_group(ns, backup_group);
2338
2339 if owner_check_required {
2340 let owner = backup_group.get_owner()?;
2341
2342 let allowed = match (owner.is_token(), new_owner.is_token()) {
2343 (true, true) => {
2344 // API token to API token, owned by same user
2345 let owner = owner.user();
2346 let new_owner = new_owner.user();
2347 owner == new_owner && Authid::from(owner.clone()) == auth_id
2348 }
2349 (true, false) => {
2350 // API token to API token owner
2351 Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
2352 }
2353 (false, true) => {
2354 // API token owner to API token
2355 owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
2356 }
2357 (false, false) => {
2358 // User to User, not allowed for unprivileged users
2359 false
2360 }
2361 };
2362
2363 if !allowed {
2364 return Err(http_err!(
2365 UNAUTHORIZED,
2366 "{} does not have permission to change owner of backup group '{}' to {}",
2367 auth_id,
2368 backup_group.group(),
2369 new_owner,
2370 ));
2371 }
2372 }
2373
2374 let user_info = CachedUserInfo::new()?;
2375
2376 if !user_info.is_active_auth_id(&new_owner) {
2377 bail!(
2378 "{} '{}' is inactive or non-existent",
2379 if new_owner.is_token() {
2380 "API token".to_string()
2381 } else {
2382 "user".to_string()
2383 },
2384 new_owner
2385 );
2386 }
2387
2388 backup_group.set_owner(&new_owner, true)?;
2389
2390 Ok(())
2391 })
2392 .await?
2393}
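// Hypothetical ownership change (values invented). Without DATASTORE_MODIFY,
// the match above only allows moves between a user and that same user's API
// tokens, never between two users:
//
//   POST /api2/json/admin/datastore/store1/change-owner
//        { "backup-type": "vm", "backup-id": "100", "new-owner": "alice@pbs!token1" }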
2394
2395#[sortable]
2396const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
2397 (
2398 "active-operations",
2399 &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
2400 ),
2401 ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
2402 (
2403 "change-owner",
2404 &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
2405 ),
2406 (
2407 "download",
2408 &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
2409 ),
2410 (
2411 "download-decoded",
2412 &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
2413 ),
2414 ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
2415 (
2416 "gc",
2417 &Router::new()
2418 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
2419 .post(&API_METHOD_START_GARBAGE_COLLECTION),
2420 ),
2421 (
2422 "group-notes",
2423 &Router::new()
2424 .get(&API_METHOD_GET_GROUP_NOTES)
2425 .put(&API_METHOD_SET_GROUP_NOTES),
2426 ),
2427 (
2428 "groups",
2429 &Router::new()
2430 .get(&API_METHOD_LIST_GROUPS)
2431 .delete(&API_METHOD_DELETE_GROUP),
2432 ),
2433 (
2434 "namespace",
2435 // FIXME: move into datastore:: sub-module?!
2436 &crate::api2::admin::namespace::ROUTER,
2437 ),
2438 (
2439 "notes",
2440 &Router::new()
2441 .get(&API_METHOD_GET_NOTES)
2442 .put(&API_METHOD_SET_NOTES),
2443 ),
2444 (
2445 "protected",
2446 &Router::new()
2447 .get(&API_METHOD_GET_PROTECTION)
2448 .put(&API_METHOD_SET_PROTECTION),
2449 ),
2450 ("prune", &Router::new().post(&API_METHOD_PRUNE)),
2451 (
2452 "prune-datastore",
2453 &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
2454 ),
2455 (
2456 "pxar-file-download",
2457 &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
2458 ),
2459 ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
2460 (
2461 "snapshots",
2462 &Router::new()
2463 .get(&API_METHOD_LIST_SNAPSHOTS)
2464 .delete(&API_METHOD_DELETE_SNAPSHOT),
2465 ),
2466 ("status", &Router::new().get(&API_METHOD_STATUS)),
2467 (
2468 "upload-backup-log",
2469 &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
2470 ),
2471 ("verify", &Router::new().post(&API_METHOD_VERIFY)),
2472];
2473
2474const DATASTORE_INFO_ROUTER: Router = Router::new()
2475 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2476 .subdirs(DATASTORE_INFO_SUBDIRS);
2477
2478pub const ROUTER: Router = Router::new()
2479 .get(&API_METHOD_GET_DATASTORE_LIST)
2480 .match_all("store", &DATASTORE_INFO_ROUTER);
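// With match_all("store", ...) every subdir above resolves beneath a
// per-datastore path, typically exposed as (datastore name invented):
//
//   /api2/json/admin/datastore/store1/<subdir>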