// proxmox-backup — src/api2/admin/datastore.rs
//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::ops::Deref;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;
use tracing::{info, warn};

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_rrd_api_types::{RrdMode, RrdTimeframe};
use proxmox_schema::*;
use proxmox_sortable_macro::sortable;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_time::CalendarEvent;
use proxmox_worker_task::WorkerTaskContext;

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupArchiveName,
    BackupContent, BackupGroupDeleteStats, BackupNamespace, BackupType, Counts, CryptMode,
    DataStoreConfig, DataStoreListItem, DataStoreMountStatus, DataStoreStatus,
    GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, MaintenanceMode,
    MaintenanceType, Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState,
    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
    BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
    IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, UPID, UPID_SCHEMA,
    VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::BackupManifest;
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, ensure_datastore_is_mounted, task_tracking, BackupDir, BackupGroup,
    DataStore, LocalChunkReader, StoreProgress,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
    ListAccessibleBackupGroups, NS_PRIVS_OK,
};

use crate::server::jobstate::{compute_schedule_status, Job, JobState};

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(store, operation)?;

    if limited {
        let owner = datastore.get_owner(ns, backup_group)?;
        check_backup_owner(&owner, auth_id)?;
    }

    Ok(datastore)
}
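
// Typical call (sketch, mirroring the handlers below): the first privilege set
// grants full access and skips the owner check, the second grants limited
// access and makes the owner check mandatory.
//
//     let datastore = check_privs_and_load_store(
//         &store,
//         &ns,
//         &auth_id,
//         PRIV_DATASTORE_MODIFY, // full access: any group in the namespace
//         PRIV_DATASTORE_PRUNE,  // limited access: only the caller's own groups
//         Some(Operation::Write),
//         &group,
//     )?;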

fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

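    // Also list the manifest blob itself, so clients see it next to the
    // archives; its crypt mode only reflects whether the manifest is signed.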
    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
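///
/// Typically reachable as `GET /api2/json/admin/datastore/{store}/groups`
/// (sketch; the actual routing is defined by this module's `Router`, which is
/// not part of this excerpt).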
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    datastore
        .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            let owner = match datastore.get_owner(&ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        print_store_and_ns(&store, &ns),
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
            let comment = file_read_firstline(note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "error-on-protected": {
                type: bool,
                optional: true,
                default: true,
                description: "Return error when group cannot be deleted because of protected snapshots",
            }
        },
    },
    returns: {
        type: BackupGroupDeleteStats,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub async fn delete_group(
    store: String,
    ns: Option<BackupNamespace>,
    error_on_protected: bool,
    group: pbs_api_types::BackupGroup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<BackupGroupDeleteStats, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &group,
        )?;

        let delete_stats = datastore.remove_backup_group(&ns, &group)?;
        if !delete_stats.all_removed() {
            if error_on_protected {
                bail!("group only partially deleted due to protected snapshots");
            } else {
                warn!("group only partially deleted due to protected snapshots");
            }
        }
        Ok(delete_stats)
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub async fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        let info = BackupInfo::new(snapshot)?;

        let (_manifest, files) = get_all_snapshot_files(&info)?;

        Ok(files)
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub async fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        snapshot.destroy(false)?;

        Ok(Value::Null)
    })
    .await?
}

#[api(
    serializing: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
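///
/// Typically reachable as `GET /api2/json/admin/datastore/{store}/snapshots`
/// (sketch; routing is defined by this module's `Router`, not shown here).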
pub async fn list_snapshots(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || unsafe {
        list_snapshots_blocking(store, ns, backup_type, backup_id, auth_id)
    })
    .await
    .map_err(|err| format_err!("failed to await blocking task: {err}"))?
}

/// This must not run in a main worker thread as it potentially does tons of I/O.
unsafe fn list_snapshots_blocking(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    auth_id: Authid,
) -> Result<Vec<SnapshotListItem>, Error> {
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_type_ok(ns.clone(), backup_type)?
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => BackupType::iter()
            .filter_map(|backup_type| {
                let group =
                    datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id.clone());
                group.exists().then_some(group)
            })
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(ns.clone())?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.protected;

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification: Option<SnapshotVerifyState> = match manifest.verify_state() {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}' in {} - {}",
                    group.group(),
                    print_store_and_ns(&store, &ns),
                    err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

async fn get_snapshots_count(
    store: &Arc<DataStore>,
    owner: Option<&Authid>,
) -> Result<Counts, Error> {
    let store = Arc::clone(store);
    let owner = owner.cloned();
    tokio::task::spawn_blocking(move || {
        let root_ns = Default::default();
        ListAccessibleBackupGroups::new_with_privs(
            &store,
            root_ns,
            MAX_NAMESPACE_DEPTH,
            Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
            None,
            owner.as_ref(),
        )?
        .try_fold(Counts::default(), |mut counts, group| {
            let group = match group {
                Ok(group) => group,
                Err(_) => return Ok(counts), // TODO: add this as error counts?
            };
            let snapshot_count = group.list_backups()?.len() as u64;

            // only include groups with snapshots, counting/displaying empty groups can confuse users
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                    BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                    BackupType::Host => counts.host.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
            the full statistics. Counts of accessible groups are always returned, if any",
    },
)]
/// Get datastore status.
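///
/// Typically reachable as `GET /api2/json/admin/datastore/{store}/status`
/// (sketch; routing is defined by this module's `Router`, not shown here).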
pub async fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
        true
    } else if store_privs & PRIV_DATASTORE_READ != 0 {
        false // allow at least counts, user can read groups anyway
    } else {
        match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) {
            // avoid leaking existence info if the user doesn't have any privileges below
            Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")),
            _ => false,
        }
    };

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let (counts, gc_status) = if verbose {
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner).await?);
        let gc_status = if store_stats {
            Some(datastore.last_gc_status())
        } else {
            None
        };

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(if store_stats {
        let storage = crate::tools::fs::fs_info(datastore.base_path()).await?;
        DataStoreStatus {
            total: storage.total,
            used: storage.used,
            avail: storage.available,
            gc_status,
            counts,
        }
    } else {
        DataStoreStatus {
            total: 0,
            used: 0,
            avail: 0,
            gc_status,
            counts,
        }
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
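///
/// Returns the UPID of the spawned verification worker; typically reachable as
/// `POST /api2/json/admin/datastore/{store}/verify` (sketch; routing is defined
/// by this module's `Router`, not shown here).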
#[allow(clippy::too_many_arguments)]
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let owner_check_required = check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if ns.is_root() {
                store
            } else {
                format!("{}:{}", store, ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            } else {
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                info!("Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    info!("\t{dir}");
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "keep-options": {
                type: KeepOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "use-task": {
                type: bool,
                default: false,
                optional: true,
                description: "Spins up an asynchronous task that does the work.",
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
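///
/// Typically reachable as `POST /api2/json/admin/datastore/{store}/prune`
/// (sketch; routing is defined by this module's `Router`, not shown here).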
pub fn prune(
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    keep_options: KeepOptions,
    store: String,
    ns: Option<BackupNamespace>,
    param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    let worker_id = format!("{}:{}:{}", store, ns, group);
    let group = datastore.backup_group(ns.clone(), group);

    #[derive(Debug, serde::Serialize)]
    struct PruneResult {
        #[serde(rename = "backup-type")]
        backup_type: BackupType,
        #[serde(rename = "backup-id")]
        backup_id: String,
        #[serde(rename = "backup-time")]
        backup_time: i64,
        keep: bool,
        protected: bool,
        #[serde(skip_serializing_if = "Option::is_none")]
        ns: Option<BackupNamespace>,
    }
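    // Serialized form of one entry (sketch, following the serde renames above;
    // the values are hypothetical):
    //   { "backup-type": "vm", "backup-id": "100", "backup-time": 1700000000,
    //     "keep": true, "protected": false }
    // "ns" is emitted only for snapshots in a non-root namespace.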
    let mut prune_result: Vec<PruneResult> = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &keep_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !keep_options.keeps_something();

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();
            let backup_dir = &info.backup_dir;

            let mut result = PruneResult {
                backup_type: backup_dir.backup_type(),
                backup_id: backup_dir.backup_id().to_owned(),
                backup_time: backup_dir.backup_time(),
                keep,
                protected: mark.protected(),
                ns: None,
            };
            let prune_ns = backup_dir.backup_ns();
            if !prune_ns.is_root() {
                result.ns = Some(prune_ns.to_owned());
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    let prune_group = move |_worker: Arc<WorkerTask>| {
        if keep_all {
            info!("No prune selection - keeping all files.");
        } else {
            let mut opts = Vec::new();
            if !ns.is_root() {
                opts.push(format!("--ns {ns}"));
            }
            crate::server::cli_keep_options(&mut opts, &keep_options);

            info!("retention options: {}", opts.join(" "));
            info!(
                "Starting prune on {} group \"{}\"",
                print_store_and_ns(&store, &ns),
                group.group(),
            );
        }

        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();
            let backup_dir = &info.backup_dir;

            let backup_time = backup_dir.backup_time();
            let timestamp = backup_dir.backup_time_string();
            let group: &pbs_api_types::BackupGroup = backup_dir.as_ref();

            let msg = format!("{}/{}/{timestamp} {mark}", group.ty, group.id);

            info!("{msg}");

            prune_result.push(PruneResult {
                backup_type: group.ty,
                backup_id: group.id.clone(),
                backup_time,
                keep,
                protected: mark.protected(),
                ns: None,
            });

            if !keep {
                if let Err(err) = backup_dir.destroy(false) {
                    warn!(
                        "failed to remove dir {:?}: {}",
                        backup_dir.relative_path(),
                        err,
                    );
                }
            }
        }
        prune_result
    };

    if param["use-task"].as_bool().unwrap_or(false) {
        let upid = WorkerTask::spawn(
            "prune",
            Some(worker_id),
            auth_id.to_string(),
            true,
            move |worker| async move {
                let _ = prune_group(worker.clone());
                Ok(())
            },
        )?;
        Ok(json!(upid))
    } else {
        let (worker, _) = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
        let result = prune_group(worker.clone());
        worker.log_result(&Ok(()));
        Ok(json!(result))
    }
}

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneJobOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneJobOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let user_info = CachedUserInfo::new()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    user_info.check_privs(
        &auth_id,
        &prune_options.acl_path(&store),
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let ns = prune_options.ns.clone().unwrap_or_default();
    let worker_id = format!("{}:{}", store, ns);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |_worker| crate::server::prune_datastore(auth_id, prune_options, datastore, dry_run),
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionJobStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionJobStatus, Error> {
    let (config, _) = pbs_config::datastore::config()?;
    let store_config: DataStoreConfig = config.lookup("datastore", &store)?;

    let mut info = GarbageCollectionJobStatus {
        store: store.clone(),
        schedule: store_config.gc_schedule,
        ..Default::default()
    };

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let status_in_memory = datastore.last_gc_status();
    let state_file = JobState::load("garbage_collection", &store)
        .map_err(|err| log::error!("could not open GC statefile for {store}: {err}"))
        .ok();

    let mut last = proxmox_time::epoch_i64();

    if let Some(ref upid) = status_in_memory.upid {
        let mut computed_schedule: JobScheduleStatus = JobScheduleStatus::default();
        if let Some(state) = state_file {
            if let Ok(cs) = compute_schedule_status(&state, Some(upid)) {
                computed_schedule = cs;
            }
        }

        if let Some(endtime) = computed_schedule.last_run_endtime {
            last = endtime;
            if let Ok(parsed_upid) = upid.parse::<UPID>() {
                info.duration = Some(endtime - parsed_upid.starttime);
            }
        }

        info.next_run = computed_schedule.next_run;
        info.last_run_endtime = computed_schedule.last_run_endtime;
        info.last_run_state = computed_schedule.last_run_state;
    }

    info.next_run = info
        .schedule
        .as_ref()
        .and_then(|s| {
            s.parse::<CalendarEvent>()
                .map_err(|err| log::error!("{err}"))
                .ok()
        })
        .and_then(|e| {
            e.compute_next_event(last)
                .map_err(|err| log::error!("{err}"))
                .ok()
        })
        .flatten();

    info.status = status_in_memory;

    Ok(info)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in config.sections {
        let acl_path = &["datastore", &store];
        let user_privs = user_info.lookup_privs(&auth_id, acl_path);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut allow_id = false;
        if !allowed {
            if let Ok(any_privs) = user_info.any_privs_below(&auth_id, acl_path, NS_PRIVS_OK) {
                allow_id = any_privs;
            }
        }

        if allowed || allow_id {
            let store_config: DataStoreConfig = serde_json::from_value(data)?;

            let mount_status = match pbs_datastore::get_datastore_mount_status(&store_config) {
                Some(true) => DataStoreMountStatus::Mounted,
                Some(false) => DataStoreMountStatus::NotMounted,
                None => DataStoreMountStatus::NonRemovable,
            };

            list.push(DataStoreListItem {
                store: store.clone(),
                comment: store_config.comment.filter(|_| allowed),
                mount_status,
                maintenance: store_config.maintenance_mode,
            });
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(store, &backup_ns),
            backup_dir,
            file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir_api.group,
        )?;

        let file_name: BackupArchiveName =
            required_string_param(&param, "file-name")?.try_into()?;
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(store, &backup_ns),
            backup_dir_api,
            file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(file_name.as_ref());

        let body = match file_name.archive_type() {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            ArchiveType::Blob => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            0,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir_api.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let file_name = &CLIENT_LOG_BLOB_NAME;

        let mut path = backup_dir.full_path();
        path.push(file_name.as_ref());

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!(
            "Upload backup log to {} {backup_dir_api}/{file_name}",
            print_store_and_ns(store, &backup_ns),
            file_name = file_name.deref(),
        );

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

fn decode_path(path: &str) -> Result<Vec<u8>, Error> {
    if path != "root" && path != "/" {
        base64::decode(path).map_err(|err| format_err!("base64 decoding of path failed - {err}"))
    } else {
        Ok(vec![b'/'])
    }
}
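
// Examples (sketch): decode_path("root") and decode_path("/") both yield b"/",
// while any other input is treated as base64, e.g.
// decode_path("Zm9vL2Jhcg==") == Ok(b"foo/bar".to_vec()).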

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            },
            "archive-name": {
                type: BackupArchiveName,
                optional: true,
            },
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub async fn catalog(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    archive_name: Option<BackupArchiveName>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let file_name = archive_name.clone().unwrap_or_else(|| CATALOG_NAME.clone());

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    let (manifest, files) = read_backup_index(&backup_dir)?;
    for file in files {
        if file.filename == file_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{file_name}' - is encrypted");
        }
    }

    if archive_name.is_none() {
        tokio::task::spawn_blocking(move || {
            let mut path = datastore.base_path();
            path.push(backup_dir.relative_path());
            path.push(file_name.as_ref());

            let index = DynamicIndexReader::open(&path)
                .map_err(|err| format_err!("unable to read dynamic index '{path:?}' - {err}"))?;

            let (csum, size) = index.compute_csum();
            manifest.verify_file(&file_name, &csum, size)?;

            let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
            let reader = BufferedDynamicReader::new(index, chunk_reader);

            let mut catalog_reader = CatalogReader::new(reader);

            let path = decode_path(&filepath)?;
            catalog_reader.list_dir_contents(&path)
        })
        .await?
    } else {
        let (archive_name, _payload_archive_name) =
            pbs_client::tools::get_pxar_archive_names(&file_name, &manifest)?;
        let (reader, archive_size) =
            get_local_pxar_reader(datastore.clone(), &manifest, &backup_dir, &archive_name)?;

        // only care about the metadata, don't attach a payload reader
        let reader = pxar::PxarVariant::Unified(reader);
        let accessor = Accessor::new(reader, archive_size).await?;

        let file_path = decode_path(&filepath)?;
        pbs_client::pxar::tools::pxar_metadata_catalog_lookup(
            accessor,
            OsStr::from_bytes(&file_path),
            None,
        )
        .await
    }
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
            ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
            ("archive-name", true, &BackupArchiveName::API_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

fn get_local_pxar_reader(
    datastore: Arc<DataStore>,
    manifest: &BackupManifest,
    backup_dir: &BackupDir,
    pxar_name: &BackupArchiveName,
) -> Result<(LocalDynamicReadAt<LocalChunkReader>, u64), Error> {
    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(pxar_name.as_ref());

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(pxar_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let archive_size = reader.archive_size();

    Ok((LocalDynamicReadAt::new(reader), archive_size))
}
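
// Reader pipeline used above (sketch): DynamicIndexReader resolves chunk
// digests, LocalChunkReader loads those chunks from the datastore,
// BufferedDynamicReader turns that into sequential reads, and
// LocalDynamicReadAt adds the random-access interface the pxar Accessor needs.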

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let ns = optional_ns_param(&param)?;

        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let (pxar_name, file_path) = if let Some(archive_name) = param["archive-name"].as_str() {
            let archive_name = archive_name.as_bytes().to_owned();
            (archive_name, base64::decode(&filepath)?)
        } else {
            let mut split = components.splitn(2, |c| *c == b'/');
            let pxar_name = split.next().unwrap();
            let file_path = split.next().unwrap_or(b"/");
            (pxar_name.to_owned(), file_path.to_owned())
        };
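        // Fallback-path example (sketch, hypothetical values): without an
        // explicit "archive-name", filepath = base64("/root.pxar.didx/etc/hosts")
        // yields pxar_name = "root.pxar.didx" and file_path = "etc/hosts".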
1854 let pxar_name: BackupArchiveName = std::str::from_utf8(&pxar_name)?.try_into()?;
1855 let (manifest, files) = read_backup_index(&backup_dir)?;
1856 for file in files {
1857 if file.filename == pxar_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) {
1858 bail!("cannot decode '{}' - is encrypted", pxar_name);
1859 }
1860 }
1861
1862 let (pxar_name, payload_archive_name) =
1863 pbs_client::tools::get_pxar_archive_names(&pxar_name, &manifest)?;
1864 let (reader, archive_size) =
1865 get_local_pxar_reader(datastore.clone(), &manifest, &backup_dir, &pxar_name)?;
1866
1867 let reader = if let Some(payload_archive_name) = payload_archive_name {
1868 let payload_input =
1869 get_local_pxar_reader(datastore, &manifest, &backup_dir, &payload_archive_name)?;
1870 pxar::PxarVariant::Split(reader, payload_input)
1871 } else {
1872 pxar::PxarVariant::Unified(reader)
1873 };
1874 let decoder = Accessor::new(reader, archive_size).await?;
1875
1876 let root = decoder.open_root().await?;
1877 let path = OsStr::from_bytes(&file_path).to_os_string();
1878 let file = root
1879 .lookup(&path)
1880 .await?
1881 .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
1882
1883 let body = match file.kind() {
1884 EntryKind::File { .. } => Body::wrap_stream(
1885 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1886 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1887 err
1888 }),
1889 ),
1890 EntryKind::Hardlink(_) => Body::wrap_stream(
1891 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1892 .map_err(move |err| {
1893 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
1894 err
1895 }),
1896 ),
1897 EntryKind::Directory => {
1898 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
1899 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
1900 if tar {
1901 proxmox_rest_server::spawn_internal_task(create_tar(
1902 channelwriter,
1903 decoder,
1904 path.clone(),
1905 ));
1906 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1907 Body::wrap_stream(zstdstream.map_err(move |err| {
1908 log::error!("error during streaming of tar.zst '{:?}' - {}", path, err);
1909 err
1910 }))
1911 } else {
1912 proxmox_rest_server::spawn_internal_task(create_zip(
1913 channelwriter,
1914 decoder,
1915 path.clone(),
1916 ));
1917 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
1918 log::error!("error during streaming of zip '{:?}' - {}", path, err);
1919 err
1920 }))
1921 }
1922 }
1923 other => bail!("cannot download file of type {:?}", other),
1924 };
1925
1926 // fixme: set other headers ?
1927 Ok(Response::builder()
1928 .status(StatusCode::OK)
1929 .header(header::CONTENT_TYPE, "application/octet-stream")
1930 .body(body)
1931 .unwrap())
1932 }
1933 .boxed()
1934 }
1935
1936 #[api(
1937 input: {
1938 properties: {
1939 store: {
1940 schema: DATASTORE_SCHEMA,
1941 },
1942 timeframe: {
1943 type: RrdTimeframe,
1944 },
1945 cf: {
1946 type: RrdMode,
1947 },
1948 },
1949 },
1950 access: {
1951 permission: &Permission::Privilege(
1952 &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1953 },
1954 )]
1955 /// Read datastore stats
1956 pub fn get_rrd_stats(
1957 store: String,
1958 timeframe: RrdTimeframe,
1959 cf: RrdMode,
1960 _param: Value,
1961 ) -> Result<Value, Error> {
1962 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
1963 let disk_manager = crate::tools::disks::DiskManage::new();
1964
1965 let mut rrd_fields = vec![
1966 "total",
1967 "available",
1968 "used",
1969 "read_ios",
1970 "read_bytes",
1971 "write_ios",
1972 "write_bytes",
1973 ];
1974
1975 // we do not have io_ticks for zpools, so don't include them
1976 match disk_manager.find_mounted_device(&datastore.base_path()) {
1977 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
1978 _ => rrd_fields.push("io_ticks"),
1979 };
1980
1981 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1982 }
1983
1984 #[api(
1985 input: {
1986 properties: {
1987 store: {
1988 schema: DATASTORE_SCHEMA,
1989 },
1990 },
1991 },
1992 access: {
1993 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1994 },
1995 )]
1996 /// Read the currently active read/write operations on a datastore
1997 pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
1998 let active_operations = task_tracking::get_active_operations(&store)?;
1999 Ok(json!({
2000 "read": active_operations.read,
2001 "write": active_operations.write,
2002 }))
2003 }
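// Example response body (the counts are illustrative):
//
//   { "read": 1, "write": 0 }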
2004
2005 #[api(
2006 input: {
2007 properties: {
2008 store: { schema: DATASTORE_SCHEMA },
2009 ns: {
2010 type: BackupNamespace,
2011 optional: true,
2012 },
2013 backup_group: {
2014 type: pbs_api_types::BackupGroup,
2015 flatten: true,
2016 },
2017 },
2018 },
2019 access: {
2020 permission: &Permission::Anybody,
2021 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2022 or DATASTORE_BACKUP and being the owner of the group",
2023 },
2024 )]
2025 /// Get "notes" for a backup group
2026 pub fn get_group_notes(
2027 store: String,
2028 ns: Option<BackupNamespace>,
2029 backup_group: pbs_api_types::BackupGroup,
2030 rpcenv: &mut dyn RpcEnvironment,
2031 ) -> Result<String, Error> {
2032 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2033 let ns = ns.unwrap_or_default();
2034
2035 let datastore = check_privs_and_load_store(
2036 &store,
2037 &ns,
2038 &auth_id,
2039 PRIV_DATASTORE_AUDIT,
2040 PRIV_DATASTORE_BACKUP,
2041 Some(Operation::Read),
2042 &backup_group,
2043 )?;
2044
2045 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
2046 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
2047 }
2048
2049 #[api(
2050 input: {
2051 properties: {
2052 store: { schema: DATASTORE_SCHEMA },
2053 ns: {
2054 type: BackupNamespace,
2055 optional: true,
2056 },
2057 backup_group: {
2058 type: pbs_api_types::BackupGroup,
2059 flatten: true,
2060 },
2061 notes: {
2062 description: "A multiline text.",
2063 },
2064 },
2065 },
2066 access: {
2067 permission: &Permission::Anybody,
2068 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2069 or DATASTORE_BACKUP and being the owner of the group",
2070 },
2071 )]
2072 /// Set "notes" for a backup group
2073 pub fn set_group_notes(
2074 store: String,
2075 ns: Option<BackupNamespace>,
2076 backup_group: pbs_api_types::BackupGroup,
2077 notes: String,
2078 rpcenv: &mut dyn RpcEnvironment,
2079 ) -> Result<(), Error> {
2080 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2081 let ns = ns.unwrap_or_default();
2082
2083 let datastore = check_privs_and_load_store(
2084 &store,
2085 &ns,
2086 &auth_id,
2087 PRIV_DATASTORE_MODIFY,
2088 PRIV_DATASTORE_BACKUP,
2089 Some(Operation::Write),
2090 &backup_group,
2091 )?;
2092
2093 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
2094 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
2095
2096 Ok(())
2097 }
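// A hedged usage sketch for the two group-notes handlers above (routed as
// "group-notes" in DATASTORE_INFO_SUBDIRS below); the store and group values
// are made up, "backup-type"/"backup-id" are the flattened BackupGroup
// fields:
//
//   GET /api2/json/admin/datastore/store1/group-notes?backup-type=vm&backup-id=100
//   PUT /api2/json/admin/datastore/store1/group-notes
//       { "backup-type": "vm", "backup-id": "100", "notes": "nightly VM backups" }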
2098
2099 #[api(
2100 input: {
2101 properties: {
2102 store: { schema: DATASTORE_SCHEMA },
2103 ns: {
2104 type: BackupNamespace,
2105 optional: true,
2106 },
2107 backup_dir: {
2108 type: pbs_api_types::BackupDir,
2109 flatten: true,
2110 },
2111 },
2112 },
2113 access: {
2114 permission: &Permission::Anybody,
2115 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2116 or DATASTORE_BACKUP and being the owner of the group",
2117 },
2118 )]
2119 /// Get "notes" for a specific backup
2120 pub fn get_notes(
2121 store: String,
2122 ns: Option<BackupNamespace>,
2123 backup_dir: pbs_api_types::BackupDir,
2124 rpcenv: &mut dyn RpcEnvironment,
2125 ) -> Result<String, Error> {
2126 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2127 let ns = ns.unwrap_or_default();
2128
2129 let datastore = check_privs_and_load_store(
2130 &store,
2131 &ns,
2132 &auth_id,
2133 PRIV_DATASTORE_AUDIT,
2134 PRIV_DATASTORE_BACKUP,
2135 Some(Operation::Read),
2136 &backup_dir.group,
2137 )?;
2138
2139 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2140
2141 let (manifest, _) = backup_dir.load_manifest()?;
2142
2143 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
2144
2145 Ok(String::from(notes))
2146 }
2147
2148 #[api(
2149 input: {
2150 properties: {
2151 store: { schema: DATASTORE_SCHEMA },
2152 ns: {
2153 type: BackupNamespace,
2154 optional: true,
2155 },
2156 backup_dir: {
2157 type: pbs_api_types::BackupDir,
2158 flatten: true,
2159 },
2160 notes: {
2161 description: "A multiline text.",
2162 },
2163 },
2164 },
2165 access: {
2166 permission: &Permission::Anybody,
2167 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2168 or DATASTORE_BACKUP and being the owner of the group",
2169 },
2170 )]
2171 /// Set "notes" for a specific backup
2172 pub fn set_notes(
2173 store: String,
2174 ns: Option<BackupNamespace>,
2175 backup_dir: pbs_api_types::BackupDir,
2176 notes: String,
2177 rpcenv: &mut dyn RpcEnvironment,
2178 ) -> Result<(), Error> {
2179 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2180 let ns = ns.unwrap_or_default();
2181
2182 let datastore = check_privs_and_load_store(
2183 &store,
2184 &ns,
2185 &auth_id,
2186 PRIV_DATASTORE_MODIFY,
2187 PRIV_DATASTORE_BACKUP,
2188 Some(Operation::Write),
2189 &backup_dir.group,
2190 )?;
2191
2192 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2193
2194 backup_dir
2195 .update_manifest(|manifest| {
2196 manifest.unprotected["notes"] = notes.into();
2197 })
2198 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
2199
2200 Ok(())
2201 }
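// Note the asymmetry: group notes (further up) are kept in a separate "notes"
// file next to the group, while per-snapshot notes live in the "unprotected"
// section of the snapshot manifest, so get_notes/set_notes read and rewrite
// the manifest blob instead of touching a dedicated file.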
2202
2203 #[api(
2204 input: {
2205 properties: {
2206 store: { schema: DATASTORE_SCHEMA },
2207 ns: {
2208 type: BackupNamespace,
2209 optional: true,
2210 },
2211 backup_dir: {
2212 type: pbs_api_types::BackupDir,
2213 flatten: true,
2214 },
2215 },
2216 },
2217 access: {
2218 permission: &Permission::Anybody,
2219 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2220 or DATASTORE_BACKUP and being the owner of the group",
2221 },
2222 )]
2223 /// Query protection for a specific backup
2224 pub fn get_protection(
2225 store: String,
2226 ns: Option<BackupNamespace>,
2227 backup_dir: pbs_api_types::BackupDir,
2228 rpcenv: &mut dyn RpcEnvironment,
2229 ) -> Result<bool, Error> {
2230 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2231 let ns = ns.unwrap_or_default();
2232 let datastore = check_privs_and_load_store(
2233 &store,
2234 &ns,
2235 &auth_id,
2236 PRIV_DATASTORE_AUDIT,
2237 PRIV_DATASTORE_BACKUP,
2238 Some(Operation::Read),
2239 &backup_dir.group,
2240 )?;
2241
2242 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2243
2244 Ok(backup_dir.is_protected())
2245 }
2246
2247 #[api(
2248 input: {
2249 properties: {
2250 store: { schema: DATASTORE_SCHEMA },
2251 ns: {
2252 type: BackupNamespace,
2253 optional: true,
2254 },
2255 backup_dir: {
2256 type: pbs_api_types::BackupDir,
2257 flatten: true,
2258 },
2259 protected: {
2260 description: "Enable/disable protection.",
2261 },
2262 },
2263 },
2264 access: {
2265 permission: &Permission::Anybody,
2266 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2267 or DATASTORE_BACKUP and being the owner of the group",
2268 },
2269 )]
2270 /// Enable or disable protection for a specific backup
2271 pub async fn set_protection(
2272 store: String,
2273 ns: Option<BackupNamespace>,
2274 backup_dir: pbs_api_types::BackupDir,
2275 protected: bool,
2276 rpcenv: &mut dyn RpcEnvironment,
2277 ) -> Result<(), Error> {
2278 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2279
2280 tokio::task::spawn_blocking(move || {
2281 let ns = ns.unwrap_or_default();
2282 let datastore = check_privs_and_load_store(
2283 &store,
2284 &ns,
2285 &auth_id,
2286 PRIV_DATASTORE_MODIFY,
2287 PRIV_DATASTORE_BACKUP,
2288 Some(Operation::Write),
2289 &backup_dir.group,
2290 )?;
2291
2292 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
2293
2294 datastore.update_protection(&backup_dir, protected)
2295 })
2296 .await?
2297 }
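// Like set_protection above, set_backup_owner below is an async handler whose
// actual work is blocking file I/O; it is therefore moved onto the blocking
// thread pool via tokio::task::spawn_blocking and the JoinHandle awaited.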
2298
2299 #[api(
2300 input: {
2301 properties: {
2302 store: { schema: DATASTORE_SCHEMA },
2303 ns: {
2304 type: BackupNamespace,
2305 optional: true,
2306 },
2307 backup_group: {
2308 type: pbs_api_types::BackupGroup,
2309 flatten: true,
2310 },
2311 "new-owner": {
2312 type: Authid,
2313 },
2314 },
2315 },
2316 access: {
2317 permission: &Permission::Anybody,
2318 description: "Datastore.Modify on whole datastore, or changing ownership between user and \
2319 a user's token for owned backups with Datastore.Backup"
2320 },
2321 )]
2322 /// Change owner of a backup group
2323 pub async fn set_backup_owner(
2324 store: String,
2325 ns: Option<BackupNamespace>,
2326 backup_group: pbs_api_types::BackupGroup,
2327 new_owner: Authid,
2328 rpcenv: &mut dyn RpcEnvironment,
2329 ) -> Result<(), Error> {
2330 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2331
2332 tokio::task::spawn_blocking(move || {
2333 let ns = ns.unwrap_or_default();
2334 let owner_check_required = check_ns_privs_full(
2335 &store,
2336 &ns,
2337 &auth_id,
2338 PRIV_DATASTORE_MODIFY,
2339 PRIV_DATASTORE_BACKUP,
2340 )?;
2341
2342 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
2343
2344 let backup_group = datastore.backup_group(ns, backup_group);
2345
2346 if owner_check_required {
2347 let owner = backup_group.get_owner()?;
2348
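// Ownership changes allowed for callers subject to the owner check:
//
//   current owner  new owner  allowed if
//   token          token      both belong to the same user, who is the caller
//   token          user       caller owns the token and is the new owner
//   user           token      caller is the current owner and owns the token
//   user           user       never (requires Datastore.Modify instead)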
2349 let allowed = match (owner.is_token(), new_owner.is_token()) {
2350 (true, true) => {
2351 // API token to API token, owned by same user
2352 let owner = owner.user();
2353 let new_owner = new_owner.user();
2354 owner == new_owner && Authid::from(owner.clone()) == auth_id
2355 }
2356 (true, false) => {
2357 // API token to API token owner
2358 Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
2359 }
2360 (false, true) => {
2361 // API token owner to API token
2362 owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
2363 }
2364 (false, false) => {
2365 // User to User, not allowed for unprivileged users
2366 false
2367 }
2368 };
2369
2370 if !allowed {
2371 return Err(http_err!(
2372 UNAUTHORIZED,
2373 "{} does not have permission to change owner of backup group '{}' to {}",
2374 auth_id,
2375 backup_group.group(),
2376 new_owner,
2377 ));
2378 }
2379 }
2380
2381 let user_info = CachedUserInfo::new()?;
2382
2383 if !user_info.is_active_auth_id(&new_owner) {
2384 bail!(
2385 "{} '{}' is inactive or non-existent",
2386 if new_owner.is_token() {
2387 "API token".to_string()
2388 } else {
2389 "user".to_string()
2390 },
2391 new_owner
2392 );
2393 }
2394
2395 backup_group.set_owner(&new_owner, true)?;
2396
2397 Ok(())
2398 })
2399 .await?
2400 }
2401
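/// Create the mount point and the datastore directory (owned by the backup
/// user) on the freshly mounted device, then bind mount the store path from
/// the temporary mount to its canonical location.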
2402 fn setup_mounted_device(datastore: &DataStoreConfig, tmp_mount_path: &str) -> Result<(), Error> {
2403 let default_options = proxmox_sys::fs::CreateOptions::new();
2404 let mount_point = datastore.absolute_path();
2405 let full_store_path = format!(
2406 "{tmp_mount_path}/{}",
2407 datastore.path.trim_start_matches('/')
2408 );
2409 let backup_user = pbs_config::backup_user()?;
2410 let options = CreateOptions::new()
2411 .owner(backup_user.uid)
2412 .group(backup_user.gid);
2413
2414 proxmox_sys::fs::create_path(
2415 &mount_point,
2416 Some(default_options.clone()),
2417 Some(options.clone()),
2418 )
2419 .map_err(|e| format_err!("creating mountpoint '{mount_point}' failed: {e}"))?;
2420
2421 // can't be created before it is mounted, so we have to do it here
2422 proxmox_sys::fs::create_path(
2423 &full_store_path,
2424 Some(default_options.clone()),
2425 Some(options.clone()),
2426 )
2427 .map_err(|e| format_err!("creating datastore path '{full_store_path}' failed: {e}"))?;
2428
2429 info!(
2430 "bind mount '{}'({}) to '{}'",
2431 datastore.name, datastore.path, mount_point
2432 );
2433
2434 crate::tools::disks::bind_mount(Path::new(&full_store_path), Path::new(&mount_point))
2435 }
2436
2437 /// Here we
2438 ///
2439 /// 1. mount the removable device to `<PBS_RUN_DIR>/mount/<RANDOM_UUID>`
2440 /// 2. bind mount `<PBS_RUN_DIR>/mount/<RANDOM_UUID>/<datastore.path>` to `/mnt/datastore/<datastore.name>`
2441 /// 3. unmount `<PBS_RUN_DIR>/mount/<RANDOM_UUID>`
2442 ///
2443 /// leaving us with the datastore being mounted directly with its name under /mnt/datastore/...
2444 ///
2445 /// The reason for the randomized device mounting paths is to avoid two tasks trying to mount to
2446 /// the same path; this is *very* unlikely, since the device is only mounted for a short time,
2447 /// but technically possible.
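///
/// A minimal sketch of driving this directly, mirroring what the `mount` API
/// handler below does inside its worker task ("store1" is a made-up name):
///
/// ```ignore
/// let (section_config, _digest) = pbs_config::datastore::config()?;
/// let datastore: DataStoreConfig = section_config.lookup("datastore", "store1")?;
/// do_mount_device(datastore)?;
/// ```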
2448 pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
2449 if let Some(uuid) = datastore.backing_device.as_ref() {
2450 if pbs_datastore::get_datastore_mount_status(&datastore) == Some(true) {
2451 info!(
2452 "device is already mounted at '{}'",
2453 datastore.absolute_path()
2454 );
2455 return Ok(());
2456 }
2457 let tmp_mount_path = format!(
2458 "{}/{:x}",
2459 pbs_buildcfg::rundir!("/mount"),
2460 proxmox_uuid::Uuid::generate()
2461 );
2462
2463 let default_options = proxmox_sys::fs::CreateOptions::new();
2464 proxmox_sys::fs::create_path(
2465 &tmp_mount_path,
2466 Some(default_options.clone()),
2467 Some(default_options.clone()),
2468 )?;
2469
2470 info!("temporarily mounting '{uuid}' to '{}'", tmp_mount_path);
2471 crate::tools::disks::mount_by_uuid(uuid, Path::new(&tmp_mount_path))
2472 .map_err(|e| format_err!("mounting to tmp path failed: {e}"))?;
2473
2474 let setup_result = setup_mounted_device(&datastore, &tmp_mount_path);
2475
2476 let mut unmounted = true;
2477 if let Err(e) = crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path)) {
2478 unmounted = false;
2479 warn!("unmounting from tmp path '{tmp_mount_path} failed: {e}'");
2480 }
2481 if unmounted {
2482 if let Err(e) = std::fs::remove_dir(std::path::Path::new(&tmp_mount_path)) {
2483 warn!("removing tmp path '{tmp_mount_path} failed: {e}'");
2484 }
2485 }
2486
2487 setup_result.map_err(|e| {
2488 format_err!(
2489 "Datastore '{}' could not be created: {}.",
2490 datastore.name,
2491 e
2492 )
2493 })?;
2494 } else {
2495 bail!(
2496 "Datastore '{}' cannot be mounted because it is not removable.",
2497 datastore.name
2498 )
2499 }
2500 Ok(())
2501 }
2502
2503 #[api(
2504 protected: true,
2505 input: {
2506 properties: {
2507 store: {
2508 schema: DATASTORE_SCHEMA,
2509 },
2510 }
2511 },
2512 returns: {
2513 schema: UPID_SCHEMA,
2514 },
2515 access: {
2516 permission: &Permission::And(&[
2517 &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
2518 &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
2519 ]),
2520 },
2521 )]
2522 /// Mount removable datastore.
2523 pub fn mount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
2524 let (section_config, _digest) = pbs_config::datastore::config()?;
2525 let datastore: DataStoreConfig = section_config.lookup("datastore", &store)?;
2526
2527 if datastore.backing_device.is_none() {
2528 bail!("datastore '{store}' is not removable");
2529 }
2530
2531 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2532 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
2533
2534 let upid = WorkerTask::new_thread(
2535 "mount-device",
2536 Some(store),
2537 auth_id.to_string(),
2538 to_stdout,
2539 move |_worker| do_mount_device(datastore),
2540 )?;
2541
2542 Ok(json!(upid))
2543 }
2544
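/// Take the datastore config lock and verify that the store is still in
/// maintenance mode 'Unmount'; on success, return the held lock together with
/// the current config entry so the caller can clear the mode atomically.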
2545 fn expect_maintenance_unmounting(
2546 store: &str,
2547 ) -> Result<(pbs_config::BackupLockGuard, DataStoreConfig), Error> {
2548 let lock = pbs_config::datastore::lock_config()?;
2549 let (section_config, _digest) = pbs_config::datastore::config()?;
2550 let store_config: DataStoreConfig = section_config.lookup("datastore", store)?;
2551
2552 if store_config
2553 .get_maintenance_mode()
2554 .map_or(true, |m| m.ty != MaintenanceType::Unmount)
2555 {
2556 bail!("maintenance mode is not 'Unmount'");
2557 }
2558
2559 Ok((lock, store_config))
2560 }
2561
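/// Clear the maintenance mode of the given datastore config and persist it;
/// the config lock passed in (and held for the duration) makes the
/// read-modify-write cycle safe.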
2562 fn unset_maintenance(
2563 _lock: pbs_config::BackupLockGuard,
2564 mut config: DataStoreConfig,
2565 ) -> Result<(), Error> {
2566 let (mut section_config, _digest) = pbs_config::datastore::config()?;
2567 config.maintenance_mode = None;
2568 section_config.set_data(&config.name, "datastore", &config)?;
2569 pbs_config::datastore::save_config(&section_config)?;
2570 Ok(())
2571 }
2572
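/// Wait until no read or write operations are active on the datastore, then
/// unmount its backing device and clear the 'Unmount' maintenance mode. If
/// the worker is aborted, or the maintenance mode was changed externally, the
/// mode is reset on a best-effort basis and the task bails out instead.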
2573 fn do_unmount_device(
2574 datastore: DataStoreConfig,
2575 worker: Option<&dyn WorkerTaskContext>,
2576 ) -> Result<(), Error> {
2577 if datastore.backing_device.is_none() {
2578 bail!("can't unmount non-removable datastore");
2579 }
2580 let mount_point = datastore.absolute_path();
2581
2582 let mut active_operations = task_tracking::get_active_operations(&datastore.name)?;
2583 let mut old_status = String::new();
2584 let mut aborted = false;
2585 while active_operations.read + active_operations.write > 0 {
2586 if let Some(worker) = worker {
2587 if worker.abort_requested() || expect_maintenance_unmounting(&datastore.name).is_err() {
2588 aborted = true;
2589 break;
2590 }
2591 let status = format!(
2592 "cannot unmount yet, still {} read and {} write operations active",
2593 active_operations.read, active_operations.write
2594 );
2595 if status != old_status {
2596 info!("{status}");
2597 old_status = status;
2598 }
2599 }
2600 std::thread::sleep(std::time::Duration::from_secs(1));
2601 active_operations = task_tracking::get_active_operations(&datastore.name)?;
2602 }
2603
2604 if aborted || worker.map_or(false, |w| w.abort_requested()) {
2605 let _ = expect_maintenance_unmounting(&datastore.name)
2606 .inspect_err(|e| warn!("maintenance mode was not as expected: {e}"))
2607 .and_then(|(lock, config)| {
2608 unset_maintenance(lock, config)
2609 .inspect_err(|e| warn!("could not reset maintenance mode: {e}"))
2610 });
2611 bail!("aborted, due to user request");
2612 } else {
2613 let (lock, config) = expect_maintenance_unmounting(&datastore.name)?;
2614 crate::tools::disks::unmount_by_mountpoint(Path::new(&mount_point))?;
2615 unset_maintenance(lock, config)
2616 .map_err(|e| format_err!("could not reset maintenance mode: {e}"))?;
2617 }
2618 Ok(())
2619 }
2620
2621 #[api(
2622 protected: true,
2623 input: {
2624 properties: {
2625 store: { schema: DATASTORE_SCHEMA },
2626 },
2627 },
2628 returns: {
2629 schema: UPID_SCHEMA,
2630 },
2631 access: {
2632 permission: &Permission::And(&[
2633 &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
2634 &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
2635 ]),
2636 }
2637 )]
2638 /// Unmount a removable device that is associated with the datastore
2639 pub async fn unmount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
2640 let _lock = pbs_config::datastore::lock_config()?;
2641 let (mut section_config, _digest) = pbs_config::datastore::config()?;
2642 let mut datastore: DataStoreConfig = section_config.lookup("datastore", &store)?;
2643
2644 if datastore.backing_device.is_none() {
2645 bail!("datastore '{store}' is not removable");
2646 }
2647
2648 ensure_datastore_is_mounted(&datastore)?;
2649
2650 datastore.set_maintenance_mode(Some(MaintenanceMode {
2651 ty: MaintenanceType::Unmount,
2652 message: None,
2653 }))?;
2654 section_config.set_data(&store, "datastore", &datastore)?;
2655 pbs_config::datastore::save_config(&section_config)?;
2656
2657 drop(_lock);
2658
2659 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
2660 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
2661
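// Best-effort notification of the running proxy: send a one-line JSON command
// over its command socket so it re-evaluates its datastore cache now that the
// 'Unmount' maintenance mode is set. The message looks like this (store name
// illustrative):
//
//   {"command":"update-datastore-cache","args":"store1"}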
2662 if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)
2663 {
2664 let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid);
2665 let _ = proxmox_daemon::command_socket::send_raw(
2666 sock,
2667 &format!(
2668 "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n",
2669 &store
2670 ),
2671 )
2672 .await;
2673 }
2674
2675 let upid = WorkerTask::new_thread(
2676 "unmount-device",
2677 Some(store),
2678 auth_id.to_string(),
2679 to_stdout,
2680 move |worker| do_unmount_device(datastore, Some(&worker)),
2681 )?;
2682
2683 Ok(json!(upid))
2684 }
2685
2686 #[sortable]
2687 const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
2688 (
2689 "active-operations",
2690 &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
2691 ),
2692 ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
2693 (
2694 "change-owner",
2695 &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
2696 ),
2697 (
2698 "download",
2699 &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
2700 ),
2701 (
2702 "download-decoded",
2703 &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
2704 ),
2705 ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
2706 (
2707 "gc",
2708 &Router::new()
2709 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
2710 .post(&API_METHOD_START_GARBAGE_COLLECTION),
2711 ),
2712 (
2713 "group-notes",
2714 &Router::new()
2715 .get(&API_METHOD_GET_GROUP_NOTES)
2716 .put(&API_METHOD_SET_GROUP_NOTES),
2717 ),
2718 (
2719 "groups",
2720 &Router::new()
2721 .get(&API_METHOD_LIST_GROUPS)
2722 .delete(&API_METHOD_DELETE_GROUP),
2723 ),
2724 ("mount", &Router::new().post(&API_METHOD_MOUNT)),
2725 (
2726 "namespace",
2727 // FIXME: move into datastore:: sub-module?!
2728 &crate::api2::admin::namespace::ROUTER,
2729 ),
2730 (
2731 "notes",
2732 &Router::new()
2733 .get(&API_METHOD_GET_NOTES)
2734 .put(&API_METHOD_SET_NOTES),
2735 ),
2736 (
2737 "protected",
2738 &Router::new()
2739 .get(&API_METHOD_GET_PROTECTION)
2740 .put(&API_METHOD_SET_PROTECTION),
2741 ),
2742 ("prune", &Router::new().post(&API_METHOD_PRUNE)),
2743 (
2744 "prune-datastore",
2745 &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
2746 ),
2747 (
2748 "pxar-file-download",
2749 &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
2750 ),
2751 ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
2752 (
2753 "snapshots",
2754 &Router::new()
2755 .get(&API_METHOD_LIST_SNAPSHOTS)
2756 .delete(&API_METHOD_DELETE_SNAPSHOT),
2757 ),
2758 ("status", &Router::new().get(&API_METHOD_STATUS)),
2759 ("unmount", &Router::new().post(&API_METHOD_UNMOUNT)),
2760 (
2761 "upload-backup-log",
2762 &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
2763 ),
2764 ("verify", &Router::new().post(&API_METHOD_VERIFY)),
2765 ];
2766
2767 const DATASTORE_INFO_ROUTER: Router = Router::new()
2768 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2769 .subdirs(DATASTORE_INFO_SUBDIRS);
2770
2771 pub const ROUTER: Router = Router::new()
2772 .get(&API_METHOD_GET_DATASTORE_LIST)
2773 .match_all("store", &DATASTORE_INFO_ROUTER);
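// Illustrative request paths produced by the routers above, assuming the
// module is mounted at /api2/json/admin/datastore as its file path suggests;
// "store1" is a made-up store name:
//
//   GET  /api2/json/admin/datastore                 -> get_datastore_list
//   GET  /api2/json/admin/datastore/store1/status   -> status
//   POST /api2/json/admin/datastore/store1/unmount  -> unmount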