//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sortable_macro::sortable;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
    Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
    KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
    ListAccessibleBackupGroups, NS_PRIVS_OK,
};
use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
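// Used by most of the per-group and per-snapshot endpoints below.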
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(store, operation)?;

    if limited {
        let owner = datastore.get_owner(ns, backup_group)?;
        check_backup_owner(&owner, auth_id)?;
    }

    Ok(datastore)
}

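// Load the manifest of a snapshot and return one BackupContent entry per file it lists;
// the manifest blob itself is appended as the last entry.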
fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

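// Merge the manifest's file list with the files actually present in the snapshot directory,
// so entries unknown to the manifest still show up (without size or crypt mode).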
fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    datastore
        .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            let owner = match datastore.get_owner(&ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        print_store_and_ns(&store, &ns),
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

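            // determine which snapshot to report as the group's last backup (preferring
            // finished snapshots)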
            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
            let comment = file_read_firstline(note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub async fn delete_group(
    store: String,
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &group,
        )?;

        let delete_stats = datastore.remove_backup_group(&ns, &group)?;
        if !delete_stats.all_removed() {
            bail!("group only partially deleted due to protected snapshots");
        }

        Ok(Value::Null)
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub async fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        let info = BackupInfo::new(snapshot)?;

        let (_manifest, files) = get_all_snapshot_files(&info)?;

        Ok(files)
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub async fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        snapshot.destroy(false)?;

        Ok(Value::Null)
    })
    .await?
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
pub async fn list_snapshots(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || unsafe {
        list_snapshots_blocking(store, ns, backup_type, backup_id, auth_id)
    })
    .await
    .map_err(|err| format_err!("failed to await blocking task: {err}"))?
}

/// This must not run in a main worker thread as it potentially does tons of I/O.
unsafe fn list_snapshots_blocking(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    auth_id: Authid,
) -> Result<Vec<SnapshotListItem>, Error> {
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_type_ok(ns.clone(), backup_type)?
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => BackupType::iter()
            .filter_map(|backup_type| {
                let group =
                    datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id.clone());
                group.exists().then_some(group)
            })
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(ns.clone())?,
    };

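    // maps one BackupInfo to a SnapshotListItem; falls back to a bare file list if the
    // manifest cannot be read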
    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state : '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}' in {} - {}",
                    group.group(),
                    print_store_and_ns(&store, &ns),
                    err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

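/// Count accessible backup groups and snapshots per type (ct/vm/host), optionally restricted
/// to groups owned by `owner`; runs on a blocking task since it walks the whole store.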
async fn get_snapshots_count(
    store: &Arc<DataStore>,
    owner: Option<&Authid>,
) -> Result<Counts, Error> {
    let store = Arc::clone(store);
    let owner = owner.cloned();
    tokio::task::spawn_blocking(move || {
        let root_ns = Default::default();
        ListAccessibleBackupGroups::new_with_privs(
            &store,
            root_ns,
            MAX_NAMESPACE_DEPTH,
            Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
            None,
            owner.as_ref(),
        )?
        .try_fold(Counts::default(), |mut counts, group| {
            let group = match group {
                Ok(group) => group,
                Err(_) => return Ok(counts), // TODO: add this as error counts?
            };
            let snapshot_count = group.list_backups()?.len() as u64;

            // only include groups with snapshots, counting/displaying empty groups can confuse
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                    BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                    BackupType::Host => counts.host.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
            the full statistics. Counts of accessible groups are always returned, if any",
    },
)]
/// Get datastore status.
pub async fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read));

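    // full storage statistics require DATASTORE_AUDIT or DATASTORE_BACKUP on the datastore;
    // READ alone (or privileges somewhere below the namespace) still yields the counts only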
    let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
        true
    } else if store_privs & PRIV_DATASTORE_READ != 0 {
        false // allow at least counts, user can read groups anyway..
    } else {
        match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) {
            // avoid leaking existence info if the user hasn't at least any priv. below
            Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")),
            _ => false,
        }
    };
    let datastore = datastore?; // only unwrap now to avoid leaking existence info

    let (counts, gc_status) = if verbose {
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner).await?);
        let gc_status = if store_stats {
            Some(datastore.last_gc_status())
        } else {
            None
        };

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(if store_stats {
        let storage = crate::tools::fs::fs_info(datastore.base_path()).await?;
        DataStoreStatus {
            total: storage.total,
            used: storage.used,
            avail: storage.available,
            gc_status,
            counts,
        }
    } else {
        DataStoreStatus {
            total: 0,
            used: 0,
            avail: 0,
            gc_status,
            counts,
        }
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
#[allow(clippy::too_many_arguments)]
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let owner_check_required = check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

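    // (type, id, time) selects a single snapshot, (type, id) a whole group, and no
    // parameters the whole datastore / namespace subtree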
    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if ns.is_root() {
                store
            } else {
                format!("{}:{}", store, ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            } else {
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "keep-options": {
                type: KeepOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "use-task": {
                type: bool,
                default: false,
                optional: true,
                description: "Spins up an asynchronous task that does the work.",
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    keep_options: KeepOptions,
    store: String,
    ns: Option<BackupNamespace>,
    param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    let worker_id = format!("{}:{}:{}", store, ns, group);
    let group = datastore.backup_group(ns.clone(), group);

    #[derive(Debug, serde::Serialize)]
    struct PruneResult {
        #[serde(rename = "backup-type")]
        backup_type: BackupType,
        #[serde(rename = "backup-id")]
        backup_id: String,
        #[serde(rename = "backup-time")]
        backup_time: i64,
        keep: bool,
        protected: bool,
        #[serde(skip_serializing_if = "Option::is_none")]
        ns: Option<BackupNamespace>,
    }
    let mut prune_result: Vec<PruneResult> = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &keep_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !keep_options.keeps_something();

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();
            let backup_dir = &info.backup_dir;

            let mut result = PruneResult {
                backup_type: backup_dir.backup_type(),
                backup_id: backup_dir.backup_id().to_owned(),
                backup_time: backup_dir.backup_time(),
                keep,
                protected: mark.protected(),
                ns: None,
            };
            let prune_ns = backup_dir.backup_ns();
            if !prune_ns.is_root() {
                result.ns = Some(prune_ns.to_owned());
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    let prune_group = move |worker: Arc<WorkerTask>| {
        if keep_all {
            task_log!(worker, "No prune selection - keeping all files.");
        } else {
            let mut opts = Vec::new();
            if !ns.is_root() {
                opts.push(format!("--ns {ns}"));
            }
            crate::server::cli_keep_options(&mut opts, &keep_options);

            task_log!(worker, "retention options: {}", opts.join(" "));
            task_log!(
                worker,
                "Starting prune on {} group \"{}\"",
                print_store_and_ns(&store, &ns),
                group.group(),
            );
        }

        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();
            let backup_dir = &info.backup_dir;

            let backup_time = backup_dir.backup_time();
            let timestamp = backup_dir.backup_time_string();
            let group: &pbs_api_types::BackupGroup = backup_dir.as_ref();

            let msg = format!("{}/{}/{timestamp} {mark}", group.ty, group.id);

            task_log!(worker, "{msg}");

            prune_result.push(PruneResult {
                backup_type: group.ty,
                backup_id: group.id.clone(),
                backup_time,
                keep,
                protected: mark.protected(),
                ns: None,
            });

            if !keep {
                if let Err(err) = backup_dir.destroy(false) {
                    task_warn!(
                        worker,
                        "failed to remove dir {:?}: {}",
                        backup_dir.relative_path(),
                        err,
                    );
                }
            }
        }
        prune_result
    };

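    // with use-task set, the prune runs in a spawned worker task and only the UPID is
    // returned; otherwise it runs inline and the per-snapshot results are returned directly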
    if param["use-task"].as_bool().unwrap_or(false) {
        let upid = WorkerTask::spawn(
            "prune",
            Some(worker_id),
            auth_id.to_string(),
            true,
            move |worker| async move {
                let _ = prune_group(worker.clone());
                Ok(())
            },
        )?;
        Ok(json!(upid))
    } else {
        let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
        let result = prune_group(worker.clone());
        worker.log_result(&Ok(()));
        Ok(json!(result))
    }
}

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneJobOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneJobOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let user_info = CachedUserInfo::new()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    user_info.check_privs(
        &auth_id,
        &prune_options.acl_path(&store),
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let ns = prune_options.ns.clone().unwrap_or_default();
    let worker_id = format!("{}:{}", store, ns);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let acl_path = &["datastore", store];
        let user_privs = user_info.lookup_privs(&auth_id, acl_path);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

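        // users without direct privileges still get the datastore name (but no comment)
        // if they have privileges somewhere below it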
        let mut allow_id = false;
        if !allowed {
            if let Ok(any_privs) = user_info.any_privs_below(&auth_id, acl_path, NS_PRIVS_OK) {
                allow_id = any_privs;
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(store, &backup_ns),
            backup_dir,
            file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir_api.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(store, &backup_ns),
            backup_dir_api,
            file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let (_, extension) = file_name.rsplit_once('.').unwrap();

DC
1446
1447 let body = match extension {
1448 "didx" => {
dc7a5b34
TL
1449 let index = DynamicIndexReader::open(&path).map_err(|err| {
1450 format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
1451 })?;
2d55beec
FG
1452 let (csum, size) = index.compute_csum();
1453 manifest.verify_file(&file_name, &csum, size)?;
6ef9bb59 1454
14f6c9cb 1455 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1456 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1457 Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
1458 eprintln!("error during streaming of '{:?}' - {}", path, err);
1459 err
1460 }))
1461 }
6ef9bb59 1462 "fidx" => {
dc7a5b34
TL
1463 let index = FixedIndexReader::open(&path).map_err(|err| {
1464 format_err!("unable to read fixed index '{:?}' - {}", &path, err)
1465 })?;
6ef9bb59 1466
2d55beec
FG
1467 let (csum, size) = index.compute_csum();
1468 manifest.verify_file(&file_name, &csum, size)?;
1469
14f6c9cb 1470 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1471 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1472 Body::wrap_stream(
1473 AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
1474 move |err| {
1475 eprintln!("error during streaming of '{:?}' - {}", path, err);
1476 err
1477 },
1478 ),
1479 )
1480 }
6ef9bb59
DC
1481 "blob" => {
1482 let file = std::fs::File::open(&path)
8aa67ee7 1483 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
6ef9bb59 1484
2d55beec
FG
1485 // FIXME: load full blob to verify index checksum?
1486
6ef9bb59 1487 Body::wrap_stream(
dc7a5b34
TL
1488 WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
1489 move |err| {
6ef9bb59
DC
1490 eprintln!("error during streaming of '{:?}' - {}", path, err);
1491 err
dc7a5b34
TL
1492 },
1493 ),
6ef9bb59 1494 )
dc7a5b34 1495 }
6ef9bb59
DC
1496 extension => {
1497 bail!("cannot download '{}' files", extension);
dc7a5b34 1498 }
6ef9bb59
DC
1499 };
1500
1501 // fixme: set other headers ?
1502 Ok(Response::builder()
dc7a5b34
TL
1503 .status(StatusCode::OK)
1504 .header(header::CONTENT_TYPE, "application/octet-stream")
1505 .body(body)
1506 .unwrap())
1507 }
1508 .boxed()
6ef9bb59
DC
1509}
1510
552c2259 1511#[sortable]
0ab08ac9
DM
1512pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
1513 &ApiHandler::AsyncHttp(&upload_backup_log),
255f378a 1514 &ObjectSchema::new(
54552dda 1515 "Upload the client backup log file into a backup snapshot ('client.log.blob').",
552c2259 1516 &sorted!([
66c49c21 1517 ("store", false, &DATASTORE_SCHEMA),
bc21ade2 1518 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
255f378a 1519 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
0ab08ac9 1520 ("backup-id", false, &BACKUP_ID_SCHEMA),
255f378a 1521 ("backup-time", false, &BACKUP_TIME_SCHEMA),
552c2259 1522 ]),
dc7a5b34
TL
1523 ),
1524)
1525.access(
54552dda 1526 Some("Only the backup creator/owner is allowed to do this."),
7d6fc15b 1527 &Permission::Anybody,
54552dda 1528);
9e47c0a5 1529
bf78f708 1530pub fn upload_backup_log(
07ee2235
DM
1531 _parts: Parts,
1532 req_body: Body,
1533 param: Value,
255f378a 1534 _info: &ApiMethod,
54552dda 1535 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 1536) -> ApiResponseFuture {
ad51d02a 1537 async move {
7d6fc15b 1538 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
3c8c2827 1539 let store = required_string_param(&param, "store")?;
133d718f 1540 let backup_ns = optional_ns_param(&param)?;
abd82485 1541
1afce610 1542 let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
2bc2435a 1543
7a404dc5 1544 let datastore = check_privs_and_load_store(
e1db0670 1545 store,
abd82485 1546 &backup_ns,
c9396984 1547 &auth_id,
7a404dc5
FG
1548 0,
1549 PRIV_DATASTORE_BACKUP,
c9396984 1550 Some(Operation::Write),
1afce610 1551 &backup_dir_api.group,
c9396984 1552 )?;
abd82485 1553 let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
07ee2235 1554
dc7a5b34 1555 let file_name = CLIENT_LOG_BLOB_NAME;
07ee2235 1556
133d718f 1557 let mut path = backup_dir.full_path();
16f6766a 1558 path.push(file_name);
07ee2235 1559
ad51d02a
DM
1560 if path.exists() {
1561 bail!("backup already contains a log.");
1562 }
e128d4e8 1563
abd82485
FG
1564 println!(
1565 "Upload backup log to {} {backup_dir_api}/{file_name}",
e1db0670 1566 print_store_and_ns(store, &backup_ns),
abd82485 1567 );
ad51d02a
DM
1568
1569 let data = req_body
1570 .map_err(Error::from)
1571 .try_fold(Vec::new(), |mut acc, chunk| {
1654ab33 1572 acc.extend_from_slice(&chunk);
ad51d02a
DM
1573 future::ok::<_, Error>(acc)
1574 })
1575 .await?;
1576
39f18b30
DM
1577 // always verify blob/CRC at server side
1578 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1579
e0a19d33 1580 replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
ad51d02a
DM
1581
1582 // fixme: use correct formatter
53daae8e 1583 Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
dc7a5b34
TL
1584 }
1585 .boxed()
07ee2235
DM
1586}
1587
5b1cfa01
DC
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub async fn catalog(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let file_name = CATALOG_NAME;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(file_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(file_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);

        let mut catalog_reader = CatalogReader::new(reader);

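        // the web UI sends "root" or "/" for the top level; anything else is a
        // base64-encoded path inside the catalog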
        let path = if filepath != "root" && filepath != "/" {
            base64::decode(filepath)?
        } else {
            vec![b'/']
        };

        catalog_reader.list_dir_contents(&path)
    })
    .await?
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
            ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
        ]),
    )
).access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let ns = optional_ns_param(&param)?;

        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path)
            .await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

804f6143
DC
1762 let body = match file.kind() {
1763 EntryKind::File { .. } => Body::wrap_stream(
1764 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1765 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1766 err
1767 }),
1768 ),
1769 EntryKind::Hardlink(_) => Body::wrap_stream(
1770 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1771 .map_err(move |err| {
dc7a5b34 1772 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
804f6143
DC
1773 err
1774 }),
1775 ),
1776 EntryKind::Directory => {
984ddb2f 1777 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
804f6143 1778 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
984ddb2f 1779 if tar {
dc7a5b34
TL
1780 proxmox_rest_server::spawn_internal_task(create_tar(
1781 channelwriter,
1782 decoder,
1783 path.clone(),
dc7a5b34 1784 ));
984ddb2f
DC
1785 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1786 Body::wrap_stream(zstdstream.map_err(move |err| {
0608b36b 1787 log::error!("error during streaming of tar.zst '{:?}' - {}", path, err);
984ddb2f
DC
1788 err
1789 }))
1790 } else {
dc7a5b34
TL
1791 proxmox_rest_server::spawn_internal_task(create_zip(
1792 channelwriter,
1793 decoder,
1794 path.clone(),
dc7a5b34 1795 ));
984ddb2f 1796 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
0608b36b 1797 log::error!("error during streaming of zip '{:?}' - {}", path, err);
984ddb2f
DC
1798 err
1799 }))
1800 }
804f6143
DC
1801 }
1802 other => bail!("cannot download file of type {:?}", other),
1803 };
d33d8f4e
DC
1804
1805 // fixme: set other headers ?
1806 Ok(Response::builder()
dc7a5b34
TL
1807 .status(StatusCode::OK)
1808 .header(header::CONTENT_TYPE, "application/octet-stream")
1809 .body(body)
1810 .unwrap())
1811 }
1812 .boxed()
d33d8f4e
DC
1813}
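
For pxar_file_download the `filepath` parameter is the base64 encoding of "<pxar index file name>/<path inside the archive>": after decoding, a single leading '/' is stripped, the first component selects the dynamic index, and the remainder (default "/") is looked up inside the archive. A minimal sketch of building that parameter; the archive name in the comment is only an example:

// Sketch only: build the `filepath` parameter for pxar-file-download.
// `pxar_name` must match a file listed in the snapshot index
// (for example "root.pxar.didx"); `path_in_archive` is the entry to fetch.
fn pxar_download_filepath(pxar_name: &str, path_in_archive: &str) -> String {
    let raw = format!("{}/{}", pxar_name, path_in_archive.trim_start_matches('/'));
    base64::encode(raw)
}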
1814
1a0d3d11
DM
1815#[api(
1816 input: {
1817 properties: {
1818 store: {
1819 schema: DATASTORE_SCHEMA,
1820 },
1821 timeframe: {
c68fa58a 1822 type: RRDTimeFrame,
1a0d3d11
DM
1823 },
1824 cf: {
1825 type: RRDMode,
1826 },
1827 },
1828 },
1829 access: {
7d6fc15b
TL
1830 permission: &Permission::Privilege(
1831 &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1a0d3d11
DM
1832 },
1833)]
1834/// Read datastore RRD statistics
bf78f708 1835pub fn get_rrd_stats(
1a0d3d11 1836 store: String,
c68fa58a 1837 timeframe: RRDTimeFrame,
1a0d3d11
DM
1838 cf: RRDMode,
1839 _param: Value,
1840) -> Result<Value, Error> {
e9d2fc93 1841 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1842 let disk_manager = crate::tools::disks::DiskManage::new();
1843
1844 let mut rrd_fields = vec![
dc7a5b34 1845 "total",
de923258 1846 "available",
dc7a5b34
TL
1847 "used",
1848 "read_ios",
1849 "read_bytes",
1850 "write_ios",
1851 "write_bytes",
f27b6086
DC
1852 ];
1853
1854 // we do not have io_ticks for zpools, so don't include them
1855 match disk_manager.find_mounted_device(&datastore.base_path()) {
dc7a5b34 1856 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
f27b6086
DC
1857 _ => rrd_fields.push("io_ticks"),
1858 };
1859
dc7a5b34 1860 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1a0d3d11
DM
1861}
1862
5fd823c3
HL
1863#[api(
1864 input: {
1865 properties: {
1866 store: {
1867 schema: DATASTORE_SCHEMA,
1868 },
1869 },
1870 },
1871 access: {
1872 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1873 },
1874)]
1875/// Read active operations (reader/writer counts) on a datastore
dc7a5b34 1876pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
5fd823c3
HL
1877 let active_operations = task_tracking::get_active_operations(&store)?;
1878 Ok(json!({
1879 "read": active_operations.read,
1880 "write": active_operations.write,
1881 }))
1882}
1883
d6688884
SR
1884#[api(
1885 input: {
1886 properties: {
988d575d 1887 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1888 ns: {
133d718f
WB
1889 type: BackupNamespace,
1890 optional: true,
1891 },
8c74349b
WB
1892 backup_group: {
1893 type: pbs_api_types::BackupGroup,
1894 flatten: true,
1895 },
d6688884
SR
1896 },
1897 },
1898 access: {
7d6fc15b
TL
1899 permission: &Permission::Anybody,
1900 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1901 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1902 },
1903)]
1904/// Get "notes" for a backup group
1905pub fn get_group_notes(
1906 store: String,
bc21ade2 1907 ns: Option<BackupNamespace>,
8c74349b 1908 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1909 rpcenv: &mut dyn RpcEnvironment,
1910) -> Result<String, Error> {
d6688884 1911 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 1912 let ns = ns.unwrap_or_default();
ea2e91e5 1913
7a404dc5 1914 let datastore = check_privs_and_load_store(
abd82485
FG
1915 &store,
1916 &ns,
7d6fc15b 1917 &auth_id,
2bc2435a
FG
1918 PRIV_DATASTORE_AUDIT,
1919 PRIV_DATASTORE_BACKUP,
c9396984 1920 Some(Operation::Read),
c9396984
FG
1921 &backup_group,
1922 )?;
d6688884 1923
abd82485 1924 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
d6688884
SR
1925 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1926}
1927
1928#[api(
1929 input: {
1930 properties: {
988d575d 1931 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1932 ns: {
133d718f
WB
1933 type: BackupNamespace,
1934 optional: true,
1935 },
8c74349b
WB
1936 backup_group: {
1937 type: pbs_api_types::BackupGroup,
1938 flatten: true,
1939 },
d6688884
SR
1940 notes: {
1941 description: "A multiline text.",
1942 },
1943 },
1944 },
1945 access: {
7d6fc15b
TL
1946 permission: &Permission::Anybody,
1947 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
1948 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1949 },
1950)]
1951/// Set "notes" for a backup group
1952pub fn set_group_notes(
1953 store: String,
bc21ade2 1954 ns: Option<BackupNamespace>,
8c74349b 1955 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1956 notes: String,
1957 rpcenv: &mut dyn RpcEnvironment,
1958) -> Result<(), Error> {
d6688884 1959 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485
FG
1960 let ns = ns.unwrap_or_default();
1961
7a404dc5 1962 let datastore = check_privs_and_load_store(
abd82485
FG
1963 &store,
1964 &ns,
7d6fc15b 1965 &auth_id,
2bc2435a
FG
1966 PRIV_DATASTORE_MODIFY,
1967 PRIV_DATASTORE_BACKUP,
c9396984 1968 Some(Operation::Write),
c9396984
FG
1969 &backup_group,
1970 )?;
d6688884 1971
abd82485 1972 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
e0a19d33 1973 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
d6688884
SR
1974
1975 Ok(())
1976}
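
Group notes are therefore kept as a small plain-text file (see get_group_note_path() above): reading falls back to an empty string when the file does not exist, and writing replaces the whole file. A reduced sketch of that behavior using only std, with an illustrative path argument standing in for the real helper:

use std::{fs, io, path::Path};

// Sketch only: read-or-empty / overwrite semantics of the group notes file.
// The real handlers use proxmox_sys::fs::{file_read_optional_string, replace_file}.
fn read_group_notes(note_path: &Path) -> io::Result<String> {
    match fs::read_to_string(note_path) {
        Ok(notes) => Ok(notes),
        Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(String::new()),
        Err(err) => Err(err),
    }
}

fn write_group_notes(note_path: &Path, notes: &str) -> io::Result<()> {
    fs::write(note_path, notes.as_bytes())
}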
1977
912b3f5b
DM
1978#[api(
1979 input: {
1980 properties: {
988d575d 1981 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1982 ns: {
133d718f
WB
1983 type: BackupNamespace,
1984 optional: true,
1985 },
8c74349b
WB
1986 backup_dir: {
1987 type: pbs_api_types::BackupDir,
1988 flatten: true,
1989 },
912b3f5b
DM
1990 },
1991 },
1992 access: {
7d6fc15b
TL
1993 permission: &Permission::Anybody,
1994 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1995 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
1996 },
1997)]
1998/// Get "notes" for a specific backup
bf78f708 1999pub fn get_notes(
912b3f5b 2000 store: String,
bc21ade2 2001 ns: Option<BackupNamespace>,
8c74349b 2002 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
2003 rpcenv: &mut dyn RpcEnvironment,
2004) -> Result<String, Error> {
7d6fc15b 2005 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2006 let ns = ns.unwrap_or_default();
ea2e91e5 2007
7a404dc5 2008 let datastore = check_privs_and_load_store(
abd82485
FG
2009 &store,
2010 &ns,
7d6fc15b 2011 &auth_id,
2bc2435a
FG
2012 PRIV_DATASTORE_AUDIT,
2013 PRIV_DATASTORE_BACKUP,
c9396984 2014 Some(Operation::Read),
c9396984
FG
2015 &backup_dir.group,
2016 )?;
912b3f5b 2017
fbfb64a6 2018 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
912b3f5b 2019
133d718f 2020 let (manifest, _) = backup_dir.load_manifest()?;
912b3f5b 2021
dc7a5b34 2022 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
912b3f5b
DM
2023
2024 Ok(String::from(notes))
2025}
2026
2027#[api(
2028 input: {
2029 properties: {
988d575d 2030 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2031 ns: {
133d718f
WB
2032 type: BackupNamespace,
2033 optional: true,
2034 },
8c74349b
WB
2035 backup_dir: {
2036 type: pbs_api_types::BackupDir,
2037 flatten: true,
2038 },
912b3f5b
DM
2039 notes: {
2040 description: "A multiline text.",
2041 },
2042 },
2043 },
2044 access: {
7d6fc15b
TL
2045 permission: &Permission::Anybody,
2046 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2047 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
2048 },
2049)]
2050/// Set "notes" for a specific backup
bf78f708 2051pub fn set_notes(
912b3f5b 2052 store: String,
bc21ade2 2053 ns: Option<BackupNamespace>,
8c74349b 2054 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
2055 notes: String,
2056 rpcenv: &mut dyn RpcEnvironment,
2057) -> Result<(), Error> {
7d6fc15b 2058 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2059 let ns = ns.unwrap_or_default();
ea2e91e5 2060
7a404dc5 2061 let datastore = check_privs_and_load_store(
abd82485
FG
2062 &store,
2063 &ns,
7d6fc15b 2064 &auth_id,
2bc2435a
FG
2065 PRIV_DATASTORE_MODIFY,
2066 PRIV_DATASTORE_BACKUP,
c9396984 2067 Some(Operation::Write),
c9396984
FG
2068 &backup_dir.group,
2069 )?;
912b3f5b 2070
fbfb64a6 2071 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
912b3f5b 2072
133d718f
WB
2073 backup_dir
2074 .update_manifest(|manifest| {
dc7a5b34
TL
2075 manifest.unprotected["notes"] = notes.into();
2076 })
2077 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
2078
2079 Ok(())
2080}
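
Per-snapshot notes, in contrast, live inside the backup manifest: get_notes()/set_notes() read and update the "notes" key of the manifest's unsigned `unprotected` object, so changing them rewrites the manifest blob instead of touching a separate file. A minimal serde_json sketch of just that part of the layout:

use serde_json::{json, Value};

// Sketch only: snapshot notes are a plain string under
// manifest.unprotected["notes"]; a missing key reads as "".
fn get_snapshot_notes(unprotected: &Value) -> &str {
    unprotected["notes"].as_str().unwrap_or("")
}

fn set_snapshot_notes(unprotected: &mut Value, notes: &str) {
    unprotected["notes"] = notes.into();
}

fn main() {
    let mut unprotected = json!({});
    assert_eq!(get_snapshot_notes(&unprotected), "");
    set_snapshot_notes(&mut unprotected, "pre-upgrade state");
    assert_eq!(get_snapshot_notes(&unprotected), "pre-upgrade state");
}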
2081
8292d3d2
DC
2082#[api(
2083 input: {
2084 properties: {
988d575d 2085 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2086 ns: {
133d718f
WB
2087 type: BackupNamespace,
2088 optional: true,
2089 },
8c74349b
WB
2090 backup_dir: {
2091 type: pbs_api_types::BackupDir,
2092 flatten: true,
2093 },
8292d3d2
DC
2094 },
2095 },
2096 access: {
7d6fc15b
TL
2097 permission: &Permission::Anybody,
2098 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2099 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2100 },
2101)]
2102/// Query protection for a specific backup
2103pub fn get_protection(
2104 store: String,
bc21ade2 2105 ns: Option<BackupNamespace>,
8c74349b 2106 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2107 rpcenv: &mut dyn RpcEnvironment,
2108) -> Result<bool, Error> {
7d6fc15b 2109 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2110 let ns = ns.unwrap_or_default();
7a404dc5 2111 let datastore = check_privs_and_load_store(
abd82485
FG
2112 &store,
2113 &ns,
7d6fc15b 2114 &auth_id,
2bc2435a
FG
2115 PRIV_DATASTORE_AUDIT,
2116 PRIV_DATASTORE_BACKUP,
c9396984 2117 Some(Operation::Read),
c9396984
FG
2118 &backup_dir.group,
2119 )?;
8292d3d2 2120
fbfb64a6 2121 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
8292d3d2 2122
6da20161 2123 Ok(backup_dir.is_protected())
8292d3d2
DC
2124}
2125
2126#[api(
2127 input: {
2128 properties: {
988d575d 2129 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2130 ns: {
133d718f
WB
2131 type: BackupNamespace,
2132 optional: true,
2133 },
8c74349b
WB
2134 backup_dir: {
2135 type: pbs_api_types::BackupDir,
2136 flatten: true,
2137 },
8292d3d2
DC
2138 protected: {
2139 description: "Enable/disable protection.",
2140 },
2141 },
2142 },
2143 access: {
7d6fc15b
TL
2144 permission: &Permission::Anybody,
2145 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2146 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2147 },
2148)]
2149/// Enable or disable protection for a specific backup
67d7a59d 2150pub async fn set_protection(
8292d3d2 2151 store: String,
bc21ade2 2152 ns: Option<BackupNamespace>,
8c74349b 2153 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2154 protected: bool,
2155 rpcenv: &mut dyn RpcEnvironment,
2156) -> Result<(), Error> {
7d6fc15b 2157 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
8292d3d2 2158
67d7a59d
WB
2159 tokio::task::spawn_blocking(move || {
2160 let ns = ns.unwrap_or_default();
2161 let datastore = check_privs_and_load_store(
2162 &store,
2163 &ns,
2164 &auth_id,
2165 PRIV_DATASTORE_MODIFY,
2166 PRIV_DATASTORE_BACKUP,
2167 Some(Operation::Write),
2168 &backup_dir.group,
2169 )?;
2170
2171 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
8292d3d2 2172
67d7a59d
WB
2173 datastore.update_protection(&backup_dir, protected)
2174 })
2175 .await?
8292d3d2
DC
2176}
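
set_protection() is declared async, but the datastore work it performs is blocking, so the body is moved onto tokio's blocking thread pool: the outer `.await?` converts a failed join into an error and the inner Result becomes the handler's result. A self-contained sketch of that shape, with an illustrative marker-file path standing in for the real datastore call:

use anyhow::Error;
use std::path::PathBuf;

// Sketch only: the spawn_blocking pattern used by the async handlers above.
// Blocking I/O runs on the blocking pool; `.await?` surfaces a JoinError,
// and the inner Result<(), Error> is returned as-is.
async fn set_marker(path: PathBuf, protected: bool) -> Result<(), Error> {
    tokio::task::spawn_blocking(move || -> Result<(), Error> {
        if protected {
            std::fs::write(&path, b"protected")?;
        } else if let Err(err) = std::fs::remove_file(&path) {
            if err.kind() != std::io::ErrorKind::NotFound {
                return Err(err.into());
            }
        }
        Ok(())
    })
    .await?
}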
2177
72be0eb1 2178#[api(
4940012d 2179 input: {
72be0eb1 2180 properties: {
988d575d 2181 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2182 ns: {
133d718f
WB
2183 type: BackupNamespace,
2184 optional: true,
2185 },
8c74349b
WB
2186 backup_group: {
2187 type: pbs_api_types::BackupGroup,
2188 flatten: true,
2189 },
72be0eb1 2190 "new-owner": {
e6dc35ac 2191 type: Authid,
72be0eb1
DW
2192 },
2193 },
4940012d
FG
2194 },
2195 access: {
bff85572 2196 permission: &Permission::Anybody,
7d6fc15b
TL
2197 description: "Datastore.Modify on whole datastore, or changing ownership between user and \
2198 a user's token for owned backups with Datastore.Backup"
4940012d 2199 },
72be0eb1
DW
2200)]
2201/// Change owner of a backup group
979b3784 2202pub async fn set_backup_owner(
72be0eb1 2203 store: String,
bc21ade2 2204 ns: Option<BackupNamespace>,
8c74349b 2205 backup_group: pbs_api_types::BackupGroup,
e6dc35ac 2206 new_owner: Authid,
bff85572 2207 rpcenv: &mut dyn RpcEnvironment,
72be0eb1 2208) -> Result<(), Error> {
bff85572 2209 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1909ece2 2210
979b3784
WB
2211 tokio::task::spawn_blocking(move || {
2212 let ns = ns.unwrap_or_default();
2213 let owner_check_required = check_ns_privs_full(
2214 &store,
2215 &ns,
2216 &auth_id,
2217 PRIV_DATASTORE_MODIFY,
2218 PRIV_DATASTORE_BACKUP,
2219 )?;
1909ece2 2220
979b3784 2221 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
bff85572 2222
979b3784 2223 let backup_group = datastore.backup_group(ns, backup_group);
bff85572 2224
979b3784
WB
2225 if owner_check_required {
2226 let owner = backup_group.get_owner()?;
bff85572 2227
979b3784
WB
2228 let allowed = match (owner.is_token(), new_owner.is_token()) {
2229 (true, true) => {
2230 // API token to API token, owned by same user
2231 let owner = owner.user();
2232 let new_owner = new_owner.user();
2233 owner == new_owner && Authid::from(owner.clone()) == auth_id
2234 }
2235 (true, false) => {
2236 // API token to API token owner
2237 Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
2238 }
2239 (false, true) => {
2240 // API token owner to API token
2241 owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
2242 }
2243 (false, false) => {
2244 // User to User, not allowed for unprivileged users
2245 false
2246 }
2247 };
2248
2249 if !allowed {
2250 return Err(http_err!(
2251 UNAUTHORIZED,
2252 "{} does not have permission to change owner of backup group '{}' to {}",
2253 auth_id,
2254 backup_group.group(),
2255 new_owner,
2256 ));
2257 }
2bc2435a 2258 }
bff85572 2259
979b3784 2260 let user_info = CachedUserInfo::new()?;
7d6fc15b 2261
979b3784
WB
2262 if !user_info.is_active_auth_id(&new_owner) {
2263 bail!(
2264 "{} '{}' is inactive or non-existent",
2265 if new_owner.is_token() {
2266 "API token".to_string()
2267 } else {
2268 "user".to_string()
2269 },
2270 new_owner
2271 );
2272 }
72be0eb1 2273
979b3784 2274 backup_group.set_owner(&new_owner, true)?;
72be0eb1 2275
979b3784
WB
2276 Ok(())
2277 })
2278 .await?
72be0eb1
DW
2279}
2280
552c2259 2281#[sortable]
255f378a 2282const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
5fd823c3
HL
2283 (
2284 "active-operations",
dc7a5b34 2285 &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
5b1cfa01 2286 ),
dc7a5b34 2287 ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
72be0eb1
DW
2288 (
2289 "change-owner",
dc7a5b34 2290 &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
72be0eb1 2291 ),
255f378a
DM
2292 (
2293 "download",
dc7a5b34 2294 &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
255f378a 2295 ),
6ef9bb59
DC
2296 (
2297 "download-decoded",
dc7a5b34 2298 &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
255f378a 2299 ),
dc7a5b34 2300 ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
255f378a
DM
2301 (
2302 "gc",
2303 &Router::new()
2304 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
dc7a5b34 2305 .post(&API_METHOD_START_GARBAGE_COLLECTION),
255f378a 2306 ),
d6688884
SR
2307 (
2308 "group-notes",
2309 &Router::new()
2310 .get(&API_METHOD_GET_GROUP_NOTES)
dc7a5b34 2311 .put(&API_METHOD_SET_GROUP_NOTES),
d6688884 2312 ),
255f378a
DM
2313 (
2314 "groups",
2315 &Router::new()
b31c8019 2316 .get(&API_METHOD_LIST_GROUPS)
dc7a5b34 2317 .delete(&API_METHOD_DELETE_GROUP),
255f378a 2318 ),
18934ae5
TL
2319 (
2320 "namespace",
2321 // FIXME: move into datastore:: sub-module?!
2322 &crate::api2::admin::namespace::ROUTER,
2323 ),
912b3f5b
DM
2324 (
2325 "notes",
2326 &Router::new()
2327 .get(&API_METHOD_GET_NOTES)
dc7a5b34 2328 .put(&API_METHOD_SET_NOTES),
912b3f5b 2329 ),
8292d3d2
DC
2330 (
2331 "protected",
2332 &Router::new()
2333 .get(&API_METHOD_GET_PROTECTION)
dc7a5b34 2334 .put(&API_METHOD_SET_PROTECTION),
255f378a 2335 ),
dc7a5b34 2336 ("prune", &Router::new().post(&API_METHOD_PRUNE)),
9805207a
DC
2337 (
2338 "prune-datastore",
dc7a5b34 2339 &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
9805207a 2340 ),
d33d8f4e
DC
2341 (
2342 "pxar-file-download",
dc7a5b34 2343 &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
1a0d3d11 2344 ),
dc7a5b34 2345 ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
255f378a
DM
2346 (
2347 "snapshots",
2348 &Router::new()
fc189b19 2349 .get(&API_METHOD_LIST_SNAPSHOTS)
dc7a5b34 2350 .delete(&API_METHOD_DELETE_SNAPSHOT),
255f378a 2351 ),
dc7a5b34 2352 ("status", &Router::new().get(&API_METHOD_STATUS)),
255f378a
DM
2353 (
2354 "upload-backup-log",
dc7a5b34 2355 &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
c2009e53 2356 ),
dc7a5b34 2357 ("verify", &Router::new().post(&API_METHOD_VERIFY)),
255f378a
DM
2358];
2359
ad51d02a 2360const DATASTORE_INFO_ROUTER: Router = Router::new()
255f378a
DM
2361 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2362 .subdirs(DATASTORE_INFO_SUBDIRS);
2363
255f378a 2364pub const ROUTER: Router = Router::new()
bb34b589 2365 .get(&API_METHOD_GET_DATASTORE_LIST)
255f378a 2366 .match_all("store", &DATASTORE_INFO_ROUTER);
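
This router is mounted below /api2/{output-format}/admin/datastore: a GET on the collection itself returns the datastore list, and match_all("store", ...) binds the next path component to the {store} parameter before dispatching into DATASTORE_INFO_SUBDIRS. A few resulting paths, for illustration only (the /api2/json prefix is the usual JSON formatter mount point):

// Illustrative only: example endpoints exposed by the SubdirMap above.
const EXAMPLE_DATASTORE_PATHS: &[&str] = &[
    "/api2/json/admin/datastore",                             // GET: list datastores
    "/api2/json/admin/datastore/{store}/status",              // GET: usage/status
    "/api2/json/admin/datastore/{store}/snapshots",           // GET/DELETE snapshots
    "/api2/json/admin/datastore/{store}/notes",               // GET/PUT snapshot notes
    "/api2/json/admin/datastore/{store}/pxar-file-download",  // file download from pxar
];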