//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
    Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
    KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
    ListAccessibleBackupGroups, NS_PRIVS_OK,
};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(store, operation)?;

    if limited {
        let owner = datastore.get_owner(ns, backup_group)?;
        check_backup_owner(&owner, &auth_id)?;
    }

    Ok(datastore)
}

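// A minimal sketch (not part of the original file) of how the handlers below use this helper;
// `store`, `ns`, `auth_id` and `group` stand in for values a handler has already parsed from its
// parameters:
//
//     let datastore = check_privs_and_load_store(
//         &store,
//         &ns,
//         &auth_id,
//         PRIV_DATASTORE_MODIFY, // full access: no owner check needed
//         PRIV_DATASTORE_PRUNE,  // limited access: `auth_id` must own `group`
//         Some(Operation::Write),
//         &group,
//     )?;
//
// This mirrors the call sites in `delete_group`, `delete_snapshot` and `prune` further down.
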
fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    datastore
        .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            let owner = match datastore.get_owner(&ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        print_store_and_ns(&store, &ns),
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub async fn delete_group(
    store: String,
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &group,
        )?;

        if !datastore.remove_backup_group(&ns, &group)? {
            bail!("group only partially deleted due to protected snapshots");
        }

        Ok(Value::Null)
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub async fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        let info = BackupInfo::new(snapshot)?;

        let (_manifest, files) = get_all_snapshot_files(&info)?;

        Ok(files)
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub async fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        snapshot.destroy(false)?;

        Ok(Value::Null)
    })
    .await?
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
pub async fn list_snapshots(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || unsafe {
        list_snapshots_blocking(store, ns, backup_type, backup_id, auth_id)
    })
    .await
    .map_err(|err| format_err!("failed to await blocking task: {err}"))?
}

/// This must not run in a main worker thread as it potentially does tons of I/O.
unsafe fn list_snapshots_blocking(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    auth_id: Authid,
) -> Result<Vec<SnapshotListItem>, Error> {
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_groups_ok(ns.clone())?
            .filter(|group| group.backup_type() == backup_type)
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => datastore
            .iter_backup_groups_ok(ns.clone())?
            .filter(|group| group.backup_id() == backup_id)
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(ns.clone())?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state : '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}' in {} - {}",
                    group.group(),
                    print_store_and_ns(&store, &ns),
                    err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

async fn get_snapshots_count(
    store: &Arc<DataStore>,
    owner: Option<&Authid>,
) -> Result<Counts, Error> {
    let store = Arc::clone(store);
    let owner = owner.cloned();
    tokio::task::spawn_blocking(move || {
        let root_ns = Default::default();
        ListAccessibleBackupGroups::new_with_privs(
            &store,
            root_ns,
            MAX_NAMESPACE_DEPTH,
            Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
            None,
            owner.as_ref(),
        )?
        .try_fold(Counts::default(), |mut counts, group| {
            let group = match group {
                Ok(group) => group,
                Err(_) => return Ok(counts), // TODO: add this as error counts?
            };
            let snapshot_count = group.list_backups()?.len() as u64;

            // only include groups with snapshots, counting/displaying empty groups can confuse
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                    BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                    BackupType::Host => counts.host.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
    })
    .await?
}

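// Sketch of what the `Counts` built above ends up looking like once serialized (illustrative
// values; field names taken from the accesses in this function, serialization details live in
// pbs-api-types):
//
//     { "ct": { "groups": 2, "snapshots": 10 }, "vm": { "groups": 1, "snapshots": 4 } }
//
// Backup types without any snapshot-carrying group stay `None` and are omitted.
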
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
            the full statistics. Counts of accessible groups are always returned, if any",
    },
)]
/// Get datastore status.
pub async fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read));

    let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
        true
    } else if store_privs & PRIV_DATASTORE_READ != 0 {
        false // allow at least counts, user can read groups anyway..
    } else {
        match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) {
            // avoid leaking existence info if the user doesn't have at least some privilege below
            Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")),
            _ => false,
        }
    };
    let datastore = datastore?; // only unwrap now to avoid leaking existence info

    let (counts, gc_status) = if verbose {
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner).await?);
        let gc_status = if store_stats {
            Some(datastore.last_gc_status())
        } else {
            None
        };

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(if store_stats {
        let storage = crate::tools::fs::fs_info(datastore.base_path()).await?;
        DataStoreStatus {
            total: storage.total,
            used: storage.used,
            avail: storage.available,
            gc_status,
            counts,
        }
    } else {
        DataStoreStatus {
            total: 0,
            used: 0,
            avail: 0,
            gc_status,
            counts,
        }
    })
}

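// Rough decision table for the permission handling above (a sketch derived from the code, not a
// normative statement of the ACL model):
//
//     AUDIT or BACKUP on /datastore/{store}   -> storage stats, plus counts/GC status if verbose
//     only READ on /datastore/{store}         -> no storage stats, counts if verbose
//     no privs here, but some priv below      -> no storage stats, counts if verbose
//     no privs at all below the store         -> 403, so datastore existence is not leaked
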
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let owner_check_required = check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if ns.is_root() {
                store
            } else {
                format!("{}:{}", store, ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?;
                failed_dirs
            } else {
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

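// Accepted parameter combinations for `verify` above, as implemented by the match on
// (backup-type, backup-id, backup-time):
//
//     type + id + time  -> verify a single snapshot       (worker type "verify_snapshot")
//     type + id         -> verify a whole backup group    (worker type "verify_group")
//     none of the three -> verify the datastore/namespace (worker type "verify")
//     anything else     -> rejected with an error
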
#[api(
    input: {
        properties: {
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "keep-options": {
                type: KeepOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    keep_options: KeepOptions,
    store: String,
    ns: Option<BackupNamespace>,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    let worker_id = format!("{}:{}:{}", store, ns, group);
    let group = datastore.backup_group(ns.clone(), group);

    let mut prune_result = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &keep_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !keep_options.keeps_something();

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let mut result = json!({
                "backup-type": info.backup_dir.backup_type(),
                "backup-id": info.backup_dir.backup_id(),
                "backup-time": info.backup_dir.backup_time(),
                "keep": keep,
                "protected": mark.protected(),
            });
            let prune_ns = info.backup_dir.backup_ns();
            if !prune_ns.is_root() {
                result["ns"] = serde_json::to_value(prune_ns)?;
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        let mut opts = Vec::new();
        if !ns.is_root() {
            opts.push(format!("--ns {ns}"));
        }
        crate::server::cli_keep_options(&mut opts, &keep_options);

        task_log!(worker, "retention options: {}", opts.join(" "));
        task_log!(
            worker,
            "Starting prune on {} group \"{}\"",
            print_store_and_ns(&store, &ns),
            group.group(),
        );
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();

        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.ty,
            "backup-id": group.id,
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = info.backup_dir.destroy(false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

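// Shape of one entry in the prune result built above (illustrative values; the "ns" field is
// only added in the dry-run branch and only for non-root namespaces):
//
//     {
//         "backup-type": "vm",
//         "backup-id": "100",
//         "backup-time": 1660000000,
//         "keep": true,
//         "protected": false
//     }
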
#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneJobOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneJobOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let user_info = CachedUserInfo::new()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    user_info.check_privs(
        &auth_id,
        &prune_options.acl_path(&store),
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let ns = prune_options.ns.clone().unwrap_or_default();
    let worker_id = format!("{}:{}", store, ns);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let acl_path = &["datastore", store];
        let user_privs = user_info.lookup_privs(&auth_id, acl_path);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut allow_id = false;
        if !allowed {
            if let Ok(any_privs) = user_info.any_privs_below(&auth_id, acl_path, NS_PRIVS_OK) {
                allow_id = any_privs;
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(&store, &backup_ns),
            backup_dir,
            file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir_api.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(&store, &backup_ns),
            backup_dir_api,
            file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            0,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir_api.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let mut path = backup_dir.full_path();
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!(
            "Upload backup log to {} {backup_dir_api}/{file_name}",
            print_store_and_ns(&store, &backup_ns),
        );

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub async fn catalog(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let file_name = CATALOG_NAME;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(file_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(file_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);

        let mut catalog_reader = CatalogReader::new(reader);

        let path = if filepath != "root" && filepath != "/" {
            base64::decode(filepath)?
        } else {
            vec![b'/']
        };

        catalog_reader.list_dir_contents(&path)
    })
    .await?
}

d33d8f4e
DC
1631#[sortable]
1632pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1633 &ApiHandler::AsyncHttp(&pxar_file_download),
1634 &ObjectSchema::new(
1ffe0301 1635 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
d33d8f4e
DC
1636 &sorted!([
1637 ("store", false, &DATASTORE_SCHEMA),
bc21ade2 1638 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
d33d8f4e
DC
1639 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1640 ("backup-id", false, &BACKUP_ID_SCHEMA),
1641 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1642 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
984ddb2f 1643 ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
d33d8f4e
DC
1644 ]),
1645 )
7d6fc15b
TL
1646).access(
1647 Some(
1648 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1649 DATASTORE_BACKUP and being the owner of the group",
1650 ),
1651 &Permission::Anybody,
d33d8f4e
DC
1652);
1653
bf78f708 1654pub fn pxar_file_download(
d33d8f4e
DC
1655 _parts: Parts,
1656 _req_body: Body,
1657 param: Value,
1658 _info: &ApiMethod,
1659 rpcenv: Box<dyn RpcEnvironment>,
1660) -> ApiResponseFuture {
d33d8f4e 1661 async move {
7d6fc15b 1662 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
3c8c2827 1663 let store = required_string_param(&param, "store")?;
bc21ade2 1664 let ns = optional_ns_param(&param)?;
abd82485 1665
7d6fc15b 1666 let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
7a404dc5 1667 let datastore = check_privs_and_load_store(
abd82485
FG
1668 &store,
1669 &ns,
7d6fc15b 1670 &auth_id,
2bc2435a
FG
1671 PRIV_DATASTORE_READ,
1672 PRIV_DATASTORE_BACKUP,
c9396984 1673 Some(Operation::Read),
c9396984
FG
1674 &backup_dir.group,
1675 )?;
a724f5fd 1676
bc21ade2 1677 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
d33d8f4e 1678
3c8c2827 1679 let filepath = required_string_param(&param, "filepath")?.to_owned();
d33d8f4e 1680
984ddb2f
DC
1681 let tar = param["tar"].as_bool().unwrap_or(false);
1682
d33d8f4e 1683 let mut components = base64::decode(&filepath)?;
3984a5fd 1684 if !components.is_empty() && components[0] == b'/' {
d33d8f4e
DC
1685 components.remove(0);
1686 }
1687
d8d8af98 1688 let mut split = components.splitn(2, |c| *c == b'/');
9238cdf5 1689 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
0dfce17a 1690 let file_path = split.next().unwrap_or(b"/");
9ccf933b 1691 let (manifest, files) = read_backup_index(&backup_dir)?;
9238cdf5
FG
1692 for file in files {
1693 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1694 bail!("cannot decode '{}' - is encrypted", pxar_name);
1695 }
1696 }
d33d8f4e 1697
9238cdf5
FG
1698 let mut path = datastore.base_path();
1699 path.push(backup_dir.relative_path());
1700 path.push(pxar_name);
d33d8f4e
DC
1701
1702 let index = DynamicIndexReader::open(&path)
1703 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1704
2d55beec 1705 let (csum, size) = index.compute_csum();
9a37bd6c 1706 manifest.verify_file(pxar_name, &csum, size)?;
2d55beec 1707
14f6c9cb 1708 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
d33d8f4e
DC
1709 let reader = BufferedDynamicReader::new(index, chunk_reader);
1710 let archive_size = reader.archive_size();
1711 let reader = LocalDynamicReadAt::new(reader);
1712
1713 let decoder = Accessor::new(reader, archive_size).await?;
1714 let root = decoder.open_root().await?;
2e219481 1715 let path = OsStr::from_bytes(file_path).to_os_string();
d33d8f4e 1716 let file = root
dc7a5b34
TL
1717 .lookup(&path)
1718 .await?
2e219481 1719 .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
d33d8f4e 1720
804f6143
DC
1721 let body = match file.kind() {
1722 EntryKind::File { .. } => Body::wrap_stream(
1723 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1724 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1725 err
1726 }),
1727 ),
1728 EntryKind::Hardlink(_) => Body::wrap_stream(
1729 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1730 .map_err(move |err| {
dc7a5b34 1731 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
804f6143
DC
1732 err
1733 }),
1734 ),
1735 EntryKind::Directory => {
984ddb2f 1736 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
804f6143 1737 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
984ddb2f 1738 if tar {
dc7a5b34
TL
1739 proxmox_rest_server::spawn_internal_task(create_tar(
1740 channelwriter,
1741 decoder,
1742 path.clone(),
dc7a5b34 1743 ));
984ddb2f
DC
1744 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1745 Body::wrap_stream(zstdstream.map_err(move |err| {
0608b36b 1746 log::error!("error during streaming of tar.zst '{:?}' - {}", path, err);
984ddb2f
DC
1747 err
1748 }))
1749 } else {
dc7a5b34
TL
1750 proxmox_rest_server::spawn_internal_task(create_zip(
1751 channelwriter,
1752 decoder,
1753 path.clone(),
dc7a5b34 1754 ));
984ddb2f 1755 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
0608b36b 1756 log::error!("error during streaming of zip '{:?}' - {}", path, err);
984ddb2f
DC
1757 err
1758 }))
1759 }
804f6143
DC
1760 }
1761 other => bail!("cannot download file of type {:?}", other),
1762 };
d33d8f4e
DC
1763
1764        // FIXME: set other headers?
1765 Ok(Response::builder()
dc7a5b34
TL
1766 .status(StatusCode::OK)
1767 .header(header::CONTENT_TYPE, "application/octet-stream")
1768 .body(body)
1769 .unwrap())
1770 }
1771 .boxed()
d33d8f4e
DC
1772}
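// Illustrative request against the "pxar-file-download" sub-route (the exact
// mount point depends on how ROUTER below is wired into the api2 tree):
//
//   GET .../admin/datastore/{store}/pxar-file-download
//       ?backup-type=host&backup-id=myhost&backup-time=1660000000
//       &filepath=<base64 of "root.pxar.didx/etc/hostname">
//       [&ns=<namespace>] [&tar=true]
//
// Regular files and hardlinks are streamed directly; directories are streamed
// as a .zip archive, or as .tar.zst when "tar" is set.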
1773
1a0d3d11
DM
1774#[api(
1775 input: {
1776 properties: {
1777 store: {
1778 schema: DATASTORE_SCHEMA,
1779 },
1780 timeframe: {
c68fa58a 1781 type: RRDTimeFrame,
1a0d3d11
DM
1782 },
1783 cf: {
1784 type: RRDMode,
1785 },
1786 },
1787 },
1788 access: {
7d6fc15b
TL
1789 permission: &Permission::Privilege(
1790 &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1a0d3d11
DM
1791 },
1792)]
1793/// Read datastore RRD statistics (usage and I/O time series)
bf78f708 1794pub fn get_rrd_stats(
1a0d3d11 1795 store: String,
c68fa58a 1796 timeframe: RRDTimeFrame,
1a0d3d11
DM
1797 cf: RRDMode,
1798 _param: Value,
1799) -> Result<Value, Error> {
e9d2fc93 1800 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1801 let disk_manager = crate::tools::disks::DiskManage::new();
1802
1803 let mut rrd_fields = vec![
dc7a5b34
TL
1804 "total",
1805 "used",
1806 "read_ios",
1807 "read_bytes",
1808 "write_ios",
1809 "write_bytes",
f27b6086
DC
1810 ];
1811
1812 // we do not have io_ticks for zpools, so don't include them
1813 match disk_manager.find_mounted_device(&datastore.base_path()) {
dc7a5b34 1814 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
f27b6086
DC
1815 _ => rrd_fields.push("io_ticks"),
1816 };
1817
dc7a5b34 1818 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1a0d3d11
DM
1819}
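// Example query against the "rrd" sub-route (parameter values are illustrative;
// the accepted strings come from RRDTimeFrame and RRDMode):
//
//   GET .../admin/datastore/{store}/rrd?timeframe=day&cf=average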
1820
5fd823c3
HL
1821#[api(
1822 input: {
1823 properties: {
1824 store: {
1825 schema: DATASTORE_SCHEMA,
1826 },
1827 },
1828 },
1829 access: {
1830 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1831 },
1832)]
1833/// Get the number of active read and write operations on a datastore
dc7a5b34 1834pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
5fd823c3
HL
1835 let active_operations = task_tracking::get_active_operations(&store)?;
1836 Ok(json!({
1837 "read": active_operations.read,
1838 "write": active_operations.write,
1839 }))
1840}
1841
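// Note on storage: group-level notes live in a plain note file inside the group
// directory (see get_group_note_path), while per-snapshot notes are kept in the
// "unprotected" section of the snapshot manifest (see get_notes/set_notes below),
// which is not covered by the manifest signature and can therefore be edited
// without re-signing the backup.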
d6688884
SR
1842#[api(
1843 input: {
1844 properties: {
988d575d 1845 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1846 ns: {
133d718f
WB
1847 type: BackupNamespace,
1848 optional: true,
1849 },
8c74349b
WB
1850 backup_group: {
1851 type: pbs_api_types::BackupGroup,
1852 flatten: true,
1853 },
d6688884
SR
1854 },
1855 },
1856 access: {
7d6fc15b
TL
1857 permission: &Permission::Anybody,
1858 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1859 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1860 },
1861)]
1862/// Get "notes" for a backup group
1863pub fn get_group_notes(
1864 store: String,
bc21ade2 1865 ns: Option<BackupNamespace>,
8c74349b 1866 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1867 rpcenv: &mut dyn RpcEnvironment,
1868) -> Result<String, Error> {
d6688884 1869 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 1870 let ns = ns.unwrap_or_default();
ea2e91e5 1871
7a404dc5 1872 let datastore = check_privs_and_load_store(
abd82485
FG
1873 &store,
1874 &ns,
7d6fc15b 1875 &auth_id,
2bc2435a
FG
1876 PRIV_DATASTORE_AUDIT,
1877 PRIV_DATASTORE_BACKUP,
c9396984 1878 Some(Operation::Read),
c9396984
FG
1879 &backup_group,
1880 )?;
d6688884 1881
abd82485 1882 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
d6688884
SR
1883 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1884}
1885
1886#[api(
1887 input: {
1888 properties: {
988d575d 1889 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1890 ns: {
133d718f
WB
1891 type: BackupNamespace,
1892 optional: true,
1893 },
8c74349b
WB
1894 backup_group: {
1895 type: pbs_api_types::BackupGroup,
1896 flatten: true,
1897 },
d6688884
SR
1898 notes: {
1899 description: "A multiline text.",
1900 },
1901 },
1902 },
1903 access: {
7d6fc15b
TL
1904 permission: &Permission::Anybody,
1905 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
1906 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1907 },
1908)]
1909/// Set "notes" for a backup group
1910pub fn set_group_notes(
1911 store: String,
bc21ade2 1912 ns: Option<BackupNamespace>,
8c74349b 1913 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1914 notes: String,
1915 rpcenv: &mut dyn RpcEnvironment,
1916) -> Result<(), Error> {
d6688884 1917 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485
FG
1918 let ns = ns.unwrap_or_default();
1919
7a404dc5 1920 let datastore = check_privs_and_load_store(
abd82485
FG
1921 &store,
1922 &ns,
7d6fc15b 1923 &auth_id,
2bc2435a
FG
1924 PRIV_DATASTORE_MODIFY,
1925 PRIV_DATASTORE_BACKUP,
c9396984 1926 Some(Operation::Write),
c9396984
FG
1927 &backup_group,
1928 )?;
d6688884 1929
abd82485 1930 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
e0a19d33 1931 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
d6688884
SR
1932
1933 Ok(())
1934}
1935
912b3f5b
DM
1936#[api(
1937 input: {
1938 properties: {
988d575d 1939 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1940 ns: {
133d718f
WB
1941 type: BackupNamespace,
1942 optional: true,
1943 },
8c74349b
WB
1944 backup_dir: {
1945 type: pbs_api_types::BackupDir,
1946 flatten: true,
1947 },
912b3f5b
DM
1948 },
1949 },
1950 access: {
7d6fc15b
TL
1951 permission: &Permission::Anybody,
1952 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1953 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
1954 },
1955)]
1956/// Get "notes" for a specific backup
bf78f708 1957pub fn get_notes(
912b3f5b 1958 store: String,
bc21ade2 1959 ns: Option<BackupNamespace>,
8c74349b 1960 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
1961 rpcenv: &mut dyn RpcEnvironment,
1962) -> Result<String, Error> {
7d6fc15b 1963 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 1964 let ns = ns.unwrap_or_default();
ea2e91e5 1965
7a404dc5 1966 let datastore = check_privs_and_load_store(
abd82485
FG
1967 &store,
1968 &ns,
7d6fc15b 1969 &auth_id,
2bc2435a
FG
1970 PRIV_DATASTORE_AUDIT,
1971 PRIV_DATASTORE_BACKUP,
c9396984 1972 Some(Operation::Read),
c9396984
FG
1973 &backup_dir.group,
1974 )?;
912b3f5b 1975
fbfb64a6 1976 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
912b3f5b 1977
133d718f 1978 let (manifest, _) = backup_dir.load_manifest()?;
912b3f5b 1979
dc7a5b34 1980 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
912b3f5b
DM
1981
1982 Ok(String::from(notes))
1983}
1984
1985#[api(
1986 input: {
1987 properties: {
988d575d 1988 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1989 ns: {
133d718f
WB
1990 type: BackupNamespace,
1991 optional: true,
1992 },
8c74349b
WB
1993 backup_dir: {
1994 type: pbs_api_types::BackupDir,
1995 flatten: true,
1996 },
912b3f5b
DM
1997 notes: {
1998 description: "A multiline text.",
1999 },
2000 },
2001 },
2002 access: {
7d6fc15b
TL
2003 permission: &Permission::Anybody,
2004 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2005 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
2006 },
2007)]
2008/// Set "notes" for a specific backup
bf78f708 2009pub fn set_notes(
912b3f5b 2010 store: String,
bc21ade2 2011 ns: Option<BackupNamespace>,
8c74349b 2012 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
2013 notes: String,
2014 rpcenv: &mut dyn RpcEnvironment,
2015) -> Result<(), Error> {
7d6fc15b 2016 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2017 let ns = ns.unwrap_or_default();
ea2e91e5 2018
7a404dc5 2019 let datastore = check_privs_and_load_store(
abd82485
FG
2020 &store,
2021 &ns,
7d6fc15b 2022 &auth_id,
2bc2435a
FG
2023 PRIV_DATASTORE_MODIFY,
2024 PRIV_DATASTORE_BACKUP,
c9396984 2025 Some(Operation::Write),
c9396984
FG
2026 &backup_dir.group,
2027 )?;
912b3f5b 2028
fbfb64a6 2029 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
912b3f5b 2030
133d718f
WB
2031 backup_dir
2032 .update_manifest(|manifest| {
dc7a5b34
TL
2033 manifest.unprotected["notes"] = notes.into();
2034 })
2035 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
2036
2037 Ok(())
2038}
2039
8292d3d2
DC
2040#[api(
2041 input: {
2042 properties: {
988d575d 2043 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2044 ns: {
133d718f
WB
2045 type: BackupNamespace,
2046 optional: true,
2047 },
8c74349b
WB
2048 backup_dir: {
2049 type: pbs_api_types::BackupDir,
2050 flatten: true,
2051 },
8292d3d2
DC
2052 },
2053 },
2054 access: {
7d6fc15b
TL
2055 permission: &Permission::Anybody,
2056 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2057 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2058 },
2059)]
2060/// Query protection for a specific backup
2061pub fn get_protection(
2062 store: String,
bc21ade2 2063 ns: Option<BackupNamespace>,
8c74349b 2064 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2065 rpcenv: &mut dyn RpcEnvironment,
2066) -> Result<bool, Error> {
7d6fc15b 2067 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2068 let ns = ns.unwrap_or_default();
7a404dc5 2069 let datastore = check_privs_and_load_store(
abd82485
FG
2070 &store,
2071 &ns,
7d6fc15b 2072 &auth_id,
2bc2435a
FG
2073 PRIV_DATASTORE_AUDIT,
2074 PRIV_DATASTORE_BACKUP,
c9396984 2075 Some(Operation::Read),
c9396984
FG
2076 &backup_dir.group,
2077 )?;
8292d3d2 2078
fbfb64a6 2079 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
8292d3d2 2080
6da20161 2081 Ok(backup_dir.is_protected())
8292d3d2
DC
2082}
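// Protection is a per-snapshot marker stored alongside the snapshot; while it is
// set, prune and manual snapshot removal elsewhere in the code base refuse to
// delete the snapshot.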
2083
2084#[api(
2085 input: {
2086 properties: {
988d575d 2087 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2088 ns: {
133d718f
WB
2089 type: BackupNamespace,
2090 optional: true,
2091 },
8c74349b
WB
2092 backup_dir: {
2093 type: pbs_api_types::BackupDir,
2094 flatten: true,
2095 },
8292d3d2
DC
2096 protected: {
2097 description: "Enable/disable protection.",
2098 },
2099 },
2100 },
2101 access: {
7d6fc15b
TL
2102 permission: &Permission::Anybody,
2103 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2104 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2105 },
2106)]
2107/// Enable or disable protection for a specific backup
67d7a59d 2108pub async fn set_protection(
8292d3d2 2109 store: String,
bc21ade2 2110 ns: Option<BackupNamespace>,
8c74349b 2111 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2112 protected: bool,
2113 rpcenv: &mut dyn RpcEnvironment,
2114) -> Result<(), Error> {
7d6fc15b 2115 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
8292d3d2 2116
67d7a59d
WB
2117 tokio::task::spawn_blocking(move || {
2118 let ns = ns.unwrap_or_default();
2119 let datastore = check_privs_and_load_store(
2120 &store,
2121 &ns,
2122 &auth_id,
2123 PRIV_DATASTORE_MODIFY,
2124 PRIV_DATASTORE_BACKUP,
2125 Some(Operation::Write),
2126 &backup_dir.group,
2127 )?;
2128
2129 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
8292d3d2 2130
67d7a59d
WB
2131 datastore.update_protection(&backup_dir, protected)
2132 })
2133 .await?
8292d3d2
DC
2134}
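// Note: set_protection (above) and set_backup_owner (below) wrap their filesystem
// work in tokio::task::spawn_blocking so the blocking I/O does not stall the
// async API worker threads.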
2135
72be0eb1 2136#[api(
4940012d 2137 input: {
72be0eb1 2138 properties: {
988d575d 2139 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2140 ns: {
133d718f
WB
2141 type: BackupNamespace,
2142 optional: true,
2143 },
8c74349b
WB
2144 backup_group: {
2145 type: pbs_api_types::BackupGroup,
2146 flatten: true,
2147 },
72be0eb1 2148 "new-owner": {
e6dc35ac 2149 type: Authid,
72be0eb1
DW
2150 },
2151 },
4940012d
FG
2152 },
2153 access: {
bff85572 2154 permission: &Permission::Anybody,
7d6fc15b
TL
2155 description: "Datastore.Modify on whole datastore, or changing ownership between user and \
2156 a user's token for owned backups with Datastore.Backup"
4940012d 2157 },
72be0eb1
DW
2158)]
2159/// Change owner of a backup group
979b3784 2160pub async fn set_backup_owner(
72be0eb1 2161 store: String,
bc21ade2 2162 ns: Option<BackupNamespace>,
8c74349b 2163 backup_group: pbs_api_types::BackupGroup,
e6dc35ac 2164 new_owner: Authid,
bff85572 2165 rpcenv: &mut dyn RpcEnvironment,
72be0eb1 2166) -> Result<(), Error> {
bff85572 2167 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1909ece2 2168
979b3784
WB
2169 tokio::task::spawn_blocking(move || {
2170 let ns = ns.unwrap_or_default();
2171 let owner_check_required = check_ns_privs_full(
2172 &store,
2173 &ns,
2174 &auth_id,
2175 PRIV_DATASTORE_MODIFY,
2176 PRIV_DATASTORE_BACKUP,
2177 )?;
1909ece2 2178
979b3784 2179 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
bff85572 2180
979b3784 2181 let backup_group = datastore.backup_group(ns, backup_group);
bff85572 2182
979b3784
WB
2183 if owner_check_required {
2184 let owner = backup_group.get_owner()?;
bff85572 2185
979b3784
WB
2186 let allowed = match (owner.is_token(), new_owner.is_token()) {
2187 (true, true) => {
2188 // API token to API token, owned by same user
2189 let owner = owner.user();
2190 let new_owner = new_owner.user();
2191 owner == new_owner && Authid::from(owner.clone()) == auth_id
2192 }
2193 (true, false) => {
2194 // API token to API token owner
2195 Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
2196 }
2197 (false, true) => {
2198 // API token owner to API token
2199 owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
2200 }
2201 (false, false) => {
2202 // User to User, not allowed for unprivileged users
2203 false
2204 }
2205 };
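            // In short: without Datastore.Modify an auth id may only move a group
            // between itself and its own API tokens; user-to-user transfers always
            // require the privileged path (owner_check_required == false).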
2206
2207 if !allowed {
2208 return Err(http_err!(
2209 UNAUTHORIZED,
2210 "{} does not have permission to change owner of backup group '{}' to {}",
2211 auth_id,
2212 backup_group.group(),
2213 new_owner,
2214 ));
2215 }
2bc2435a 2216 }
bff85572 2217
979b3784 2218 let user_info = CachedUserInfo::new()?;
7d6fc15b 2219
979b3784
WB
2220 if !user_info.is_active_auth_id(&new_owner) {
2221 bail!(
2222 "{} '{}' is inactive or non-existent",
2223 if new_owner.is_token() {
2224 "API token".to_string()
2225 } else {
2226 "user".to_string()
2227 },
2228 new_owner
2229 );
2230 }
72be0eb1 2231
979b3784 2232 backup_group.set_owner(&new_owner, true)?;
72be0eb1 2233
979b3784
WB
2234 Ok(())
2235 })
2236 .await?
72be0eb1
DW
2237}
2238
552c2259 2239#[sortable]
255f378a 2240const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
5fd823c3
HL
2241 (
2242 "active-operations",
dc7a5b34 2243 &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
5b1cfa01 2244 ),
dc7a5b34 2245 ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
72be0eb1
DW
2246 (
2247 "change-owner",
dc7a5b34 2248 &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
72be0eb1 2249 ),
255f378a
DM
2250 (
2251 "download",
dc7a5b34 2252 &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
255f378a 2253 ),
6ef9bb59
DC
2254 (
2255 "download-decoded",
dc7a5b34 2256 &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
255f378a 2257 ),
dc7a5b34 2258 ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
255f378a
DM
2259 (
2260 "gc",
2261 &Router::new()
2262 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
dc7a5b34 2263 .post(&API_METHOD_START_GARBAGE_COLLECTION),
255f378a 2264 ),
d6688884
SR
2265 (
2266 "group-notes",
2267 &Router::new()
2268 .get(&API_METHOD_GET_GROUP_NOTES)
dc7a5b34 2269 .put(&API_METHOD_SET_GROUP_NOTES),
d6688884 2270 ),
255f378a
DM
2271 (
2272 "groups",
2273 &Router::new()
b31c8019 2274 .get(&API_METHOD_LIST_GROUPS)
dc7a5b34 2275 .delete(&API_METHOD_DELETE_GROUP),
255f378a 2276 ),
18934ae5
TL
2277 (
2278 "namespace",
2279 // FIXME: move into datastore:: sub-module?!
2280 &crate::api2::admin::namespace::ROUTER,
2281 ),
912b3f5b
DM
2282 (
2283 "notes",
2284 &Router::new()
2285 .get(&API_METHOD_GET_NOTES)
dc7a5b34 2286 .put(&API_METHOD_SET_NOTES),
912b3f5b 2287 ),
8292d3d2
DC
2288 (
2289 "protected",
2290 &Router::new()
2291 .get(&API_METHOD_GET_PROTECTION)
dc7a5b34 2292 .put(&API_METHOD_SET_PROTECTION),
255f378a 2293 ),
dc7a5b34 2294 ("prune", &Router::new().post(&API_METHOD_PRUNE)),
9805207a
DC
2295 (
2296 "prune-datastore",
dc7a5b34 2297 &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
9805207a 2298 ),
d33d8f4e
DC
2299 (
2300 "pxar-file-download",
dc7a5b34 2301 &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
1a0d3d11 2302 ),
dc7a5b34 2303 ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
255f378a
DM
2304 (
2305 "snapshots",
2306 &Router::new()
fc189b19 2307 .get(&API_METHOD_LIST_SNAPSHOTS)
dc7a5b34 2308 .delete(&API_METHOD_DELETE_SNAPSHOT),
255f378a 2309 ),
dc7a5b34 2310 ("status", &Router::new().get(&API_METHOD_STATUS)),
255f378a
DM
2311 (
2312 "upload-backup-log",
dc7a5b34 2313 &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
c2009e53 2314 ),
dc7a5b34 2315 ("verify", &Router::new().post(&API_METHOD_VERIFY)),
255f378a
DM
2316];
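// The subdirectory names above are kept in alphabetical order; the router matches
// the next path segment against this map, so e.g. "<store>/notes" and
// "<store>/protected" dispatch to the GET/PUT handlers registered here.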
2317
ad51d02a 2318const DATASTORE_INFO_ROUTER: Router = Router::new()
255f378a
DM
2319 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2320 .subdirs(DATASTORE_INFO_SUBDIRS);
2321
255f378a 2322pub const ROUTER: Router = Router::new()
bb34b589 2323 .get(&API_METHOD_GET_DATASTORE_LIST)
255f378a 2324 .match_all("store", &DATASTORE_INFO_ROUTER);
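// ROUTER itself is mounted by the parent api2::admin module; with the usual
// layout this yields endpoints of the form (illustrative):
//
//   GET  .../admin/datastore                    -> list datastores
//   GET  .../admin/datastore/<store>/status     -> datastore status
//   POST .../admin/datastore/<store>/prune      -> prune a backup group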