//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
    Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
    KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
    ListAccessibleBackupGroups, NS_PRIVS_OK,
};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(store, operation)?;

    if limited {
        let owner = datastore.get_owner(ns, backup_group)?;
        check_backup_owner(&owner, auth_id)?;
    }

    Ok(datastore)
}
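// Illustrative call pattern (sketch, mirroring the handlers below): an endpoint that needs
// read access to a snapshot group would do something like
//
//     let datastore = check_privs_and_load_store(
//         &store, &ns, &auth_id,
//         PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP,
//         Some(Operation::Read), &backup_dir.group,
//     )?;
//
// i.e. the first privilege grants unrestricted access, while the second only grants access
// to groups owned by the caller.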

fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}
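// Note: files listed in the manifest carry their size and crypt mode, while files that only
// exist on disk (i.e. are not referenced by the manifest) are appended without any metadata.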

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    datastore
        .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            let owner = match datastore.get_owner(&ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        print_store_and_ns(&store, &ns),
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}
8f579717 254
f32791b4
DC
255#[api(
256 input: {
257 properties: {
988d575d 258 store: { schema: DATASTORE_SCHEMA },
bc21ade2 259 ns: {
133d718f
WB
260 type: BackupNamespace,
261 optional: true,
262 },
8c74349b
WB
263 group: {
264 type: pbs_api_types::BackupGroup,
265 flatten: true,
266 },
f32791b4
DC
267 },
268 },
269 access: {
7d6fc15b
TL
270 permission: &Permission::Anybody,
271 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\
272 or DATASTORE_PRUNE and being the owner of the group",
f32791b4
DC
273 },
274)]
275/// Delete backup group including all snapshots.
6f67dc11 276pub async fn delete_group(
f32791b4 277 store: String,
bc21ade2 278 ns: Option<BackupNamespace>,
8c74349b 279 group: pbs_api_types::BackupGroup,
f32791b4
DC
280 rpcenv: &mut dyn RpcEnvironment,
281) -> Result<Value, Error> {
f32791b4 282 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
f32791b4 283
6f67dc11
WB
284 tokio::task::spawn_blocking(move || {
285 let ns = ns.unwrap_or_default();
286
287 let datastore = check_privs_and_load_store(
288 &store,
289 &ns,
290 &auth_id,
291 PRIV_DATASTORE_MODIFY,
292 PRIV_DATASTORE_PRUNE,
293 Some(Operation::Write),
294 &group,
295 )?;
296
297 if !datastore.remove_backup_group(&ns, &group)? {
298 bail!("group only partially deleted due to protected snapshots");
299 }
f32791b4 300
6f67dc11
WB
301 Ok(Value::Null)
302 })
303 .await?
f32791b4
DC
304}
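// Like several handlers below, the blocking datastore work is moved off the async executor
// with tokio::task::spawn_blocking and only awaited here.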

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub async fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        let info = BackupInfo::new(snapshot)?;

        let (_manifest, files) = get_all_snapshot_files(&info)?;

        Ok(files)
    })
    .await?
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub async fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        snapshot.destroy(false)?;

        Ok(Value::Null)
    })
    .await?
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
pub async fn list_snapshots(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || unsafe {
        list_snapshots_blocking(store, ns, backup_type, backup_id, auth_id)
    })
    .await
    .map_err(|err| format_err!("failed to await blocking task: {err}"))?
}

/// This must not run in a main worker thread as it potentially does tons of I/O.
unsafe fn list_snapshots_blocking(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    auth_id: Authid,
) -> Result<Vec<SnapshotListItem>, Error> {
    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_type_ok(ns.clone(), backup_type)?
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => BackupType::iter()
            .filter_map(|backup_type| {
                let group =
                    datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id.clone());
                group.exists().then_some(group)
            })
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(ns.clone())?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state: '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}' in {} - {}",
                    group.group(),
                    print_store_and_ns(&store, &ns),
                    err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

async fn get_snapshots_count(
    store: &Arc<DataStore>,
    owner: Option<&Authid>,
) -> Result<Counts, Error> {
    let store = Arc::clone(store);
    let owner = owner.cloned();
    tokio::task::spawn_blocking(move || {
        let root_ns = Default::default();
        ListAccessibleBackupGroups::new_with_privs(
            &store,
            root_ns,
            MAX_NAMESPACE_DEPTH,
            Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
            None,
            owner.as_ref(),
        )?
        .try_fold(Counts::default(), |mut counts, group| {
            let group = match group {
                Ok(group) => group,
                Err(_) => return Ok(counts), // TODO: add this as error counts?
            };
            let snapshot_count = group.list_backups()?.len() as u64;

            // only include groups with snapshots, counting/displaying empty groups can confuse
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                    BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                    BackupType::Host => counts.host.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
    })
    .await?
}
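// Note: groups are enumerated with ListAccessibleBackupGroups from the root namespace down
// to MAX_NAMESPACE_DEPTH; counts are split per backup type (ct/vm/host) and groups without
// snapshots are skipped.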

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
            the full statistics. Counts of accessible groups are always returned, if any",
    },
)]
/// Get datastore status.
pub async fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read));

    let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
        true
    } else if store_privs & PRIV_DATASTORE_READ != 0 {
        false // allow at least counts, user can read groups anyway..
    } else {
        match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) {
            // avoid leaking existence info if the user doesn't have at least some privilege below
            Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")),
            _ => false,
        }
    };
    let datastore = datastore?; // only unwrap now, to avoid leaking existence info

    let (counts, gc_status) = if verbose {
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner).await?);
        let gc_status = if store_stats {
            Some(datastore.last_gc_status())
        } else {
            None
        };

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(if store_stats {
        let storage = crate::tools::fs::fs_info(datastore.base_path()).await?;
        DataStoreStatus {
            total: storage.total,
            used: storage.used,
            avail: storage.available,
            gc_status,
            counts,
        }
    } else {
        DataStoreStatus {
            total: 0,
            used: 0,
            avail: 0,
            gc_status,
            counts,
        }
    })
}
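// Permission tiers used above: DATASTORE_AUDIT or DATASTORE_BACKUP yield the full storage
// statistics, DATASTORE_READ only the (owner-filtered) counts, and callers without any
// privilege somewhere below the datastore path get a FORBIDDEN error so the datastore's
// existence is not leaked.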

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
#[allow(clippy::too_many_arguments)]
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let owner_check_required = check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if ns.is_root() {
                store
            } else {
                format!("{}:{}", store, ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            } else {
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
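// The parameter combinations map to three worker types: (type, id, time) verifies a single
// snapshot ("verify_snapshot"), (type, id) a whole group ("verify_group"), and no selector
// at all verifies every backup in the given namespace ("verify"); any other combination is
// rejected.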

#[api(
    input: {
        properties: {
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "keep-options": {
                type: KeepOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    keep_options: KeepOptions,
    store: String,
    ns: Option<BackupNamespace>,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    let worker_id = format!("{}:{}:{}", store, ns, group);
    let group = datastore.backup_group(ns.clone(), group);

    let mut prune_result = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &keep_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !keep_options.keeps_something();

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let mut result = json!({
                "backup-type": info.backup_dir.backup_type(),
                "backup-id": info.backup_dir.backup_id(),
                "backup-time": info.backup_dir.backup_time(),
                "keep": keep,
                "protected": mark.protected(),
            });
            let prune_ns = info.backup_dir.backup_ns();
            if !prune_ns.is_root() {
                result["ns"] = serde_json::to_value(prune_ns)?;
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        let mut opts = Vec::new();
        if !ns.is_root() {
            opts.push(format!("--ns {ns}"));
        }
        crate::server::cli_keep_options(&mut opts, &keep_options);

        task_log!(worker, "retention options: {}", opts.join(" "));
        task_log!(
            worker,
            "Starting prune on {} group \"{}\"",
            print_store_and_ns(&store, &ns),
            group.group(),
        );
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();

        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.ty,
            "backup-id": group.id,
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = info.backup_dir.destroy(false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
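// With "dry-run" the selection is computed and returned without starting a worker task;
// otherwise a synchronous "prune" WorkerTask is created purely so the per-snapshot keep /
// remove decisions end up in a task log.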

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneJobOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneJobOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let user_info = CachedUserInfo::new()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    user_info.check_privs(
        &auth_id,
        &prune_options.acl_path(&store),
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let ns = prune_options.ns.clone().unwrap_or_default();
    let worker_id = format!("{}:{}", store, ns);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let acl_path = &["datastore", store];
        let user_privs = user_info.lookup_privs(&auth_id, acl_path);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut allow_id = false;
        if !allowed {
            if let Ok(any_privs) = user_info.any_privs_below(&auth_id, acl_path, NS_PRIVS_OK) {
                allow_id = any_privs;
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(store, &backup_ns),
            backup_dir,
            file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}
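// download_file streams the raw on-disk file as-is; download_file_decoded below refuses
// encrypted files, verifies the index/blob checksum against the manifest and streams the
// decoded payload instead.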

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir_api.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(store, &backup_ns),
            backup_dir_api,
            file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let (_, extension) = file_name.rsplit_once('.').unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            0,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir_api.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let mut path = backup_dir.full_path();
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!(
            "Upload backup log to {} {backup_dir_api}/{file_name}",
            print_store_and_ns(store, &backup_ns),
        );

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub async fn catalog(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let file_name = CATALOG_NAME;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(file_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(file_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);

        let mut catalog_reader = CatalogReader::new(reader);

        let path = if filepath != "root" && filepath != "/" {
            base64::decode(filepath)?
        } else {
            vec![b'/']
        };

        catalog_reader.list_dir_contents(&path)
    })
    .await?
}
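// The "filepath" parameter is base64 encoded; the literal values "root" and "/" are treated
// as the catalog root ("/").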
1632
d33d8f4e
DC
1633#[sortable]
1634pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1635 &ApiHandler::AsyncHttp(&pxar_file_download),
1636 &ObjectSchema::new(
1ffe0301 1637 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
d33d8f4e
DC
1638 &sorted!([
1639 ("store", false, &DATASTORE_SCHEMA),
bc21ade2 1640 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
d33d8f4e
DC
1641 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1642 ("backup-id", false, &BACKUP_ID_SCHEMA),
1643 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1644 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
984ddb2f 1645 ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
d33d8f4e
DC
1646 ]),
1647 )
7d6fc15b
TL
1648).access(
1649 Some(
1650 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1651 DATASTORE_BACKUP and being the owner of the group",
1652 ),
1653 &Permission::Anybody,
d33d8f4e
DC
1654);
1655
bf78f708 1656pub fn pxar_file_download(
d33d8f4e
DC
1657 _parts: Parts,
1658 _req_body: Body,
1659 param: Value,
1660 _info: &ApiMethod,
1661 rpcenv: Box<dyn RpcEnvironment>,
1662) -> ApiResponseFuture {
d33d8f4e 1663 async move {
7d6fc15b 1664 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
3c8c2827 1665 let store = required_string_param(&param, "store")?;
bc21ade2 1666 let ns = optional_ns_param(&param)?;
abd82485 1667
7d6fc15b 1668 let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
7a404dc5 1669 let datastore = check_privs_and_load_store(
e1db0670 1670 store,
abd82485 1671 &ns,
7d6fc15b 1672 &auth_id,
2bc2435a
FG
1673 PRIV_DATASTORE_READ,
1674 PRIV_DATASTORE_BACKUP,
c9396984 1675 Some(Operation::Read),
c9396984
FG
1676 &backup_dir.group,
1677 )?;
a724f5fd 1678
bc21ade2 1679 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
d33d8f4e 1680
3c8c2827 1681 let filepath = required_string_param(&param, "filepath")?.to_owned();
d33d8f4e 1682
984ddb2f
DC
1683 let tar = param["tar"].as_bool().unwrap_or(false);
1684
d33d8f4e 1685 let mut components = base64::decode(&filepath)?;
3984a5fd 1686 if !components.is_empty() && components[0] == b'/' {
d33d8f4e
DC
1687 components.remove(0);
1688 }
1689
d8d8af98 1690 let mut split = components.splitn(2, |c| *c == b'/');
9238cdf5 1691 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
0dfce17a 1692 let file_path = split.next().unwrap_or(b"/");
9ccf933b 1693 let (manifest, files) = read_backup_index(&backup_dir)?;
9238cdf5
FG
1694 for file in files {
1695 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1696 bail!("cannot decode '{}' - is encrypted", pxar_name);
1697 }
1698 }
d33d8f4e 1699
9238cdf5
FG
1700 let mut path = datastore.base_path();
1701 path.push(backup_dir.relative_path());
1702 path.push(pxar_name);
d33d8f4e
DC
1703
1704 let index = DynamicIndexReader::open(&path)
1705 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1706
2d55beec 1707 let (csum, size) = index.compute_csum();
9a37bd6c 1708 manifest.verify_file(pxar_name, &csum, size)?;
2d55beec 1709
14f6c9cb 1710 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
d33d8f4e
DC
1711 let reader = BufferedDynamicReader::new(index, chunk_reader);
1712 let archive_size = reader.archive_size();
1713 let reader = LocalDynamicReadAt::new(reader);
1714
1715 let decoder = Accessor::new(reader, archive_size).await?;
1716 let root = decoder.open_root().await?;
2e219481 1717 let path = OsStr::from_bytes(file_path).to_os_string();
d33d8f4e 1718 let file = root
dc7a5b34
TL
1719 .lookup(&path)
1720 .await?
2e219481 1721 .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
d33d8f4e 1722
804f6143
DC
1723 let body = match file.kind() {
1724 EntryKind::File { .. } => Body::wrap_stream(
1725 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1726 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1727 err
1728 }),
1729 ),
1730 EntryKind::Hardlink(_) => Body::wrap_stream(
1731 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1732 .map_err(move |err| {
dc7a5b34 1733 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
804f6143
DC
1734 err
1735 }),
1736 ),
1737 EntryKind::Directory => {
984ddb2f 1738 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
804f6143 1739 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
984ddb2f 1740 if tar {
dc7a5b34
TL
1741 proxmox_rest_server::spawn_internal_task(create_tar(
1742 channelwriter,
1743 decoder,
1744 path.clone(),
dc7a5b34 1745 ));
984ddb2f
DC
1746 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1747 Body::wrap_stream(zstdstream.map_err(move |err| {
0608b36b 1748 log::error!("error during streaming of tar.zst '{:?}' - {}", path, err);
984ddb2f
DC
1749 err
1750 }))
1751 } else {
dc7a5b34
TL
1752 proxmox_rest_server::spawn_internal_task(create_zip(
1753 channelwriter,
1754 decoder,
1755 path.clone(),
dc7a5b34 1756 ));
984ddb2f 1757 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
0608b36b 1758 log::error!("error during streaming of zip '{:?}' - {}", path, err);
984ddb2f
DC
1759 err
1760 }))
1761 }
804f6143
DC
1762 }
1763 other => bail!("cannot download file of type {:?}", other),
1764 };
d33d8f4e
DC
1765
1766 // fixme: set other headers ?
1767 Ok(Response::builder()
dc7a5b34
TL
1768 .status(StatusCode::OK)
1769 .header(header::CONTENT_TYPE, "application/octet-stream")
1770 .body(body)
1771 .unwrap())
1772 }
1773 .boxed()
d33d8f4e
DC
1774}
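// Illustrative sketch (assumption, not part of the upstream module): constructing the
// base64 `filepath` parameter that `pxar_file_download` decodes above. The decoded form
// is "<pxar archive name>/<path inside the archive>", optionally with a leading '/',
// which the handler strips before splitting at the first '/'. For directories, passing
// `tar=true` selects a zstd-compressed tar stream instead of the default zip archive.
fn _example_pxar_download_filepath(archive: &str, inner_path: &str) -> String {
    // hypothetical names, e.g. archive = "root.pxar.didx", inner_path = "etc/hostname"
    base64::encode(format!("/{}/{}", archive, inner_path))
}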
1775
1a0d3d11
DM
1776#[api(
1777 input: {
1778 properties: {
1779 store: {
1780 schema: DATASTORE_SCHEMA,
1781 },
1782 timeframe: {
c68fa58a 1783 type: RRDTimeFrame,
1a0d3d11
DM
1784 },
1785 cf: {
1786 type: RRDMode,
1787 },
1788 },
1789 },
1790 access: {
7d6fc15b
TL
1791 permission: &Permission::Privilege(
1792 &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1a0d3d11
DM
1793 },
1794)]
1795/// Read datastore RRD statistics
bf78f708 1796pub fn get_rrd_stats(
1a0d3d11 1797 store: String,
c68fa58a 1798 timeframe: RRDTimeFrame,
1a0d3d11
DM
1799 cf: RRDMode,
1800 _param: Value,
1801) -> Result<Value, Error> {
e9d2fc93 1802 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1803 let disk_manager = crate::tools::disks::DiskManage::new();
1804
1805 let mut rrd_fields = vec![
dc7a5b34 1806 "total",
de923258 1807 "available",
dc7a5b34
TL
1808 "used",
1809 "read_ios",
1810 "read_bytes",
1811 "write_ios",
1812 "write_bytes",
f27b6086
DC
1813 ];
1814
1815 // we do not have io_ticks for zpools, so don't include them
1816 match disk_manager.find_mounted_device(&datastore.base_path()) {
dc7a5b34 1817 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
f27b6086
DC
1818 _ => rrd_fields.push("io_ticks"),
1819 };
1820
dc7a5b34 1821 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1a0d3d11
DM
1822}
1823
5fd823c3
HL
1824#[api(
1825 input: {
1826 properties: {
1827 store: {
1828 schema: DATASTORE_SCHEMA,
1829 },
1830 },
1831 },
1832 access: {
1833 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1834 },
1835)]
1836/// Read the number of active read and write operations on a datastore
dc7a5b34 1837pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
5fd823c3
HL
1838 let active_operations = task_tracking::get_active_operations(&store)?;
1839 Ok(json!({
1840 "read": active_operations.read,
1841 "write": active_operations.write,
1842 }))
1843}
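// The handler above returns a JSON object of the form
//   { "read": <active read operations>, "write": <active write operations> }
// with the per-datastore counts tracked by `task_tracking::get_active_operations`.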
1844
d6688884
SR
1845#[api(
1846 input: {
1847 properties: {
988d575d 1848 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1849 ns: {
133d718f
WB
1850 type: BackupNamespace,
1851 optional: true,
1852 },
8c74349b
WB
1853 backup_group: {
1854 type: pbs_api_types::BackupGroup,
1855 flatten: true,
1856 },
d6688884
SR
1857 },
1858 },
1859 access: {
7d6fc15b
TL
1860 permission: &Permission::Anybody,
1861 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1862 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1863 },
1864)]
1865/// Get "notes" for a backup group
1866pub fn get_group_notes(
1867 store: String,
bc21ade2 1868 ns: Option<BackupNamespace>,
8c74349b 1869 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1870 rpcenv: &mut dyn RpcEnvironment,
1871) -> Result<String, Error> {
d6688884 1872 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 1873 let ns = ns.unwrap_or_default();
ea2e91e5 1874
7a404dc5 1875 let datastore = check_privs_and_load_store(
abd82485
FG
1876 &store,
1877 &ns,
7d6fc15b 1878 &auth_id,
2bc2435a
FG
1879 PRIV_DATASTORE_AUDIT,
1880 PRIV_DATASTORE_BACKUP,
c9396984 1881 Some(Operation::Read),
c9396984
FG
1882 &backup_group,
1883 )?;
d6688884 1884
abd82485 1885 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
d6688884
SR
1886 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1887}
1888
1889#[api(
1890 input: {
1891 properties: {
988d575d 1892 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1893 ns: {
133d718f
WB
1894 type: BackupNamespace,
1895 optional: true,
1896 },
8c74349b
WB
1897 backup_group: {
1898 type: pbs_api_types::BackupGroup,
1899 flatten: true,
1900 },
d6688884
SR
1901 notes: {
1902 description: "A multiline text.",
1903 },
1904 },
1905 },
1906 access: {
7d6fc15b
TL
1907 permission: &Permission::Anybody,
1908 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
1909 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1910 },
1911)]
1912/// Set "notes" for a backup group
1913pub fn set_group_notes(
1914 store: String,
bc21ade2 1915 ns: Option<BackupNamespace>,
8c74349b 1916 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1917 notes: String,
1918 rpcenv: &mut dyn RpcEnvironment,
1919) -> Result<(), Error> {
d6688884 1920 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485
FG
1921 let ns = ns.unwrap_or_default();
1922
7a404dc5 1923 let datastore = check_privs_and_load_store(
abd82485
FG
1924 &store,
1925 &ns,
7d6fc15b 1926 &auth_id,
2bc2435a
FG
1927 PRIV_DATASTORE_MODIFY,
1928 PRIV_DATASTORE_BACKUP,
c9396984 1929 Some(Operation::Write),
c9396984
FG
1930 &backup_group,
1931 )?;
d6688884 1932
abd82485 1933 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
e0a19d33 1934 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
d6688884
SR
1935
1936 Ok(())
1937}
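// Group notes are kept as a plain text file at the path computed by
// `get_group_note_path` and rewritten via `replace_file`; snapshot notes, in contrast,
// are stored inside the snapshot manifest (see `get_notes`/`set_notes` below).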
1938
912b3f5b
DM
1939#[api(
1940 input: {
1941 properties: {
988d575d 1942 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1943 ns: {
133d718f
WB
1944 type: BackupNamespace,
1945 optional: true,
1946 },
8c74349b
WB
1947 backup_dir: {
1948 type: pbs_api_types::BackupDir,
1949 flatten: true,
1950 },
912b3f5b
DM
1951 },
1952 },
1953 access: {
7d6fc15b
TL
1954 permission: &Permission::Anybody,
1955 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1956 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
1957 },
1958)]
1959/// Get "notes" for a specific backup
bf78f708 1960pub fn get_notes(
912b3f5b 1961 store: String,
bc21ade2 1962 ns: Option<BackupNamespace>,
8c74349b 1963 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
1964 rpcenv: &mut dyn RpcEnvironment,
1965) -> Result<String, Error> {
7d6fc15b 1966 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 1967 let ns = ns.unwrap_or_default();
ea2e91e5 1968
7a404dc5 1969 let datastore = check_privs_and_load_store(
abd82485
FG
1970 &store,
1971 &ns,
7d6fc15b 1972 &auth_id,
2bc2435a
FG
1973 PRIV_DATASTORE_AUDIT,
1974 PRIV_DATASTORE_BACKUP,
c9396984 1975 Some(Operation::Read),
c9396984
FG
1976 &backup_dir.group,
1977 )?;
912b3f5b 1978
fbfb64a6 1979 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
912b3f5b 1980
133d718f 1981 let (manifest, _) = backup_dir.load_manifest()?;
912b3f5b 1982
dc7a5b34 1983 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
912b3f5b
DM
1984
1985 Ok(String::from(notes))
1986}
1987
1988#[api(
1989 input: {
1990 properties: {
988d575d 1991 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1992 ns: {
133d718f
WB
1993 type: BackupNamespace,
1994 optional: true,
1995 },
8c74349b
WB
1996 backup_dir: {
1997 type: pbs_api_types::BackupDir,
1998 flatten: true,
1999 },
912b3f5b
DM
2000 notes: {
2001 description: "A multiline text.",
2002 },
2003 },
2004 },
2005 access: {
7d6fc15b
TL
2006 permission: &Permission::Anybody,
2007 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2008 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
2009 },
2010)]
2011/// Set "notes" for a specific backup
bf78f708 2012pub fn set_notes(
912b3f5b 2013 store: String,
bc21ade2 2014 ns: Option<BackupNamespace>,
8c74349b 2015 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
2016 notes: String,
2017 rpcenv: &mut dyn RpcEnvironment,
2018) -> Result<(), Error> {
7d6fc15b 2019 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2020 let ns = ns.unwrap_or_default();
ea2e91e5 2021
7a404dc5 2022 let datastore = check_privs_and_load_store(
abd82485
FG
2023 &store,
2024 &ns,
7d6fc15b 2025 &auth_id,
2bc2435a
FG
2026 PRIV_DATASTORE_MODIFY,
2027 PRIV_DATASTORE_BACKUP,
c9396984 2028 Some(Operation::Write),
c9396984
FG
2029 &backup_dir.group,
2030 )?;
912b3f5b 2031
fbfb64a6 2032 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
912b3f5b 2033
133d718f
WB
2034 backup_dir
2035 .update_manifest(|manifest| {
dc7a5b34
TL
2036 manifest.unprotected["notes"] = notes.into();
2037 })
2038 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
2039
2040 Ok(())
2041}
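// Snapshot notes are not kept in a separate file: they live under the "notes" key of
// the manifest's `unprotected` section, so changing them only rewrites the manifest
// blob via `update_manifest` and leaves the archive data untouched.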
2042
8292d3d2
DC
2043#[api(
2044 input: {
2045 properties: {
988d575d 2046 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2047 ns: {
133d718f
WB
2048 type: BackupNamespace,
2049 optional: true,
2050 },
8c74349b
WB
2051 backup_dir: {
2052 type: pbs_api_types::BackupDir,
2053 flatten: true,
2054 },
8292d3d2
DC
2055 },
2056 },
2057 access: {
7d6fc15b
TL
2058 permission: &Permission::Anybody,
2059 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2060 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2061 },
2062)]
2063/// Query protection for a specific backup
2064pub fn get_protection(
2065 store: String,
bc21ade2 2066 ns: Option<BackupNamespace>,
8c74349b 2067 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2068 rpcenv: &mut dyn RpcEnvironment,
2069) -> Result<bool, Error> {
7d6fc15b 2070 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2071 let ns = ns.unwrap_or_default();
7a404dc5 2072 let datastore = check_privs_and_load_store(
abd82485
FG
2073 &store,
2074 &ns,
7d6fc15b 2075 &auth_id,
2bc2435a
FG
2076 PRIV_DATASTORE_AUDIT,
2077 PRIV_DATASTORE_BACKUP,
c9396984 2078 Some(Operation::Read),
c9396984
FG
2079 &backup_dir.group,
2080 )?;
8292d3d2 2081
fbfb64a6 2082 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
8292d3d2 2083
6da20161 2084 Ok(backup_dir.is_protected())
8292d3d2
DC
2085}
2086
2087#[api(
2088 input: {
2089 properties: {
988d575d 2090 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2091 ns: {
133d718f
WB
2092 type: BackupNamespace,
2093 optional: true,
2094 },
8c74349b
WB
2095 backup_dir: {
2096 type: pbs_api_types::BackupDir,
2097 flatten: true,
2098 },
8292d3d2
DC
2099 protected: {
2100 description: "Enable/disable protection.",
2101 },
2102 },
2103 },
2104 access: {
7d6fc15b
TL
2105 permission: &Permission::Anybody,
2106 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2107 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2108 },
2109)]
2110/// Enable or disable protection for a specific backup
67d7a59d 2111pub async fn set_protection(
8292d3d2 2112 store: String,
bc21ade2 2113 ns: Option<BackupNamespace>,
8c74349b 2114 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2115 protected: bool,
2116 rpcenv: &mut dyn RpcEnvironment,
2117) -> Result<(), Error> {
7d6fc15b 2118 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
8292d3d2 2119
67d7a59d
WB
2120 tokio::task::spawn_blocking(move || {
2121 let ns = ns.unwrap_or_default();
2122 let datastore = check_privs_and_load_store(
2123 &store,
2124 &ns,
2125 &auth_id,
2126 PRIV_DATASTORE_MODIFY,
2127 PRIV_DATASTORE_BACKUP,
2128 Some(Operation::Write),
2129 &backup_dir.group,
2130 )?;
2131
2132 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
8292d3d2 2133
67d7a59d
WB
2134 datastore.update_protection(&backup_dir, protected)
2135 })
2136 .await?
8292d3d2
DC
2137}
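// Note on the pattern above: the privilege check and the protection update are
// synchronous datastore operations, so they are moved onto a blocking thread with
// `tokio::task::spawn_blocking` rather than being run directly inside the async
// handler; `set_backup_owner` below uses the same approach.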
2138
72be0eb1 2139#[api(
4940012d 2140 input: {
72be0eb1 2141 properties: {
988d575d 2142 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2143 ns: {
133d718f
WB
2144 type: BackupNamespace,
2145 optional: true,
2146 },
8c74349b
WB
2147 backup_group: {
2148 type: pbs_api_types::BackupGroup,
2149 flatten: true,
2150 },
72be0eb1 2151 "new-owner": {
e6dc35ac 2152 type: Authid,
72be0eb1
DW
2153 },
2154 },
4940012d
FG
2155 },
2156 access: {
bff85572 2157 permission: &Permission::Anybody,
7d6fc15b
TL
2158 description: "Datastore.Modify on whole datastore, or changing ownership between user and \
2159 a user's token for owned backups with Datastore.Backup"
4940012d 2160 },
72be0eb1
DW
2161)]
2162/// Change owner of a backup group
979b3784 2163pub async fn set_backup_owner(
72be0eb1 2164 store: String,
bc21ade2 2165 ns: Option<BackupNamespace>,
8c74349b 2166 backup_group: pbs_api_types::BackupGroup,
e6dc35ac 2167 new_owner: Authid,
bff85572 2168 rpcenv: &mut dyn RpcEnvironment,
72be0eb1 2169) -> Result<(), Error> {
bff85572 2170 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1909ece2 2171
979b3784
WB
2172 tokio::task::spawn_blocking(move || {
2173 let ns = ns.unwrap_or_default();
2174 let owner_check_required = check_ns_privs_full(
2175 &store,
2176 &ns,
2177 &auth_id,
2178 PRIV_DATASTORE_MODIFY,
2179 PRIV_DATASTORE_BACKUP,
2180 )?;
1909ece2 2181
979b3784 2182 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
bff85572 2183
979b3784 2184 let backup_group = datastore.backup_group(ns, backup_group);
bff85572 2185
979b3784
WB
2186 if owner_check_required {
2187 let owner = backup_group.get_owner()?;
bff85572 2188
979b3784
WB
2189 let allowed = match (owner.is_token(), new_owner.is_token()) {
2190 (true, true) => {
2191 // API token to API token, owned by same user
2192 let owner = owner.user();
2193 let new_owner = new_owner.user();
2194 owner == new_owner && Authid::from(owner.clone()) == auth_id
2195 }
2196 (true, false) => {
2197 // API token to API token owner
2198 Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
2199 }
2200 (false, true) => {
2201 // API token owner to API token
2202 owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
2203 }
2204 (false, false) => {
2205 // User to User, not allowed for unprivileged users
2206 false
2207 }
2208 };
2209
2210 if !allowed {
2211 return Err(http_err!(
2212 UNAUTHORIZED,
2213 "{} does not have permission to change owner of backup group '{}' to {}",
2214 auth_id,
2215 backup_group.group(),
2216 new_owner,
2217 ));
2218 }
2bc2435a 2219 }
bff85572 2220
979b3784 2221 let user_info = CachedUserInfo::new()?;
7d6fc15b 2222
979b3784
WB
2223 if !user_info.is_active_auth_id(&new_owner) {
2224 bail!(
2225 "{} '{}' is inactive or non-existent",
2226 if new_owner.is_token() {
2227 "API token".to_string()
2228 } else {
2229 "user".to_string()
2230 },
2231 new_owner
2232 );
2233 }
72be0eb1 2234
979b3784 2235 backup_group.set_owner(&new_owner, true)?;
72be0eb1 2236
979b3784
WB
2237 Ok(())
2238 })
2239 .await?
72be0eb1
DW
2240}
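// Summary of the ownership transitions permitted above when the caller lacks blanket
// modify rights (i.e. `owner_check_required` is true):
//   token -> token : both tokens belong to the calling user
//   token -> user  : the current owner token belongs to the caller and the new owner is the caller
//   user  -> token : the caller owns the group and the new token belongs to the caller
//   user  -> user  : never allowed for unprivileged callers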
2241
552c2259 2242#[sortable]
255f378a 2243const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
5fd823c3
HL
2244 (
2245 "active-operations",
dc7a5b34 2246 &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
5b1cfa01 2247 ),
dc7a5b34 2248 ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
72be0eb1
DW
2249 (
2250 "change-owner",
dc7a5b34 2251 &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
72be0eb1 2252 ),
255f378a
DM
2253 (
2254 "download",
dc7a5b34 2255 &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
255f378a 2256 ),
6ef9bb59
DC
2257 (
2258 "download-decoded",
dc7a5b34 2259 &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
255f378a 2260 ),
dc7a5b34 2261 ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
255f378a
DM
2262 (
2263 "gc",
2264 &Router::new()
2265 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
dc7a5b34 2266 .post(&API_METHOD_START_GARBAGE_COLLECTION),
255f378a 2267 ),
d6688884
SR
2268 (
2269 "group-notes",
2270 &Router::new()
2271 .get(&API_METHOD_GET_GROUP_NOTES)
dc7a5b34 2272 .put(&API_METHOD_SET_GROUP_NOTES),
d6688884 2273 ),
255f378a
DM
2274 (
2275 "groups",
2276 &Router::new()
b31c8019 2277 .get(&API_METHOD_LIST_GROUPS)
dc7a5b34 2278 .delete(&API_METHOD_DELETE_GROUP),
255f378a 2279 ),
18934ae5
TL
2280 (
2281 "namespace",
2282 // FIXME: move into datastore:: sub-module?!
2283 &crate::api2::admin::namespace::ROUTER,
2284 ),
912b3f5b
DM
2285 (
2286 "notes",
2287 &Router::new()
2288 .get(&API_METHOD_GET_NOTES)
dc7a5b34 2289 .put(&API_METHOD_SET_NOTES),
912b3f5b 2290 ),
8292d3d2
DC
2291 (
2292 "protected",
2293 &Router::new()
2294 .get(&API_METHOD_GET_PROTECTION)
dc7a5b34 2295 .put(&API_METHOD_SET_PROTECTION),
255f378a 2296 ),
dc7a5b34 2297 ("prune", &Router::new().post(&API_METHOD_PRUNE)),
9805207a
DC
2298 (
2299 "prune-datastore",
dc7a5b34 2300 &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
9805207a 2301 ),
d33d8f4e
DC
2302 (
2303 "pxar-file-download",
dc7a5b34 2304 &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
1a0d3d11 2305 ),
dc7a5b34 2306 ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
255f378a
DM
2307 (
2308 "snapshots",
2309 &Router::new()
fc189b19 2310 .get(&API_METHOD_LIST_SNAPSHOTS)
dc7a5b34 2311 .delete(&API_METHOD_DELETE_SNAPSHOT),
255f378a 2312 ),
dc7a5b34 2313 ("status", &Router::new().get(&API_METHOD_STATUS)),
255f378a
DM
2314 (
2315 "upload-backup-log",
dc7a5b34 2316 &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
c2009e53 2317 ),
dc7a5b34 2318 ("verify", &Router::new().post(&API_METHOD_VERIFY)),
255f378a
DM
2319];
2320
ad51d02a 2321const DATASTORE_INFO_ROUTER: Router = Router::new()
255f378a
DM
2322 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2323 .subdirs(DATASTORE_INFO_SUBDIRS);
2324
255f378a 2325pub const ROUTER: Router = Router::new()
bb34b589 2326 .get(&API_METHOD_GET_DATASTORE_LIST)
255f378a 2327 .match_all("store", &DATASTORE_INFO_ROUTER);
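// Routing sketch (the exact mount point is an assumption based on this file living
// under src/api2/admin): `ROUTER` resolves the `{store}` path component via
// `match_all("store", ...)` and then dispatches to the matching DATASTORE_INFO_SUBDIRS
// entry, so a request like
//   GET /api2/json/admin/datastore/{store}/status
// ends up in the "status" handler registered above.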