// [proxmox-backup.git] src/api2/admin/datastore.rs
//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    print_ns_and_snapshot, Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode,
    DataStoreListItem, DataStoreStatus, DatastoreWithNamespace, GarbageCollectionStatus,
    GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
    ListAccessibleBackupGroups,
};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}
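// The note file lives directly in the group directory, e.g. (illustrative layout):
// <datastore base path>/<namespace path>/vm/100/notes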

// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
    store_with_ns: &DatastoreWithNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs_full(
        store_with_ns,
        auth_id,
        full_access_privs,
        partial_access_privs,
    )?;

    let datastore = DataStore::lookup_datastore(&store_with_ns.store, operation)?;

    if limited {
        let owner = datastore.get_owner(&store_with_ns.ns, backup_group)?;
        check_backup_owner(&owner, &auth_id)?;
    }

    Ok(datastore)
}
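// Typical invocation, mirroring the handlers below (e.g. delete_group): full-access
// privs skip the owner check, partial-access privs require owning the group:
//
//     let datastore = check_privs_and_load_store(
//         &store_with_ns,
//         &auth_id,
//         PRIV_DATASTORE_MODIFY,
//         PRIV_DATASTORE_PRUNE,
//         Some(Operation::Write),
//         &group,
//     )?;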

fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let store_with_ns = DatastoreWithNamespace {
        store,
        ns: ns.unwrap_or_default(),
    };

    let list_all = !check_ns_privs_full(
        &store_with_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Read))?;

    datastore
        .iter_backup_groups(store_with_ns.ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            let owner = match datastore.get_owner(&store_with_ns.ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        store_with_ns,
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &store_with_ns.ns, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let store_with_ns = DatastoreWithNamespace {
        store,
        ns: ns.unwrap_or_default(),
    };

    let datastore = check_privs_and_load_store(
        &store_with_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    if !datastore.remove_backup_group(&store_with_ns.ns, &group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let store_with_ns = DatastoreWithNamespace {
        store,
        ns: ns.unwrap_or_default(),
    };

    let datastore = check_privs_and_load_store(
        &store_with_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(store_with_ns.ns, backup_dir)?;

    let info = BackupInfo::new(snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let store_with_ns = DatastoreWithNamespace {
        store,
        ns: ns.unwrap_or_default(),
    };

    let datastore = check_privs_and_load_store(
        &store_with_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(store_with_ns.ns, backup_dir)?;

    snapshot.destroy(false)?;

    Ok(Value::Null)
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();
    let store_with_ns = DatastoreWithNamespace {
        store,
        ns: ns.clone(),
    };

    let list_all = !check_ns_privs_full(
        &store_with_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Read))?;

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(ns, backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_groups_ok(ns)?
            .filter(|group| group.backup_type() == backup_type)
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => datastore
            .iter_backup_groups_ok(ns)?
            .filter(|group| group.backup_id() == backup_id)
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(ns)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state: '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}' in {} - {}",
                    group.group(),
                    &store_with_ns,
                    err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

fn get_snapshots_count(store: &Arc<DataStore>, owner: Option<&Authid>) -> Result<Counts, Error> {
    let root_ns = Default::default();
    ListAccessibleBackupGroups::new_with_privs(
        store,
        root_ns,
        MAX_NAMESPACE_DEPTH,
        Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
        None,
        owner,
    )?
    .try_fold(Counts::default(), |mut counts, group| {
        let group = match group {
            Ok(group) => group,
            Err(_) => return Ok(counts), // TODO: add this as error counts?
        };
        let snapshot_count = group.list_backups()?.len() as u64;

        // only include groups with snapshots, counting/displaying empty groups can confuse
        if snapshot_count > 0 {
            let type_count = match group.backup_type() {
                BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                BackupType::Host => counts.host.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;
        }

        Ok(counts)
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let store_with_ns = DatastoreWithNamespace {
        store,
        ns: ns.clone(),
    };

    let owner_check_required = check_ns_privs_full(
        &store_with_ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store_with_ns.store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store_with_ns.store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if ns.is_root() {
                format!("{}", store_with_ns.store)
            } else {
                format!("{}:{}", store_with_ns.store, ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }
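    // at this point worker_id is one of (illustrative values):
    //   "store:ns/path/vm/100/61ABCDEF"  - a single snapshot (time as 8-digit hex)
    //   "store:ns/path/vm/100"           - a whole backup group
    //   "store" or "store:ns/path"       - the datastore root / a namespace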

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?;
                failed_dirs
            } else {
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let store_with_ns = DatastoreWithNamespace {
        store,
        ns: ns.unwrap_or_default(),
    };

    let datastore = check_privs_and_load_store(
        &store_with_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    let worker_id = format!("{}:{}:{}", store_with_ns.store, store_with_ns.ns, group);
    let group = datastore.backup_group(store_with_ns.ns.clone(), group);

    let mut prune_result = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let mut result = json!({
                "backup-type": info.backup_dir.backup_type(),
                "backup-id": info.backup_dir.backup_id(),
                "backup-time": info.backup_dir.backup_time(),
                "keep": keep,
                "protected": mark.protected(),
            });
            let prune_ns = info.backup_dir.backup_ns();
            if !prune_ns.is_root() {
                result["ns"] = serde_json::to_value(prune_ns)?;
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(
            worker,
            "retention options: {}",
            pbs_datastore::prune::cli_options_string(&prune_options)
        );
        task_log!(
            worker,
            "Starting prune on {} group \"{}\"",
            store_with_ns,
            group.group(),
        );
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();

        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.ty,
            "backup-id": group.id,
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = info.backup_dir.destroy(false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    ns: Option<BackupNamespace>,
    max_depth: Option<usize>,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let ns = ns.unwrap_or_default();
    let worker_id = format!("{}:{}", store, ns);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(
                worker,
                auth_id,
                prune_options,
                datastore,
                ns,
                max_depth.unwrap_or(MAX_NAMESPACE_DEPTH), // cannot rely on schema default
                dry_run,
            )
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

fn can_access_any_ns(store: Arc<DataStore>, auth_id: &Authid, user_info: &CachedUserInfo) -> bool {
    // NOTE: traversing the datastore could be avoided if we had an "ACL tree: is there any priv
    // below /datastore/{store}" helper
    let mut iter =
        if let Ok(iter) = store.recursive_iter_backup_ns_ok(BackupNamespace::root(), None) {
            iter
        } else {
            return false;
        };
    let wanted =
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
    let name = store.name();
    iter.any(|ns| -> bool {
        let store_with_ns = DatastoreWithNamespace {
            store: name.to_string(),
            ns,
        };
        let user_privs = user_info.lookup_privs(&auth_id, &store_with_ns.acl_path());
        user_privs & wanted != 0
    })
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut allow_id = false;
        if !allowed {
            let scfg: pbs_api_types::DataStoreConfig = serde_json::from_value(data.to_owned())?;
            // safety: we just cannot go through lookup as we must avoid an operation check
            if let Ok(datastore) = unsafe { DataStore::open_from_config(scfg, None) } {
                allow_id = can_access_any_ns(datastore, &auth_id, &user_info);
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store_with_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name, store_with_ns, backup_dir, file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store_with_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir_api.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name, store_with_ns, backup_dir_api, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

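        // the extension picks the decoder below (naming as used throughout this crate):
        //   "didx" - dynamically sized chunk index (e.g. pxar archives)
        //   "fidx" - fixed size chunk index (e.g. VM disk images)
        //   "blob" - a single raw data blob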
        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

        let datastore = check_privs_and_load_store(
            &store_with_ns,
            &auth_id,
            0,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir_api.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let mut path = backup_dir.full_path();
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {store_with_ns} {backup_dir_api}/{file_name}");

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let store_with_ns = DatastoreWithNamespace {
        store,
        ns: ns.unwrap_or_default(),
    };

    let datastore = check_privs_and_load_store(
        &store_with_ns,
        &auth_id,
        PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
            ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
        ]),
    )
).access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let ns = optional_ns_param(&param)?;
        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: ns.clone(),
        };
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store_with_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
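        // the first component of the decoded path names the pxar archive within the
        // snapshot, the remainder (if any) is the entry path inside that archive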
9ccf933b 1681 let (manifest, files) = read_backup_index(&backup_dir)?;
9238cdf5
FG
1682 for file in files {
1683 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1684 bail!("cannot decode '{}' - is encrypted", pxar_name);
1685 }
1686 }
d33d8f4e 1687
9238cdf5
FG
1688 let mut path = datastore.base_path();
1689 path.push(backup_dir.relative_path());
1690 path.push(pxar_name);
d33d8f4e
DC
1691
1692 let index = DynamicIndexReader::open(&path)
1693 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1694
2d55beec 1695 let (csum, size) = index.compute_csum();
9a37bd6c 1696 manifest.verify_file(pxar_name, &csum, size)?;
2d55beec 1697
14f6c9cb 1698 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
d33d8f4e
DC
1699 let reader = BufferedDynamicReader::new(index, chunk_reader);
1700 let archive_size = reader.archive_size();
1701 let reader = LocalDynamicReadAt::new(reader);
1702
1703 let decoder = Accessor::new(reader, archive_size).await?;
1704 let root = decoder.open_root().await?;
2e219481 1705 let path = OsStr::from_bytes(file_path).to_os_string();
d33d8f4e 1706 let file = root
dc7a5b34
TL
1707 .lookup(&path)
1708 .await?
2e219481 1709 .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
d33d8f4e 1710
804f6143
DC
1711 let body = match file.kind() {
1712 EntryKind::File { .. } => Body::wrap_stream(
1713 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1714 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1715 err
1716 }),
1717 ),
1718 EntryKind::Hardlink(_) => Body::wrap_stream(
1719 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1720 .map_err(move |err| {
dc7a5b34 1721 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
804f6143
DC
1722 err
1723 }),
1724 ),
1725 EntryKind::Directory => {
984ddb2f 1726 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
804f6143 1727 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
984ddb2f 1728 if tar {
dc7a5b34
TL
1729 proxmox_rest_server::spawn_internal_task(create_tar(
1730 channelwriter,
1731 decoder,
1732 path.clone(),
1733 false,
1734 ));
984ddb2f
DC
1735 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1736 Body::wrap_stream(zstdstream.map_err(move |err| {
1737 eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
1738 err
1739 }))
1740 } else {
dc7a5b34
TL
1741 proxmox_rest_server::spawn_internal_task(create_zip(
1742 channelwriter,
1743 decoder,
1744 path.clone(),
1745 false,
1746 ));
984ddb2f
DC
1747 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
1748 eprintln!("error during streaming of zip '{:?}' - {}", path, err);
1749 err
1750 }))
1751 }
804f6143
DC
1752 }
1753 other => bail!("cannot download file of type {:?}", other),
1754 };
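        // Regular files and hardlink targets are streamed verbatim; directories are packed on
        // the fly into a zip archive, or a zstd-compressed tar when `tar` was requested.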
d33d8f4e
DC
1755
1756 // fixme: set other headers ?
1757 Ok(Response::builder()
dc7a5b34
TL
1758 .status(StatusCode::OK)
1759 .header(header::CONTENT_TYPE, "application/octet-stream")
1760 .body(body)
1761 .unwrap())
1762 }
1763 .boxed()
d33d8f4e
DC
1764}
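// Hedged usage sketch, assuming the usual `/api2/json/admin/datastore/{store}` mount point of
// this module: a GET on `.../pxar-file-download` with `backup-type`, `backup-id`, `backup-time`,
// a base64 `filepath` and optionally `tar=1` streams either the raw file contents or, for
// directories, an archive built on the fly.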
1765
1a0d3d11
DM
1766#[api(
1767 input: {
1768 properties: {
1769 store: {
1770 schema: DATASTORE_SCHEMA,
1771 },
1772 timeframe: {
c68fa58a 1773 type: RRDTimeFrame,
1a0d3d11
DM
1774 },
1775 cf: {
1776 type: RRDMode,
1777 },
1778 },
1779 },
1780 access: {
7d6fc15b
TL
1781 permission: &Permission::Privilege(
1782 &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1a0d3d11
DM
1783 },
1784)]
1785/// Read datastore RRD statistics (usage and disk I/O)
bf78f708 1786pub fn get_rrd_stats(
1a0d3d11 1787 store: String,
c68fa58a 1788 timeframe: RRDTimeFrame,
1a0d3d11
DM
1789 cf: RRDMode,
1790 _param: Value,
1791) -> Result<Value, Error> {
e9d2fc93 1792 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1793 let disk_manager = crate::tools::disks::DiskManage::new();
1794
1795 let mut rrd_fields = vec![
dc7a5b34
TL
1796 "total",
1797 "used",
1798 "read_ios",
1799 "read_bytes",
1800 "write_ios",
1801 "write_bytes",
f27b6086
DC
1802 ];
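    // Column names queried below from the per-datastore RRD series ("datastore/{store}").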
1803
1804 // we do not have io_ticks for zpools, so don't include them
1805 match disk_manager.find_mounted_device(&datastore.base_path()) {
dc7a5b34 1806 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
f27b6086
DC
1807 _ => rrd_fields.push("io_ticks"),
1808 };
1809
dc7a5b34 1810 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1a0d3d11
DM
1811}
1812
5fd823c3
HL
1813#[api(
1814 input: {
1815 properties: {
1816 store: {
1817 schema: DATASTORE_SCHEMA,
1818 },
1819 },
1820 },
1821 access: {
1822 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1823 },
1824)]
1825/// Read the number of currently active read and write operations on a datastore
dc7a5b34 1826pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
5fd823c3
HL
1827 let active_operations = task_tracking::get_active_operations(&store)?;
1828 Ok(json!({
1829 "read": active_operations.read,
1830 "write": active_operations.write,
1831 }))
1832}
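// Example response body (sketch): { "read": 2, "write": 0 }, as reported by
// `task_tracking::get_active_operations`.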
1833
d6688884
SR
1834#[api(
1835 input: {
1836 properties: {
988d575d 1837 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1838 ns: {
133d718f
WB
1839 type: BackupNamespace,
1840 optional: true,
1841 },
8c74349b
WB
1842 backup_group: {
1843 type: pbs_api_types::BackupGroup,
1844 flatten: true,
1845 },
d6688884
SR
1846 },
1847 },
1848 access: {
7d6fc15b
TL
1849 permission: &Permission::Anybody,
1850 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1851 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1852 },
1853)]
1854/// Get "notes" for a backup group
1855pub fn get_group_notes(
1856 store: String,
bc21ade2 1857 ns: Option<BackupNamespace>,
8c74349b 1858 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1859 rpcenv: &mut dyn RpcEnvironment,
1860) -> Result<String, Error> {
d6688884 1861 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
ea2e91e5 1862 let store_with_ns = DatastoreWithNamespace {
974a3e52
FG
1863 store,
1864 ns: ns.unwrap_or_default(),
ea2e91e5
FG
1865 };
1866
7a404dc5 1867 let datastore = check_privs_and_load_store(
ea2e91e5 1868 &store_with_ns,
7d6fc15b 1869 &auth_id,
2bc2435a
FG
1870 PRIV_DATASTORE_AUDIT,
1871 PRIV_DATASTORE_BACKUP,
c9396984 1872 Some(Operation::Read),
c9396984
FG
1873 &backup_group,
1874 )?;
d6688884 1875
974a3e52 1876 let note_path = get_group_note_path(&datastore, &store_with_ns.ns, &backup_group);
d6688884
SR
1877 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1878}
1879
1880#[api(
1881 input: {
1882 properties: {
988d575d 1883 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1884 ns: {
133d718f
WB
1885 type: BackupNamespace,
1886 optional: true,
1887 },
8c74349b
WB
1888 backup_group: {
1889 type: pbs_api_types::BackupGroup,
1890 flatten: true,
1891 },
d6688884
SR
1892 notes: {
1893 description: "A multiline text.",
1894 },
1895 },
1896 },
1897 access: {
7d6fc15b
TL
1898 permission: &Permission::Anybody,
1899 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
1900 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1901 },
1902)]
1903/// Set "notes" for a backup group
1904pub fn set_group_notes(
1905 store: String,
bc21ade2 1906 ns: Option<BackupNamespace>,
8c74349b 1907 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1908 notes: String,
1909 rpcenv: &mut dyn RpcEnvironment,
1910) -> Result<(), Error> {
d6688884 1911 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
ea2e91e5 1912 let store_with_ns = DatastoreWithNamespace {
974a3e52
FG
1913 store,
1914 ns: ns.unwrap_or_default(),
ea2e91e5 1915 };
7a404dc5 1916 let datastore = check_privs_and_load_store(
ea2e91e5 1917 &store_with_ns,
7d6fc15b 1918 &auth_id,
2bc2435a
FG
1919 PRIV_DATASTORE_MODIFY,
1920 PRIV_DATASTORE_BACKUP,
c9396984 1921 Some(Operation::Write),
c9396984
FG
1922 &backup_group,
1923 )?;
d6688884 1924
974a3e52 1925 let note_path = get_group_note_path(&datastore, &store_with_ns.ns, &backup_group);
e0a19d33 1926 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
d6688884
SR
1927
1928 Ok(())
1929}
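// Unlike per-snapshot notes (stored in the snapshot manifest, see get_notes/set_notes below),
// group notes are kept as a plain text file resolved via `get_group_note_path`.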
1930
912b3f5b
DM
1931#[api(
1932 input: {
1933 properties: {
988d575d 1934 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1935 ns: {
133d718f
WB
1936 type: BackupNamespace,
1937 optional: true,
1938 },
8c74349b
WB
1939 backup_dir: {
1940 type: pbs_api_types::BackupDir,
1941 flatten: true,
1942 },
912b3f5b
DM
1943 },
1944 },
1945 access: {
7d6fc15b
TL
1946 permission: &Permission::Anybody,
1947 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1948 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
1949 },
1950)]
1951/// Get "notes" for a specific backup
bf78f708 1952pub fn get_notes(
912b3f5b 1953 store: String,
bc21ade2 1954 ns: Option<BackupNamespace>,
8c74349b 1955 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
1956 rpcenv: &mut dyn RpcEnvironment,
1957) -> Result<String, Error> {
7d6fc15b 1958 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
ea2e91e5
FG
1959 let store_with_ns = DatastoreWithNamespace {
1960 store: store.clone(),
974a3e52 1961 ns: ns.unwrap_or_default(),
ea2e91e5
FG
1962 };
1963
7a404dc5 1964 let datastore = check_privs_and_load_store(
ea2e91e5 1965 &store_with_ns,
7d6fc15b 1966 &auth_id,
2bc2435a
FG
1967 PRIV_DATASTORE_AUDIT,
1968 PRIV_DATASTORE_BACKUP,
c9396984 1969 Some(Operation::Read),
c9396984
FG
1970 &backup_dir.group,
1971 )?;
912b3f5b 1972
974a3e52 1973 let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
912b3f5b 1974
133d718f 1975 let (manifest, _) = backup_dir.load_manifest()?;
912b3f5b 1976
dc7a5b34 1977 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
912b3f5b
DM
1978
1979 Ok(String::from(notes))
1980}
1981
1982#[api(
1983 input: {
1984 properties: {
988d575d 1985 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1986 ns: {
133d718f
WB
1987 type: BackupNamespace,
1988 optional: true,
1989 },
8c74349b
WB
1990 backup_dir: {
1991 type: pbs_api_types::BackupDir,
1992 flatten: true,
1993 },
912b3f5b
DM
1994 notes: {
1995 description: "A multiline text.",
1996 },
1997 },
1998 },
1999 access: {
7d6fc15b
TL
2000 permission: &Permission::Anybody,
2001 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2002 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
2003 },
2004)]
2005/// Set "notes" for a specific backup
bf78f708 2006pub fn set_notes(
912b3f5b 2007 store: String,
bc21ade2 2008 ns: Option<BackupNamespace>,
8c74349b 2009 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
2010 notes: String,
2011 rpcenv: &mut dyn RpcEnvironment,
2012) -> Result<(), Error> {
7d6fc15b 2013 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
ea2e91e5 2014 let store_with_ns = DatastoreWithNamespace {
974a3e52
FG
2015 store,
2016 ns: ns.unwrap_or_default(),
ea2e91e5
FG
2017 };
2018
7a404dc5 2019 let datastore = check_privs_and_load_store(
ea2e91e5 2020 &store_with_ns,
7d6fc15b 2021 &auth_id,
2bc2435a
FG
2022 PRIV_DATASTORE_MODIFY,
2023 PRIV_DATASTORE_BACKUP,
c9396984 2024 Some(Operation::Write),
c9396984
FG
2025 &backup_dir.group,
2026 )?;
912b3f5b 2027
974a3e52 2028 let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
912b3f5b 2029
133d718f
WB
2030 backup_dir
2031 .update_manifest(|manifest| {
dc7a5b34
TL
2032 manifest.unprotected["notes"] = notes.into();
2033 })
2034 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
2035
2036 Ok(())
2037}
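// Snapshot notes live in the manifest's `unprotected` section, so updating them rewrites the
// manifest blob without touching the part a manifest signature would cover.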
2038
8292d3d2
DC
2039#[api(
2040 input: {
2041 properties: {
988d575d 2042 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2043 ns: {
133d718f
WB
2044 type: BackupNamespace,
2045 optional: true,
2046 },
8c74349b
WB
2047 backup_dir: {
2048 type: pbs_api_types::BackupDir,
2049 flatten: true,
2050 },
8292d3d2
DC
2051 },
2052 },
2053 access: {
7d6fc15b
TL
2054 permission: &Permission::Anybody,
2055 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2056 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2057 },
2058)]
2059/// Query protection for a specific backup
2060pub fn get_protection(
2061 store: String,
bc21ade2 2062 ns: Option<BackupNamespace>,
8c74349b 2063 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2064 rpcenv: &mut dyn RpcEnvironment,
2065) -> Result<bool, Error> {
7d6fc15b 2066 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
ea2e91e5 2067 let store_with_ns = DatastoreWithNamespace {
974a3e52
FG
2068 store,
2069 ns: ns.unwrap_or_default(),
ea2e91e5 2070 };
7a404dc5 2071 let datastore = check_privs_and_load_store(
ea2e91e5 2072 &store_with_ns,
7d6fc15b 2073 &auth_id,
2bc2435a
FG
2074 PRIV_DATASTORE_AUDIT,
2075 PRIV_DATASTORE_BACKUP,
c9396984 2076 Some(Operation::Read),
c9396984
FG
2077 &backup_dir.group,
2078 )?;
8292d3d2 2079
974a3e52 2080 let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
8292d3d2 2081
6da20161 2082 Ok(backup_dir.is_protected())
8292d3d2
DC
2083}
2084
2085#[api(
2086 input: {
2087 properties: {
988d575d 2088 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2089 ns: {
133d718f
WB
2090 type: BackupNamespace,
2091 optional: true,
2092 },
8c74349b
WB
2093 backup_dir: {
2094 type: pbs_api_types::BackupDir,
2095 flatten: true,
2096 },
8292d3d2
DC
2097 protected: {
2098 description: "Enable/disable protection.",
2099 },
2100 },
2101 },
2102 access: {
7d6fc15b
TL
2103 permission: &Permission::Anybody,
2104 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2105 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2106 },
2107)]
2108/// Enable or disable protection for a specific backup
2109pub fn set_protection(
2110 store: String,
bc21ade2 2111 ns: Option<BackupNamespace>,
8c74349b 2112 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2113 protected: bool,
2114 rpcenv: &mut dyn RpcEnvironment,
2115) -> Result<(), Error> {
7d6fc15b 2116 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
ea2e91e5 2117 let store_with_ns = DatastoreWithNamespace {
974a3e52
FG
2118 store,
2119 ns: ns.unwrap_or_default(),
ea2e91e5 2120 };
7a404dc5 2121 let datastore = check_privs_and_load_store(
ea2e91e5 2122 &store_with_ns,
7d6fc15b 2123 &auth_id,
2bc2435a
FG
2124 PRIV_DATASTORE_MODIFY,
2125 PRIV_DATASTORE_BACKUP,
c9396984 2126 Some(Operation::Write),
c9396984
FG
2127 &backup_dir.group,
2128 )?;
8292d3d2 2129
974a3e52 2130 let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
8292d3d2 2131
8292d3d2
DC
2132 datastore.update_protection(&backup_dir, protected)
2133}
2134
72be0eb1 2135#[api(
4940012d 2136 input: {
72be0eb1 2137 properties: {
988d575d 2138 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2139 ns: {
133d718f
WB
2140 type: BackupNamespace,
2141 optional: true,
2142 },
8c74349b
WB
2143 backup_group: {
2144 type: pbs_api_types::BackupGroup,
2145 flatten: true,
2146 },
72be0eb1 2147 "new-owner": {
e6dc35ac 2148 type: Authid,
72be0eb1
DW
2149 },
2150 },
4940012d
FG
2151 },
2152 access: {
bff85572 2153 permission: &Permission::Anybody,
7d6fc15b
TL
2154 description: "Datastore.Modify on the whole datastore, or Datastore.Backup when changing \
2155 ownership between a user and that user's own API tokens for owned backups"
4940012d 2156 },
72be0eb1
DW
2157)]
2158/// Change owner of a backup group
bf78f708 2159pub fn set_backup_owner(
72be0eb1 2160 store: String,
bc21ade2 2161 ns: Option<BackupNamespace>,
8c74349b 2162 backup_group: pbs_api_types::BackupGroup,
e6dc35ac 2163 new_owner: Authid,
bff85572 2164 rpcenv: &mut dyn RpcEnvironment,
72be0eb1 2165) -> Result<(), Error> {
bff85572 2166 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
ea2e91e5 2167 let store_with_ns = DatastoreWithNamespace {
974a3e52
FG
2168 store,
2169 ns: ns.unwrap_or_default(),
ea2e91e5
FG
2170 };
2171 let owner_check_required = check_ns_privs_full(
2172 &store_with_ns,
a724f5fd 2173 &auth_id,
2bc2435a
FG
2174 PRIV_DATASTORE_MODIFY,
2175 PRIV_DATASTORE_BACKUP,
a724f5fd 2176 )?;
1909ece2 2177
974a3e52 2178 let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Write))?;
1909ece2 2179
974a3e52 2180 let backup_group = datastore.backup_group(store_with_ns.ns, backup_group);
bff85572 2181
2bc2435a 2182 if owner_check_required {
133d718f 2183 let owner = backup_group.get_owner()?;
bff85572 2184
2bc2435a 2185 let allowed = match (owner.is_token(), new_owner.is_token()) {
bff85572
FG
2186 (true, true) => {
2187 // API token to API token, owned by same user
2188 let owner = owner.user();
2189 let new_owner = new_owner.user();
2190 owner == new_owner && Authid::from(owner.clone()) == auth_id
dc7a5b34 2191 }
bff85572
FG
2192 (true, false) => {
2193 // API token to API token owner
dc7a5b34
TL
2194 Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
2195 }
bff85572
FG
2196 (false, true) => {
2197 // API token owner to API token
dc7a5b34
TL
2198 owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
2199 }
bff85572
FG
2200 (false, false) => {
2201 // User to User, not allowed for unprivileged users
2202 false
dc7a5b34 2203 }
2bc2435a 2204 };
bff85572 2205
2bc2435a
FG
2206 if !allowed {
2207 return Err(http_err!(
2208 UNAUTHORIZED,
2209 "{} does not have permission to change owner of backup group '{}' to {}",
2210 auth_id,
e13303fc 2211 backup_group.group(),
2bc2435a
FG
2212 new_owner,
2213 ));
2214 }
bff85572
FG
2215 }
2216
7d6fc15b
TL
2217 let user_info = CachedUserInfo::new()?;
2218
e6dc35ac 2219 if !user_info.is_active_auth_id(&new_owner) {
dc7a5b34
TL
2220 bail!(
2221 "{} '{}' is inactive or non-existent",
2222 if new_owner.is_token() {
2223 "API token".to_string()
2224 } else {
2225 "user".to_string()
2226 },
2227 new_owner
2228 );
72be0eb1
DW
2229 }
2230
133d718f 2231 backup_group.set_owner(&new_owner, true)?;
72be0eb1
DW
2232
2233 Ok(())
2234}
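// Without Datastore.Modify the matrix above only permits moving a group between the calling
// user and that user's own API tokens, or between two of that user's tokens; transfers between
// two different users always require the privilege.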
2235
552c2259 2236#[sortable]
255f378a 2237const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
5fd823c3
HL
2238 (
2239 "active-operations",
dc7a5b34 2240 &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
5b1cfa01 2241 ),
dc7a5b34 2242 ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
72be0eb1
DW
2243 (
2244 "change-owner",
dc7a5b34 2245 &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
72be0eb1 2246 ),
255f378a
DM
2247 (
2248 "download",
dc7a5b34 2249 &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
255f378a 2250 ),
6ef9bb59
DC
2251 (
2252 "download-decoded",
dc7a5b34 2253 &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
255f378a 2254 ),
dc7a5b34 2255 ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
255f378a
DM
2256 (
2257 "gc",
2258 &Router::new()
2259 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
dc7a5b34 2260 .post(&API_METHOD_START_GARBAGE_COLLECTION),
255f378a 2261 ),
d6688884
SR
2262 (
2263 "group-notes",
2264 &Router::new()
2265 .get(&API_METHOD_GET_GROUP_NOTES)
dc7a5b34 2266 .put(&API_METHOD_SET_GROUP_NOTES),
d6688884 2267 ),
255f378a
DM
2268 (
2269 "groups",
2270 &Router::new()
b31c8019 2271 .get(&API_METHOD_LIST_GROUPS)
dc7a5b34 2272 .delete(&API_METHOD_DELETE_GROUP),
255f378a 2273 ),
18934ae5
TL
2274 (
2275 "namespace",
2276 // FIXME: move into datastore:: sub-module?!
2277 &crate::api2::admin::namespace::ROUTER,
2278 ),
912b3f5b
DM
2279 (
2280 "notes",
2281 &Router::new()
2282 .get(&API_METHOD_GET_NOTES)
dc7a5b34 2283 .put(&API_METHOD_SET_NOTES),
912b3f5b 2284 ),
8292d3d2
DC
2285 (
2286 "protected",
2287 &Router::new()
2288 .get(&API_METHOD_GET_PROTECTION)
dc7a5b34 2289 .put(&API_METHOD_SET_PROTECTION),
255f378a 2290 ),
dc7a5b34 2291 ("prune", &Router::new().post(&API_METHOD_PRUNE)),
9805207a
DC
2292 (
2293 "prune-datastore",
dc7a5b34 2294 &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
9805207a 2295 ),
d33d8f4e
DC
2296 (
2297 "pxar-file-download",
dc7a5b34 2298 &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
1a0d3d11 2299 ),
dc7a5b34 2300 ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
255f378a
DM
2301 (
2302 "snapshots",
2303 &Router::new()
fc189b19 2304 .get(&API_METHOD_LIST_SNAPSHOTS)
dc7a5b34 2305 .delete(&API_METHOD_DELETE_SNAPSHOT),
255f378a 2306 ),
dc7a5b34 2307 ("status", &Router::new().get(&API_METHOD_STATUS)),
255f378a
DM
2308 (
2309 "upload-backup-log",
dc7a5b34 2310 &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
c2009e53 2311 ),
dc7a5b34 2312 ("verify", &Router::new().post(&API_METHOD_VERIFY)),
255f378a
DM
2313];
2314
ad51d02a 2315const DATASTORE_INFO_ROUTER: Router = Router::new()
255f378a
DM
2316 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2317 .subdirs(DATASTORE_INFO_SUBDIRS);
2318
255f378a 2319pub const ROUTER: Router = Router::new()
bb34b589 2320 .get(&API_METHOD_GET_DATASTORE_LIST)
255f378a 2321 .match_all("store", &DATASTORE_INFO_ROUTER);
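// Layout sketch (assuming this module keeps its usual mount point under
// `/api2/json/admin/datastore`): a GET on the base path returns the datastore list, while
// `{store}` is matched into DATASTORE_INFO_ROUTER, exposing the sub-routes above, e.g.
// `.../{store}/snapshots`, `.../{store}/groups`, `.../{store}/status` or
// `.../{store}/pxar-file-download`.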