//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    print_ns_and_snapshot, Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode,
    DataStoreListItem, DataStoreStatus, DatastoreWithNamespace, GarbageCollectionStatus,
    GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
    ListAccessibleBackupGroups,
};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

// TODO: move somewhere we can reuse it from (namespace has its own copy atm.)
fn get_ns_privs(store: &str, ns: &BackupNamespace, auth_id: &Authid) -> Result<u64, Error> {
    let user_info = CachedUserInfo::new()?;

    Ok(if ns.is_root() {
        user_info.lookup_privs(auth_id, &["datastore", store])
    } else {
        user_info.lookup_privs(auth_id, &["datastore", store, &ns.to_string()])
    })
}

// asserts that either `full_access_privs` or `partial_access_privs` are fulfilled; the returned
// value indicates whether further checks like group ownership are required
fn check_ns_privs(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
) -> Result<bool, Error> {
    let privs = get_ns_privs(store, ns, auth_id)?;

    if full_access_privs != 0 && (privs & full_access_privs) != 0 {
        return Ok(false);
    }
    if partial_access_privs != 0 && (privs & partial_access_privs) != 0 {
        return Ok(true);
    }

    proxmox_router::http_bail!(FORBIDDEN, "permission check failed");
}
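
// Illustrative sketch only (it mirrors the call in `list_groups` further down): an endpoint that
// distinguishes "see everything" from "see only owned groups" calls this as
//
//     let list_all = !check_ns_privs(
//         &store,
//         &ns,
//         &auth_id,
//         PRIV_DATASTORE_AUDIT,
//         PRIV_DATASTORE_BACKUP,
//     )?;
//
// where `Ok(false)` means full access and `Ok(true)` means only owned groups may be shown.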

// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(&store, operation)?;

    if limited {
        let owner = datastore.get_owner(&ns, backup_group)?;
        check_backup_owner(&owner, &auth_id)?;
    }

    Ok(datastore)
}
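
// Illustrative sketch of the intended call pattern (compare `delete_group` below) - the privilege
// constants, operation and group are whatever the specific endpoint handles:
//
//     let datastore = check_privs_and_load_store(
//         &store,
//         &ns,
//         &auth_id,
//         PRIV_DATASTORE_MODIFY,
//         PRIV_DATASTORE_PRUNE,
//         Some(Operation::Write),
//         &group,
//     )?;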

fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();
    let list_all = !check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let store_with_ns = DatastoreWithNamespace {
        store: store.to_owned(),
        ns: ns.clone(),
    };

    datastore
        .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            let owner = match datastore.get_owner(&ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        store_with_ns,
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    if !datastore.remove_backup_group(&ns, &group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(ns, backup_dir)?;

    let info = BackupInfo::new(snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(ns, backup_dir)?;

    snapshot.destroy(false)?;

    Ok(Value::Null)
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let list_all = !check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let store_with_ns = DatastoreWithNamespace {
        store: store.to_owned(),
        ns: ns.clone(),
    };

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(ns, backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_groups_ok(ns)?
            .filter(|group| group.backup_type() == backup_type)
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => datastore
            .iter_backup_groups_ok(ns)?
            .filter(|group| group.backup_id() == backup_id)
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(ns)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state: '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}' in {} - {}",
                    group.group(),
                    &store_with_ns,
                    err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

fn get_snapshots_count(store: &Arc<DataStore>, owner: Option<&Authid>) -> Result<Counts, Error> {
    let root_ns = Default::default();
    ListAccessibleBackupGroups::new(store, root_ns, MAX_NAMESPACE_DEPTH, owner)?.try_fold(
        Counts::default(),
        |mut counts, group| {
            let group = match group {
                Ok(group) => group,
                Err(_) => return Ok(counts), // TODO: add this as error counts?
            };
            let snapshot_count = group.list_backups()?.len() as u64;

            // only include groups with snapshots, counting/displaying empty groups can confuse
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                    BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                    BackupType::Host => counts.host.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        },
    )
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let owner_check_required = check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if ns.is_root() {
                store.clone()
            } else {
                format!("{store}:{}", ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?;
                failed_dirs
            } else {
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
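
// Quick reference for the selector combinations `verify` accepts (this just mirrors the match on
// (backup-type, backup-id, backup-time) above, it is not additional behaviour):
//
//     backup-type + backup-id + backup-time  -> verify one snapshot            ("verify_snapshot")
//     backup-type + backup-id                -> verify a whole group           ("verify_group")
//     none of the three                      -> verify the namespace/datastore ("verify")
//
// Any other combination bails out with "parameters do not specify a backup group or snapshot".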

#[api(
    input: {
        properties: {
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;
    let store_with_ns = DatastoreWithNamespace {
        store: store.to_owned(),
        ns: ns.clone(),
    };

    let worker_id = format!("{}:{}:{}", store, ns, group);
    let group = datastore.backup_group(ns, group);

    let mut prune_result = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let mut result = json!({
                "backup-type": info.backup_dir.backup_type(),
                "backup-id": info.backup_dir.backup_id(),
                "backup-time": info.backup_dir.backup_time(),
                "keep": keep,
                "protected": mark.protected(),
            });
            let prune_ns = info.backup_dir.backup_ns();
            if !prune_ns.is_root() {
                result["ns"] = serde_json::to_value(prune_ns)?;
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(
            worker,
            "retention options: {}",
            pbs_datastore::prune::cli_options_string(&prune_options)
        );
        task_log!(
            worker,
            "Starting prune on {} group \"{}\"",
            store_with_ns,
            group.group(),
        );
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();

        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark);

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.ty,
            "backup-id": group.id,
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = info.backup_dir.destroy(false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    ns: Option<BackupNamespace>,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let ns = ns.unwrap_or_default();
    let worker_id = format!("{}:{}", store, ns);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    // FIXME: add max-depth

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(worker, auth_id, prune_options, datastore, ns, dry_run)
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

fn can_access_any_ns(store: Arc<DataStore>, auth_id: &Authid, user_info: &CachedUserInfo) -> bool {
    // NOTE: traversing the datastore could be avoided if we had an "ACL tree: is there any priv
    // below /datastore/{store}" helper
    let mut iter =
        if let Ok(iter) = store.recursive_iter_backup_ns_ok(BackupNamespace::root(), None) {
            iter
        } else {
            return false;
        };
    let wanted =
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
    let name = store.name();
    iter.any(|ns| -> bool {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", name, &ns.to_string()]);
        user_privs & wanted != 0
    })
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut allow_id = false;
        if !allowed {
            let scfg: pbs_api_types::DataStoreConfig = serde_json::from_value(data.to_owned())?;
            // safety: we just cannot go through lookup as we must avoid an operation check
            if let Ok(datastore) = unsafe { DataStore::open_from_config(scfg, None) } {
                allow_id = can_access_any_ns(datastore, &auth_id, &user_info);
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name, store_with_ns, backup_dir, file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir_api.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name, store_with_ns, backup_dir_api, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            0,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir_api.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let mut path = backup_dir.full_path();
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {store_with_ns} {backup_dir_api}/{file_name}");

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}
1618
d33d8f4e
DC
1619#[sortable]
1620pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1621 &ApiHandler::AsyncHttp(&pxar_file_download),
1622 &ObjectSchema::new(
1ffe0301 1623 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
d33d8f4e
DC
1624 &sorted!([
1625 ("store", false, &DATASTORE_SCHEMA),
bc21ade2 1626 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
d33d8f4e
DC
1627 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1628 ("backup-id", false, &BACKUP_ID_SCHEMA),
1629 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1630 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
984ddb2f 1631 ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
d33d8f4e
DC
1632 ]),
1633 )
7d6fc15b
TL
1634).access(
1635 Some(
1636 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1637 DATASTORE_BACKUP and being the owner of the group",
1638 ),
1639 &Permission::Anybody,
d33d8f4e
DC
1640);
1641
pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path)
            .await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                if tar {
                    proxmox_rest_server::spawn_internal_task(create_tar(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
                    Body::wrap_stream(zstdstream.map_err(move |err| {
                        eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
                        err
                    }))
                } else {
                    proxmox_rest_server::spawn_internal_task(create_zip(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                        eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                        err
                    }))
                }
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}
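
// The `filepath` parameter above packs two pieces of information into one base64 string:
// the name of the pxar index inside the snapshot and the path of the entry inside that
// archive, separated by the first '/'. The following sketch mirrors that split logic with
// purely illustrative values (it is an example, not part of the API surface):
#[cfg(test)]
mod pxar_filepath_split_example {
    #[test]
    fn splits_archive_name_and_inner_path() {
        // decoded form of a hypothetical `filepath` parameter, leading '/' already stripped
        let components = b"root.pxar.didx/etc/hostname".to_vec();
        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap()).unwrap();
        let file_path = split.next().unwrap_or(b"/");
        assert_eq!(pxar_name, "root.pxar.didx");
        assert_eq!(file_path, &b"etc/hostname"[..]);
    }
}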

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrame,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrame,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let disk_manager = crate::tools::disks::DiskManage::new();

    let mut rrd_fields = vec![
        "total",
        "used",
        "read_ios",
        "read_bytes",
        "write_ios",
        "write_bytes",
    ];

    // we do not have io_ticks for zpools, so don't include them
    match disk_manager.find_mounted_device(&datastore.base_path()) {
        Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
        _ => rrd_fields.push("io_ticks"),
    };

    create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
    },
)]
/// Get active operations on a datastore
pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
    let active_operations = task_tracking::get_active_operations(&store)?;
    Ok(json!({
        "read": active_operations.read,
        "write": active_operations.write,
    }))
}
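
// The handler above reports the datastore's currently tracked read/write operations;
// an illustrative JSON response (values are examples only) looks like:
//
//     { "read": 1, "write": 0 }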

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Get "notes" for a backup group
pub fn get_group_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_group,
    )?;

    let note_path = get_group_note_path(&datastore, &ns, &backup_group);
    Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_group,
    )?;

    let note_path = get_group_note_path(&datastore, &ns, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    let (manifest, _) = backup_dir.load_manifest()?;

    let notes = manifest.unprotected["notes"].as_str().unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    backup_dir
        .update_manifest(|manifest| {
            manifest.unprotected["notes"] = notes.into();
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}
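
// Snapshot notes live in the `unprotected` part of the manifest, i.e. data that may change
// without invalidating the signed portion, so updating them only rewrites that JSON value.
// A minimal sketch of the value manipulation done in the closure above, using the
// `serde_json` types already imported in this module (the note text is an example only):
#[cfg(test)]
mod snapshot_notes_example {
    #[test]
    fn notes_round_trip_through_unprotected_value() {
        let mut unprotected = serde_json::json!({});
        unprotected["notes"] = "nightly backup of web01".into();
        assert_eq!(unprotected["notes"].as_str(), Some("nightly backup of web01"));
    }
}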

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    Ok(backup_dir.is_protected())
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Enable or disable protection for a specific backup
pub fn set_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    datastore.update_protection(&backup_dir, protected)
}
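
// Protection is a per-snapshot flag persisted by the datastore; while it is set, the snapshot
// is excluded from removal (for example by prune or manual deletion). The GET/PUT pair above
// only reads and toggles that flag; enforcement happens in the removal code paths, not here.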

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and \
            a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let owner_check_required = check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_group = datastore.backup_group(ns, backup_group);

    if owner_check_required {
        let owner = backup_group.get_owner()?;

        let allowed = match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            }
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
            }
            (false, true) => {
                // API token owner to API token
                owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
            }
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            }
        };

        if !allowed {
            return Err(http_err!(
                UNAUTHORIZED,
                "{} does not have permission to change owner of backup group '{}' to {}",
                auth_id,
                backup_group.group(),
                new_owner,
            ));
        }
    }

    let user_info = CachedUserInfo::new()?;

    if !user_info.is_active_auth_id(&new_owner) {
        bail!(
            "{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner
        );
    }

    backup_group.set_owner(&new_owner, true)?;

    Ok(())
}
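
// Summary of the ownership-change matrix enforced above when the caller lacks
// Datastore.Modify (i.e. `owner_check_required` is true):
//
//     current owner | new owner | allowed when
//     --------------+-----------+----------------------------------------------------------
//     API token     | API token | both belong to the same user and that user is the caller
//     API token     | user      | the caller owns the token and is the new owner
//     user          | API token | the caller is the current owner and owns the new token
//     user          | user      | never (user-to-user transfer requires Datastore.Modify)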

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "namespace",
        // FIXME: move into datastore:: sub-module?!
        &crate::api2::admin::namespace::ROUTER,
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];
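
// Each entry above becomes a sub-path of a concrete datastore once ROUTER (defined below)
// matches the `store` parameter, e.g. (paths shown relative to where this router is mounted;
// "store1" is an example name):
//
//     .../store1/status
//     .../store1/snapshots
//     .../store1/pxar-file-download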

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);