bf78f708
DM
1//! Datastore Management
2
0d08fcee 3use std::collections::HashSet;
d33d8f4e
DC
4use std::ffi::OsStr;
5use std::os::unix::ffi::OsStrExt;
d6688884 6use std::path::PathBuf;
6da20161 7use std::sync::Arc;
cad540e9 8
6ef9bb59 9use anyhow::{bail, format_err, Error};
9e47c0a5 10use futures::*;
cad540e9
WB
11use hyper::http::request::Parts;
12use hyper::{header, Body, Response, StatusCode};
8c74349b 13use serde::Deserialize;
15e9b4ed 14use serde_json::{json, Value};
7c667013 15use tokio_stream::wrappers::ReceiverStream;
15e9b4ed 16
dc7a5b34
TL
17use proxmox_async::blocking::WrappedReaderStream;
18use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
984ddb2f 19use proxmox_compression::zstd::ZstdEncoder;
6ef1b649 20use proxmox_router::{
dc7a5b34
TL
21 http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
22 Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
6ef1b649
WB
23};
24use proxmox_schema::*;
dc7a5b34
TL
25use proxmox_sys::fs::{
26 file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
27};
28use proxmox_sys::sortable;
d5790a9f 29use proxmox_sys::{task_log, task_warn};
e18a6c9e 30
2e219481 31use pxar::accessor::aio::Accessor;
d33d8f4e
DC
32use pxar::EntryKind;
33
dc7a5b34 34use pbs_api_types::{
abd82485
FG
35 print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
36 Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
dba37e21
WB
37 KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
38 SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
39 BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
40 MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
41 PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
42 UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
b2065dc7 43};
984ddb2f 44use pbs_client::pxar::{create_tar, create_zip};
dc7a5b34 45use pbs_config::CachedUserInfo;
b2065dc7
WB
46use pbs_datastore::backup_info::BackupInfo;
47use pbs_datastore::cached_chunk_reader::CachedChunkReader;
013b1e8b 48use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
b2065dc7
WB
49use pbs_datastore::data_blob::DataBlob;
50use pbs_datastore::data_blob_reader::DataBlobReader;
51use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
dc7a5b34 52use pbs_datastore::fixed_index::FixedIndexReader;
b2065dc7
WB
53use pbs_datastore::index::IndexFile;
54use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
89725197 55use pbs_datastore::prune::compute_prune_info;
dc7a5b34
TL
56use pbs_datastore::{
57 check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
58 StoreProgress, CATALOG_NAME,
59};
8c74349b 60use pbs_tools::json::required_string_param;
dc7a5b34 61use proxmox_rest_server::{formatter, WorkerTask};
2b7f8dd5 62
133d718f 63use crate::api2::backup::optional_ns_param;
431cc7b1 64use crate::api2::node::rrd::create_value_from_rrd;
22cfad13 65use crate::backup::{
de77a20d
TL
66 can_access_any_namespace, check_ns_privs_full, verify_all_backups, verify_backup_dir,
67 verify_backup_group, verify_filter, ListAccessibleBackupGroups,
22cfad13 68};
54552dda 69
b9700a9f 70use crate::server::jobstate::Job;
804f6143 71
d6688884
SR
72const GROUP_NOTES_FILE_NAME: &str = "notes";
73
133d718f
WB
74fn get_group_note_path(
75 store: &DataStore,
76 ns: &BackupNamespace,
77 group: &pbs_api_types::BackupGroup,
78) -> PathBuf {
79 let mut note_path = store.group_path(ns, group);
d6688884
SR
80 note_path.push(GROUP_NOTES_FILE_NAME);
81 note_path
82}
83
7a404dc5
FG
84// helper to unify common sequence of checks:
85// 1. check privs on NS (full or limited access)
86// 2. load datastore
87// 3. if needed (only limited access), check owner of group
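// Typical call from one of the handlers below (sketch, mirroring delete_group):
//     let datastore = check_privs_and_load_store(
//         &store, &ns, &auth_id,
//         PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
//         Some(Operation::Write), &group,
//     )?;
// With only the partial-access privilege, the owner check limits access to groups
// owned by the calling Authid.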
88fn check_privs_and_load_store(
abd82485
FG
89 store: &str,
90 ns: &BackupNamespace,
c9396984 91 auth_id: &Authid,
7a404dc5
FG
92 full_access_privs: u64,
93 partial_access_privs: u64,
c9396984 94 operation: Option<Operation>,
c9396984
FG
95 backup_group: &pbs_api_types::BackupGroup,
96) -> Result<Arc<DataStore>, Error> {
abd82485 97 let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;
7a404dc5 98
abd82485 99 let datastore = DataStore::lookup_datastore(store, operation)?;
c9396984 100
7a404dc5 101 if limited {
abd82485 102 let owner = datastore.get_owner(ns, backup_group)?;
c9396984
FG
103 check_backup_owner(&owner, &auth_id)?;
104 }
105
106 Ok(datastore)
107}
108
e7cb4dc5 109fn read_backup_index(
e7cb4dc5
WB
110 backup_dir: &BackupDir,
111) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
9ccf933b 112 let (manifest, index_size) = backup_dir.load_manifest()?;
8c70e3eb 113
09b1f7b2
DM
114 let mut result = Vec::new();
115 for item in manifest.files() {
116 result.push(BackupContent {
117 filename: item.filename.clone(),
f28d9088 118 crypt_mode: Some(item.crypt_mode),
09b1f7b2
DM
119 size: Some(item.size),
120 });
8c70e3eb
DM
121 }
122
09b1f7b2 123 result.push(BackupContent {
96d65fbc 124 filename: MANIFEST_BLOB_NAME.to_string(),
882c0823
FG
125 crypt_mode: match manifest.signature {
126 Some(_) => Some(CryptMode::SignOnly),
127 None => Some(CryptMode::None),
128 },
09b1f7b2
DM
129 size: Some(index_size),
130 });
4f1e40a2 131
70030b43 132 Ok((manifest, result))
8c70e3eb
DM
133}
134
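// Combines the manifest's file list with the files actually present in the snapshot
// directory; files found on disk but missing from the manifest are reported with
// unknown size and crypt mode.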
1c090810 135fn get_all_snapshot_files(
1c090810 136 info: &BackupInfo,
70030b43 137) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
9ccf933b 138 let (manifest, mut files) = read_backup_index(&info.backup_dir)?;
1c090810
DC
139
140 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
141 acc.insert(item.filename.clone());
142 acc
143 });
144
145 for file in &info.files {
dc7a5b34
TL
146 if file_set.contains(file) {
147 continue;
148 }
f28d9088
WB
149 files.push(BackupContent {
150 filename: file.to_string(),
151 size: None,
152 crypt_mode: None,
153 });
1c090810
DC
154 }
155
70030b43 156 Ok((manifest, files))
1c090810
DC
157}
158
b31c8019
DM
159#[api(
160 input: {
161 properties: {
162 store: {
163 schema: DATASTORE_SCHEMA,
164 },
bc21ade2 165 ns: {
89ae3c32
WB
166 type: BackupNamespace,
167 optional: true,
168 },
b31c8019
DM
169 },
170 },
7b570c17 171 returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
bb34b589 172 access: {
7d6fc15b
TL
173 permission: &Permission::Anybody,
174 description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
175 /datastore/{store}[/{namespace}]",
bb34b589 176 },
b31c8019
DM
177)]
178/// List backup groups.
b2362a12 179pub fn list_groups(
b31c8019 180 store: String,
bc21ade2 181 ns: Option<BackupNamespace>,
54552dda 182 rpcenv: &mut dyn RpcEnvironment,
b31c8019 183) -> Result<Vec<GroupListItem>, Error> {
e6dc35ac 184 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 185 let ns = ns.unwrap_or_default();
ea2e91e5
FG
186
187 let list_all = !check_ns_privs_full(
abd82485
FG
188 &store,
189 &ns,
7d6fc15b 190 &auth_id,
2bc2435a
FG
191 PRIV_DATASTORE_AUDIT,
192 PRIV_DATASTORE_BACKUP,
7d6fc15b 193 )?;
54552dda 194
abd82485 195 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
0d08fcee 196
249dde8b 197 datastore
abd82485 198 .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
249dde8b
TL
199 .try_fold(Vec::new(), |mut group_info, group| {
200 let group = group?;
e13303fc 201
abd82485 202 let owner = match datastore.get_owner(&ns, group.as_ref()) {
249dde8b
TL
203 Ok(auth_id) => auth_id,
204 Err(err) => {
e13303fc
FG
205 eprintln!(
206 "Failed to get owner of group '{}' in {} - {}",
207 group.group(),
abd82485 208 print_store_and_ns(&store, &ns),
e13303fc
FG
209 err
210 );
249dde8b 211 return Ok(group_info);
dc7a5b34 212 }
249dde8b
TL
213 };
214 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
215 return Ok(group_info);
216 }
0d08fcee 217
6da20161 218 let snapshots = match group.list_backups() {
249dde8b
TL
219 Ok(snapshots) => snapshots,
220 Err(_) => return Ok(group_info),
221 };
0d08fcee 222
249dde8b
TL
223 let backup_count: u64 = snapshots.len() as u64;
224 if backup_count == 0 {
225 return Ok(group_info);
226 }
0d08fcee 227
249dde8b
TL
228 let last_backup = snapshots
229 .iter()
230 .fold(&snapshots[0], |a, b| {
231 if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
232 a
233 } else {
234 b
235 }
236 })
237 .to_owned();
238
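            // the group comment is the first line of the per-group "notes" file
            // (best effort - a missing file simply yields no comment)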
abd82485 239 let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
249dde8b
TL
240 let comment = file_read_firstline(&note_path).ok();
241
242 group_info.push(GroupListItem {
988d575d 243 backup: group.into(),
249dde8b
TL
244 last_backup: last_backup.backup_dir.backup_time(),
245 owner: Some(owner),
246 backup_count,
247 files: last_backup.files,
248 comment,
0d08fcee
FG
249 });
250
249dde8b
TL
251 Ok(group_info)
252 })
812c6f87 253}
8f579717 254
f32791b4
DC
255#[api(
256 input: {
257 properties: {
988d575d 258 store: { schema: DATASTORE_SCHEMA },
bc21ade2 259 ns: {
133d718f
WB
260 type: BackupNamespace,
261 optional: true,
262 },
8c74349b
WB
263 group: {
264 type: pbs_api_types::BackupGroup,
265 flatten: true,
266 },
f32791b4
DC
267 },
268 },
269 access: {
7d6fc15b
TL
270 permission: &Permission::Anybody,
271 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\
272 or DATASTORE_PRUNE and being the owner of the group",
f32791b4
DC
273 },
274)]
275/// Delete backup group including all snapshots.
276pub fn delete_group(
277 store: String,
bc21ade2 278 ns: Option<BackupNamespace>,
8c74349b 279 group: pbs_api_types::BackupGroup,
f32791b4
DC
280 _info: &ApiMethod,
281 rpcenv: &mut dyn RpcEnvironment,
282) -> Result<Value, Error> {
f32791b4 283 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 284 let ns = ns.unwrap_or_default();
133d718f 285
7a404dc5 286 let datastore = check_privs_and_load_store(
abd82485
FG
287 &store,
288 &ns,
7d6fc15b 289 &auth_id,
2bc2435a
FG
290 PRIV_DATASTORE_MODIFY,
291 PRIV_DATASTORE_PRUNE,
c9396984 292 Some(Operation::Write),
c9396984
FG
293 &group,
294 )?;
f32791b4 295
abd82485 296 if !datastore.remove_backup_group(&ns, &group)? {
171a00ca 297 bail!("group only partially deleted due to protected snapshots");
5cc7d891 298 }
f32791b4
DC
299
300 Ok(Value::Null)
301}
302
09b1f7b2
DM
303#[api(
304 input: {
305 properties: {
988d575d 306 store: { schema: DATASTORE_SCHEMA },
bc21ade2 307 ns: {
133d718f
WB
308 type: BackupNamespace,
309 optional: true,
310 },
8c74349b
WB
311 backup_dir: {
312 type: pbs_api_types::BackupDir,
313 flatten: true,
314 },
09b1f7b2
DM
315 },
316 },
7b570c17 317 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
bb34b589 318 access: {
7d6fc15b
TL
319 permission: &Permission::Anybody,
320 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
321 DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
bb34b589 322 },
09b1f7b2
DM
323)]
324/// List snapshot files.
ea5f547f 325pub fn list_snapshot_files(
09b1f7b2 326 store: String,
bc21ade2 327 ns: Option<BackupNamespace>,
8c74349b 328 backup_dir: pbs_api_types::BackupDir,
01a13423 329 _info: &ApiMethod,
54552dda 330 rpcenv: &mut dyn RpcEnvironment,
09b1f7b2 331) -> Result<Vec<BackupContent>, Error> {
e6dc35ac 332 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 333 let ns = ns.unwrap_or_default();
133d718f 334
7a404dc5 335 let datastore = check_privs_and_load_store(
abd82485
FG
336 &store,
337 &ns,
7d6fc15b 338 &auth_id,
2bc2435a
FG
339 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
340 PRIV_DATASTORE_BACKUP,
c9396984 341 Some(Operation::Read),
c9396984
FG
342 &backup_dir.group,
343 )?;
01a13423 344
abd82485 345 let snapshot = datastore.backup_dir(ns, backup_dir)?;
54552dda 346
6da20161 347 let info = BackupInfo::new(snapshot)?;
01a13423 348
9ccf933b 349 let (_manifest, files) = get_all_snapshot_files(&info)?;
70030b43
DM
350
351 Ok(files)
01a13423
DM
352}
353
68a6a0ee
DM
354#[api(
355 input: {
356 properties: {
988d575d 357 store: { schema: DATASTORE_SCHEMA },
bc21ade2 358 ns: {
133d718f
WB
359 type: BackupNamespace,
360 optional: true,
361 },
8c74349b
WB
362 backup_dir: {
363 type: pbs_api_types::BackupDir,
364 flatten: true,
365 },
68a6a0ee
DM
366 },
367 },
bb34b589 368 access: {
7d6fc15b
TL
369 permission: &Permission::Anybody,
370 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\
371 or DATASTORE_PRUNE and being the owner of the group",
bb34b589 372 },
68a6a0ee
DM
373)]
374/// Delete backup snapshot.
bf78f708 375pub fn delete_snapshot(
68a6a0ee 376 store: String,
bc21ade2 377 ns: Option<BackupNamespace>,
8c74349b 378 backup_dir: pbs_api_types::BackupDir,
6f62c924 379 _info: &ApiMethod,
54552dda 380 rpcenv: &mut dyn RpcEnvironment,
6f62c924 381) -> Result<Value, Error> {
e6dc35ac 382 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 383 let ns = ns.unwrap_or_default();
ea2e91e5 384
7a404dc5 385 let datastore = check_privs_and_load_store(
abd82485
FG
386 &store,
387 &ns,
7d6fc15b 388 &auth_id,
2bc2435a
FG
389 PRIV_DATASTORE_MODIFY,
390 PRIV_DATASTORE_PRUNE,
c9396984 391 Some(Operation::Write),
c9396984
FG
392 &backup_dir.group,
393 )?;
a724f5fd 394
abd82485 395 let snapshot = datastore.backup_dir(ns, backup_dir)?;
54552dda 396
133d718f 397 snapshot.destroy(false)?;
6f62c924
DM
398
399 Ok(Value::Null)
400}
401
fc189b19 402#[api(
b7c3eaa9 403 streaming: true,
fc189b19
DM
404 input: {
405 properties: {
988d575d 406 store: { schema: DATASTORE_SCHEMA },
bc21ade2 407 ns: {
8c74349b
WB
408 type: BackupNamespace,
409 optional: true,
410 },
fc189b19
DM
411 "backup-type": {
412 optional: true,
988d575d 413 type: BackupType,
fc189b19
DM
414 },
415 "backup-id": {
416 optional: true,
417 schema: BACKUP_ID_SCHEMA,
418 },
419 },
420 },
7b570c17 421 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
bb34b589 422 access: {
7d6fc15b
TL
423 permission: &Permission::Anybody,
424 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
425 or DATASTORE_BACKUP and being the owner of the group",
bb34b589 426 },
fc189b19
DM
427)]
428/// List backup snapshots.
dc7a5b34 429pub fn list_snapshots(
54552dda 430 store: String,
bc21ade2 431 ns: Option<BackupNamespace>,
988d575d 432 backup_type: Option<BackupType>,
54552dda
DM
433 backup_id: Option<String>,
434 _param: Value,
184f17af 435 _info: &ApiMethod,
54552dda 436 rpcenv: &mut dyn RpcEnvironment,
fc189b19 437) -> Result<Vec<SnapshotListItem>, Error> {
e6dc35ac 438 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
7d6fc15b 439
bc21ade2 440 let ns = ns.unwrap_or_default();
7d6fc15b 441
ea2e91e5 442 let list_all = !check_ns_privs_full(
abd82485
FG
443 &store,
444 &ns,
7d6fc15b 445 &auth_id,
2bc2435a
FG
446 PRIV_DATASTORE_AUDIT,
447 PRIV_DATASTORE_BACKUP,
7d6fc15b 448 )?;
184f17af 449
abd82485 450 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
184f17af 451
249dde8b
TL
452 // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
453 // backup group and provide an error free (Err -> None) accessor
0d08fcee 454 let groups = match (backup_type, backup_id) {
db87d93e 455 (Some(backup_type), Some(backup_id)) => {
abd82485 456 vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
db87d93e 457 }
8c74349b 458 // FIXME: Recursion
7d9cb8c4 459 (Some(backup_type), None) => datastore
abd82485 460 .iter_backup_groups_ok(ns.clone())?
dc7a5b34
TL
461 .filter(|group| group.backup_type() == backup_type)
462 .collect(),
8c74349b 463 // FIXME: Recursion
7d9cb8c4 464 (None, Some(backup_id)) => datastore
abd82485 465 .iter_backup_groups_ok(ns.clone())?
dc7a5b34
TL
466 .filter(|group| group.backup_id() == backup_id)
467 .collect(),
8c74349b 468 // FIXME: Recursion
abd82485 469 (None, None) => datastore.list_backup_groups(ns.clone())?,
0d08fcee 470 };
54552dda 471
0d08fcee 472 let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
988d575d
WB
473 let backup = pbs_api_types::BackupDir {
474 group: group.into(),
475 time: info.backup_dir.backup_time(),
476 };
6da20161 477 let protected = info.backup_dir.is_protected();
1c090810 478
9ccf933b 479 match get_all_snapshot_files(&info) {
70030b43 480 Ok((manifest, files)) => {
70030b43
DM
481 // extract the first line from notes
482 let comment: Option<String> = manifest.unprotected["notes"]
483 .as_str()
484 .and_then(|notes| notes.lines().next())
485 .map(String::from);
486
035c40e6
FG
487 let fingerprint = match manifest.fingerprint() {
488 Ok(fp) => fp,
489 Err(err) => {
490 eprintln!("error parsing fingerprint: '{}'", err);
491 None
dc7a5b34 492 }
035c40e6
FG
493 };
494
79c53595 495 let verification = manifest.unprotected["verify_state"].clone();
dc7a5b34
TL
496 let verification: Option<SnapshotVerifyState> =
497 match serde_json::from_value(verification) {
498 Ok(verify) => verify,
499 Err(err) => {
500 eprintln!("error parsing verification state : '{}'", err);
501 None
502 }
503 };
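                // `verify_state` is written into the manifest by verification tasks;
                // expected shape (sketch, assumption): {"state": "ok", "upid": "UPID:..."}
                // unparseable values are logged and reported as `None`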
3b2046d2 504
0d08fcee
FG
505 let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
506
79c53595 507 SnapshotListItem {
988d575d 508 backup,
79c53595
FG
509 comment,
510 verification,
035c40e6 511 fingerprint,
79c53595
FG
512 files,
513 size,
514 owner,
02db7267 515 protected,
79c53595 516 }
dc7a5b34 517 }
1c090810
DC
518 Err(err) => {
519 eprintln!("error during snapshot file listing: '{}'", err);
79c53595 520 let files = info
dc7a5b34
TL
521 .files
522 .into_iter()
523 .map(|filename| BackupContent {
524 filename,
525 size: None,
526 crypt_mode: None,
527 })
528 .collect();
79c53595
FG
529
530 SnapshotListItem {
988d575d 531 backup,
79c53595
FG
532 comment: None,
533 verification: None,
035c40e6 534 fingerprint: None,
79c53595
FG
535 files,
536 size: None,
537 owner,
02db7267 538 protected,
79c53595 539 }
dc7a5b34 540 }
0d08fcee
FG
541 }
542 };
184f17af 543
dc7a5b34 544 groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
133d718f 545 let owner = match group.get_owner() {
dc7a5b34
TL
546 Ok(auth_id) => auth_id,
547 Err(err) => {
548 eprintln!(
e13303fc 549 "Failed to get owner of group '{}' in {} - {}",
e13303fc 550 group.group(),
abd82485 551 print_store_and_ns(&store, &ns),
e13303fc 552 err
dc7a5b34 553 );
0d08fcee
FG
554 return Ok(snapshots);
555 }
dc7a5b34 556 };
0d08fcee 557
dc7a5b34
TL
558 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
559 return Ok(snapshots);
560 }
0d08fcee 561
6da20161 562 let group_backups = group.list_backups()?;
0d08fcee 563
dc7a5b34
TL
564 snapshots.extend(
565 group_backups
566 .into_iter()
567 .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
568 );
569
570 Ok(snapshots)
571 })
184f17af
DM
572}
573
22cfad13 574fn get_snapshots_count(store: &Arc<DataStore>, owner: Option<&Authid>) -> Result<Counts, Error> {
8122eaad 575 let root_ns = Default::default();
f12f408e
TL
576 ListAccessibleBackupGroups::new_with_privs(
577 store,
578 root_ns,
579 MAX_NAMESPACE_DEPTH,
580 Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
581 None,
582 owner,
583 )?
584 .try_fold(Counts::default(), |mut counts, group| {
585 let group = match group {
586 Ok(group) => group,
587 Err(_) => return Ok(counts), // TODO: add this as error counts?
588 };
589 let snapshot_count = group.list_backups()?.len() as u64;
590
591 // only include groups with snapshots; counting/displaying empty groups can be confusing
592 if snapshot_count > 0 {
593 let type_count = match group.backup_type() {
594 BackupType::Ct => counts.ct.get_or_insert(Default::default()),
595 BackupType::Vm => counts.vm.get_or_insert(Default::default()),
596 BackupType::Host => counts.host.get_or_insert(Default::default()),
22cfad13 597 };
14e08625 598
f12f408e
TL
599 type_count.groups += 1;
600 type_count.snapshots += snapshot_count;
601 }
16f9f244 602
f12f408e
TL
603 Ok(counts)
604 })
16f9f244
DC
605}
606
1dc117bb
DM
607#[api(
608 input: {
609 properties: {
610 store: {
611 schema: DATASTORE_SCHEMA,
612 },
98afc7b1
FG
613 verbose: {
614 type: bool,
615 default: false,
616 optional: true,
617 description: "Include additional information like snapshot counts and GC status.",
618 },
1dc117bb 619 },
98afc7b1 620
1dc117bb
DM
621 },
622 returns: {
14e08625 623 type: DataStoreStatus,
1dc117bb 624 },
bb34b589 625 access: {
84de1012
TL
626 permission: &Permission::Anybody,
627 description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
628 the full statistics. Counts of accessible groups are always returned, if any exist",
bb34b589 629 },
1dc117bb
DM
630)]
631/// Get datastore status.
ea5f547f 632pub fn status(
1dc117bb 633 store: String,
98afc7b1 634 verbose: bool,
0eecf38f 635 _info: &ApiMethod,
fdfcb74d 636 rpcenv: &mut dyn RpcEnvironment,
14e08625 637) -> Result<DataStoreStatus, Error> {
84de1012
TL
638 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
639 let user_info = CachedUserInfo::new()?;
640 let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
641
642 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read));
643
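    // Access matrix implemented below: AUDIT or BACKUP on the datastore yields full
    // storage statistics; READ alone, or access to at least one namespace, still yields
    // the (possibly owner-filtered) counts but zeroed storage stats; everything else is
    // rejected without revealing whether the datastore exists.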
644 let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
645 true
646 } else if store_privs & PRIV_DATASTORE_READ != 0 {
647 false // allow at least counts, user can read groups anyway..
648 } else if let Ok(ref datastore) = datastore {
649 if !can_access_any_namespace(Arc::clone(datastore), &auth_id, &user_info) {
650 return Err(http_err!(FORBIDDEN, "permission check failed"));
651 }
652 false
653 } else {
654 return Err(http_err!(FORBIDDEN, "permission check failed")); // avoid leaking existence info
655 };
656 let datastore = datastore?; // only unwrap now to avoid leaking existence info
fdfcb74d 657
84de1012 658 let (counts, gc_status) = if verbose {
fdfcb74d
FG
659 let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
660 None
661 } else {
662 Some(&auth_id)
663 };
664
665 let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
84de1012
TL
666 let gc_status = if store_stats {
667 Some(datastore.last_gc_status())
668 } else {
669 None
670 };
fdfcb74d
FG
671
672 (counts, gc_status)
673 } else {
674 (None, None)
98afc7b1 675 };
16f9f244 676
84de1012
TL
677 Ok(if store_stats {
678 let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
679 DataStoreStatus {
680 total: storage.total,
681 used: storage.used,
682 avail: storage.avail,
683 gc_status,
684 counts,
685 }
686 } else {
687 DataStoreStatus {
688 total: 0,
689 used: 0,
690 avail: 0,
691 gc_status,
692 counts,
693 }
14e08625 694 })
0eecf38f
DM
695}
696
c2009e53
DM
697#[api(
698 input: {
699 properties: {
700 store: {
701 schema: DATASTORE_SCHEMA,
702 },
bc21ade2 703 ns: {
8c74349b
WB
704 type: BackupNamespace,
705 optional: true,
706 },
c2009e53 707 "backup-type": {
988d575d 708 type: BackupType,
c2009e53
DM
709 optional: true,
710 },
711 "backup-id": {
712 schema: BACKUP_ID_SCHEMA,
713 optional: true,
714 },
dcbf29e7
HL
715 "ignore-verified": {
716 schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
717 optional: true,
718 },
719 "outdated-after": {
720 schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
721 optional: true,
722 },
c2009e53
DM
723 "backup-time": {
724 schema: BACKUP_TIME_SCHEMA,
725 optional: true,
726 },
59229bd7
TL
727 "max-depth": {
728 schema: NS_MAX_DEPTH_SCHEMA,
729 optional: true,
730 },
c2009e53
DM
731 },
732 },
733 returns: {
734 schema: UPID_SCHEMA,
735 },
736 access: {
7d6fc15b
TL
737 permission: &Permission::Anybody,
738 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
739 or DATASTORE_BACKUP and being the owner of the group",
c2009e53
DM
740 },
741)]
742/// Verify backups.
743///
744/// This function can verify a single backup snapshot, all backups from a backup group,
745/// or all backups in the datastore.
746pub fn verify(
747 store: String,
bc21ade2 748 ns: Option<BackupNamespace>,
988d575d 749 backup_type: Option<BackupType>,
c2009e53
DM
750 backup_id: Option<String>,
751 backup_time: Option<i64>,
dcbf29e7
HL
752 ignore_verified: Option<bool>,
753 outdated_after: Option<i64>,
59229bd7 754 max_depth: Option<usize>,
c2009e53
DM
755 rpcenv: &mut dyn RpcEnvironment,
756) -> Result<Value, Error> {
7d6fc15b 757 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
bc21ade2 758 let ns = ns.unwrap_or_default();
ea2e91e5
FG
759
760 let owner_check_required = check_ns_privs_full(
abd82485
FG
761 &store,
762 &ns,
7d6fc15b 763 &auth_id,
2bc2435a
FG
764 PRIV_DATASTORE_VERIFY,
765 PRIV_DATASTORE_BACKUP,
7d6fc15b 766 )?;
a724f5fd 767
abd82485 768 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
dcbf29e7 769 let ignore_verified = ignore_verified.unwrap_or(true);
c2009e53 770
8ea00f6e 771 let worker_id;
c2009e53
DM
772
773 let mut backup_dir = None;
774 let mut backup_group = None;
133042b5 775 let mut worker_type = "verify";
c2009e53
DM
776
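    // scope selection: (type, id, time) => single snapshot, (type, id) => whole group,
    // none of them => the whole datastore or namespace subtree (honoring max-depth)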
777 match (backup_type, backup_id, backup_time) {
778 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
dc7a5b34 779 worker_id = format!(
8c74349b 780 "{}:{}/{}/{}/{:08X}",
abd82485 781 store,
bc21ade2 782 ns.display_as_path(),
8c74349b
WB
783 backup_type,
784 backup_id,
785 backup_time
dc7a5b34 786 );
bc21ade2
WB
787 let dir =
788 datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;
09f6a240 789
a724f5fd
FG
790 if owner_check_required {
791 let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
792 check_backup_owner(&owner, &auth_id)?;
793 }
09f6a240 794
c2009e53 795 backup_dir = Some(dir);
133042b5 796 worker_type = "verify_snapshot";
c2009e53
DM
797 }
798 (Some(backup_type), Some(backup_id), None) => {
8c74349b
WB
799 worker_id = format!(
800 "{}:{}/{}/{}",
abd82485 801 store,
bc21ade2 802 ns.display_as_path(),
8c74349b
WB
803 backup_type,
804 backup_id
805 );
133d718f 806 let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
09f6a240 807
a724f5fd 808 if owner_check_required {
bc21ade2 809 let owner = datastore.get_owner(&ns, &group)?;
a724f5fd
FG
810 check_backup_owner(&owner, &auth_id)?;
811 }
09f6a240 812
bc21ade2 813 backup_group = Some(datastore.backup_group(ns.clone(), group));
133042b5 814 worker_type = "verify_group";
c2009e53
DM
815 }
816 (None, None, None) => {
bc21ade2 817 worker_id = if ns.is_root() {
abd82485 818 store
59229bd7 819 } else {
abd82485 820 format!("{}:{}", store, ns.display_as_path())
59229bd7 821 };
c2009e53 822 }
5a718dce 823 _ => bail!("parameters do not specify a backup group or snapshot"),
c2009e53
DM
824 }
825
39735609 826 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
c2009e53
DM
827
828 let upid_str = WorkerTask::new_thread(
133042b5 829 worker_type,
44288184 830 Some(worker_id),
049a22a3 831 auth_id.to_string(),
e7cb4dc5
WB
832 to_stdout,
833 move |worker| {
9c26a3d6 834 let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
adfdc369 835 let failed_dirs = if let Some(backup_dir) = backup_dir {
adfdc369 836 let mut res = Vec::new();
f6b1d1cc 837 if !verify_backup_dir(
9c26a3d6 838 &verify_worker,
f6b1d1cc 839 &backup_dir,
f6b1d1cc 840 worker.upid().clone(),
dc7a5b34 841 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
f6b1d1cc 842 )? {
5ae393af
FG
843 res.push(print_ns_and_snapshot(
844 backup_dir.backup_ns(),
845 backup_dir.as_ref(),
846 ));
adfdc369
DC
847 }
848 res
c2009e53 849 } else if let Some(backup_group) = backup_group {
7e25b9aa 850 let failed_dirs = verify_backup_group(
9c26a3d6 851 &verify_worker,
63d9aca9 852 &backup_group,
7e25b9aa 853 &mut StoreProgress::new(1),
f6b1d1cc 854 worker.upid(),
dc7a5b34 855 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
63d9aca9
DM
856 )?;
857 failed_dirs
c2009e53 858 } else {
a724f5fd 859 let owner = if owner_check_required {
de27ebc6 860 Some(&auth_id)
09f6a240
FG
861 } else {
862 None
863 };
864
dcbf29e7
HL
865 verify_all_backups(
866 &verify_worker,
867 worker.upid(),
bc21ade2 868 ns,
59229bd7 869 max_depth,
dcbf29e7 870 owner,
dc7a5b34 871 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
dcbf29e7 872 )?
c2009e53 873 };
3984a5fd 874 if !failed_dirs.is_empty() {
1ec0d70d 875 task_log!(worker, "Failed to verify the following snapshots/groups:");
adfdc369 876 for dir in failed_dirs {
1ec0d70d 877 task_log!(worker, "\t{}", dir);
adfdc369 878 }
1ffe0301 879 bail!("verification failed - please check the log for details");
c2009e53
DM
880 }
881 Ok(())
e7cb4dc5
WB
882 },
883 )?;
c2009e53
DM
884
885 Ok(json!(upid_str))
886}
887
0a240aaa
DC
888#[api(
889 input: {
890 properties: {
8c74349b
WB
891 group: {
892 type: pbs_api_types::BackupGroup,
893 flatten: true,
894 },
0a240aaa
DC
895 "dry-run": {
896 optional: true,
897 type: bool,
898 default: false,
899 description: "Just show what prune would do, but do not delete anything.",
900 },
dba37e21
WB
901 "keep-options": {
902 type: KeepOptions,
0a240aaa
DC
903 flatten: true,
904 },
905 store: {
906 schema: DATASTORE_SCHEMA,
907 },
dba37e21
WB
908 ns: {
909 type: BackupNamespace,
910 optional: true,
911 },
0a240aaa
DC
912 },
913 },
7b570c17 914 returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
0a240aaa 915 access: {
7d6fc15b
TL
916 permission: &Permission::Anybody,
917 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\
918 or DATASTORE_PRUNE and being the owner of the group",
0a240aaa
DC
919 },
920)]
9805207a 921/// Prune a group on the datastore
bf78f708 922pub fn prune(
8c74349b 923 group: pbs_api_types::BackupGroup,
0a240aaa 924 dry_run: bool,
dba37e21 925 keep_options: KeepOptions,
0a240aaa 926 store: String,
dba37e21 927 ns: Option<BackupNamespace>,
0a240aaa 928 _param: Value,
54552dda 929 rpcenv: &mut dyn RpcEnvironment,
83b7db02 930) -> Result<Value, Error> {
e6dc35ac 931 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 932 let ns = ns.unwrap_or_default();
7a404dc5 933 let datastore = check_privs_and_load_store(
abd82485
FG
934 &store,
935 &ns,
7d6fc15b 936 &auth_id,
2bc2435a
FG
937 PRIV_DATASTORE_MODIFY,
938 PRIV_DATASTORE_PRUNE,
c9396984 939 Some(Operation::Write),
c9396984
FG
940 &group,
941 )?;
db87d93e 942
abd82485
FG
943 let worker_id = format!("{}:{}:{}", store, ns, group);
944 let group = datastore.backup_group(ns.clone(), group);
83b7db02 945
dda70154
DM
946 let mut prune_result = Vec::new();
947
6da20161 948 let list = group.list_backups()?;
dda70154 949
dba37e21 950 let mut prune_info = compute_prune_info(list, &keep_options)?;
dda70154
DM
951
952 prune_info.reverse(); // delete older snapshots first
953
dba37e21 954 let keep_all = !keep_options.keeps_something();
dda70154
DM
955
956 if dry_run {
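        // dry run: only report what would happen; each entry looks roughly like
        // (sketch): {"backup-type": "vm", "backup-id": "100", "backup-time": 1680000000,
        // "keep": true, "protected": false} plus an "ns" key for non-root namespaces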
02db7267
DC
957 for (info, mark) in prune_info {
958 let keep = keep_all || mark.keep();
dda70154 959
33f2c2a1 960 let mut result = json!({
db87d93e
WB
961 "backup-type": info.backup_dir.backup_type(),
962 "backup-id": info.backup_dir.backup_id(),
963 "backup-time": info.backup_dir.backup_time(),
dda70154 964 "keep": keep,
02db7267 965 "protected": mark.protected(),
33f2c2a1 966 });
bc21ade2
WB
967 let prune_ns = info.backup_dir.backup_ns();
968 if !prune_ns.is_root() {
969 result["ns"] = serde_json::to_value(prune_ns)?;
33f2c2a1
WB
970 }
971 prune_result.push(result);
dda70154
DM
972 }
973 return Ok(json!(prune_result));
974 }
975
163e9bbe 976 // We use a WorkerTask just to have a task log, but run synchronously
049a22a3 977 let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
dda70154 978
f1539300 979 if keep_all {
1ec0d70d 980 task_log!(worker, "No prune selection - keeping all files.");
f1539300 981 } else {
dba37e21
WB
982 let mut opts = Vec::new();
983 if !ns.is_root() {
984 opts.push(format!("--ns {ns}"));
985 }
986 crate::server::cli_keep_options(&mut opts, &keep_options);
987
988 task_log!(worker, "retention options: {}", opts.join(" "));
dc7a5b34
TL
989 task_log!(
990 worker,
e13303fc 991 "Starting prune on {} group \"{}\"",
abd82485 992 print_store_and_ns(&store, &ns),
e13303fc 993 group.group(),
dc7a5b34 994 );
f1539300 995 }
3b03abfe 996
02db7267
DC
997 for (info, mark) in prune_info {
998 let keep = keep_all || mark.keep();
dda70154 999
f1539300
SR
1000 let backup_time = info.backup_dir.backup_time();
1001 let timestamp = info.backup_dir.backup_time_string();
db87d93e
WB
1002 let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();
1003
1004 let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);
f1539300 1005
1ec0d70d 1006 task_log!(worker, "{}", msg);
f1539300 1007
133d718f 1008 prune_result.push(json!({
db87d93e
WB
1009 "backup-type": group.ty,
1010 "backup-id": group.id,
f1539300
SR
1011 "backup-time": backup_time,
1012 "keep": keep,
02db7267 1013 "protected": mark.protected(),
133d718f 1014 }));
f1539300
SR
1015
1016 if !(dry_run || keep) {
133d718f 1017 if let Err(err) = info.backup_dir.destroy(false) {
1ec0d70d
DM
1018 task_warn!(
1019 worker,
1020 "failed to remove dir {:?}: {}",
1021 info.backup_dir.relative_path(),
1022 err,
f1539300 1023 );
8f0b4c1f 1024 }
8f579717 1025 }
f1539300 1026 }
dd8e744f 1027
f1539300 1028 worker.log_result(&Ok(()));
83b7db02 1029
dda70154 1030 Ok(json!(prune_result))
83b7db02
DM
1031}
1032
9805207a
DC
1033#[api(
1034 input: {
1035 properties: {
1036 "dry-run": {
1037 optional: true,
1038 type: bool,
1039 default: false,
1040 description: "Just show what prune would do, but do not delete anything.",
1041 },
1042 "prune-options": {
dba37e21 1043 type: PruneJobOptions,
9805207a
DC
1044 flatten: true,
1045 },
1046 store: {
1047 schema: DATASTORE_SCHEMA,
1048 },
1049 },
1050 },
1051 returns: {
1052 schema: UPID_SCHEMA,
1053 },
1054 access: {
dba37e21
WB
1055 permission: &Permission::Anybody,
1056 description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
9805207a
DC
1057 },
1058)]
1059/// Prune the datastore
1060pub fn prune_datastore(
1061 dry_run: bool,
dba37e21 1062 prune_options: PruneJobOptions,
9805207a
DC
1063 store: String,
1064 _param: Value,
1065 rpcenv: &mut dyn RpcEnvironment,
1066) -> Result<String, Error> {
dba37e21
WB
1067 let user_info = CachedUserInfo::new()?;
1068
9805207a
DC
1069 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1070
dba37e21
WB
1071 user_info.check_privs(
1072 &auth_id,
1073 &prune_options.acl_path(&store),
1074 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
1075 true,
1076 )?;
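    // acl_path() resolves to /datastore/{store}, or the configured namespace below it,
    // so the privilege check is evaluated against the prune job's actual target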
1077
e9d2fc93 1078 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
dba37e21 1079 let ns = prune_options.ns.clone().unwrap_or_default();
36971618 1080 let worker_id = format!("{}:{}", store, ns);
9805207a 1081
bfa942c0
DC
1082 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
1083
9805207a
DC
1084 let upid_str = WorkerTask::new_thread(
1085 "prune",
36971618 1086 Some(worker_id),
049a22a3 1087 auth_id.to_string(),
bfa942c0 1088 to_stdout,
dc7a5b34 1089 move |worker| {
dba37e21 1090 crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
dc7a5b34 1091 },
9805207a
DC
1092 )?;
1093
1094 Ok(upid_str)
1095}
1096
dfc58d47
DM
1097#[api(
1098 input: {
1099 properties: {
1100 store: {
1101 schema: DATASTORE_SCHEMA,
1102 },
1103 },
1104 },
1105 returns: {
1106 schema: UPID_SCHEMA,
1107 },
bb34b589 1108 access: {
54552dda 1109 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
bb34b589 1110 },
dfc58d47
DM
1111)]
1112/// Start garbage collection.
bf78f708 1113pub fn start_garbage_collection(
dfc58d47 1114 store: String,
6049b71f 1115 _info: &ApiMethod,
dd5495d6 1116 rpcenv: &mut dyn RpcEnvironment,
6049b71f 1117) -> Result<Value, Error> {
e9d2fc93 1118 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
e6dc35ac 1119 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
15e9b4ed 1120
dc7a5b34 1121 let job = Job::new("garbage_collection", &store)
4fdf5ddf 1122 .map_err(|_| format_err!("garbage collection already running"))?;
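    // creating the job state acquires the per-datastore job lock, so a second GC on the
    // same datastore fails here instead of running concurrently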
15e9b4ed 1123
39735609 1124 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
15e9b4ed 1125
dc7a5b34
TL
1126 let upid_str =
1127 crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
1128 .map_err(|err| {
1129 format_err!(
1130 "unable to start garbage collection job on datastore {} - {}",
1131 store,
1132 err
1133 )
1134 })?;
0f778e06
DM
1135
1136 Ok(json!(upid_str))
15e9b4ed
DM
1137}
1138
a92830dc
DM
1139#[api(
1140 input: {
1141 properties: {
1142 store: {
1143 schema: DATASTORE_SCHEMA,
1144 },
1145 },
1146 },
1147 returns: {
1148 type: GarbageCollectionStatus,
bb34b589
DM
1149 },
1150 access: {
1151 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
1152 },
a92830dc
DM
1153)]
1154/// Garbage collection status.
5eeea607 1155pub fn garbage_collection_status(
a92830dc 1156 store: String,
6049b71f 1157 _info: &ApiMethod,
dd5495d6 1158 _rpcenv: &mut dyn RpcEnvironment,
a92830dc 1159) -> Result<GarbageCollectionStatus, Error> {
e9d2fc93 1160 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f2b99c34 1161
f2b99c34 1162 let status = datastore.last_gc_status();
691c89a0 1163
a92830dc 1164 Ok(status)
691c89a0
DM
1165}
1166
bb34b589 1167#[api(
30fb6025
DM
1168 returns: {
1169 description: "List the accessible datastores.",
1170 type: Array,
9b93c620 1171 items: { type: DataStoreListItem },
30fb6025 1172 },
bb34b589 1173 access: {
54552dda 1174 permission: &Permission::Anybody,
bb34b589
DM
1175 },
1176)]
1177/// Datastore list
bf78f708 1178pub fn get_datastore_list(
6049b71f
DM
1179 _param: Value,
1180 _info: &ApiMethod,
54552dda 1181 rpcenv: &mut dyn RpcEnvironment,
455e5f71 1182) -> Result<Vec<DataStoreListItem>, Error> {
e7d4be9d 1183 let (config, _digest) = pbs_config::datastore::config()?;
15e9b4ed 1184
e6dc35ac 1185 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda
DM
1186 let user_info = CachedUserInfo::new()?;
1187
30fb6025 1188 let mut list = Vec::new();
54552dda 1189
30fb6025 1190 for (store, (_, data)) in &config.sections {
9a37bd6c 1191 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
dc7a5b34 1192 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
7d6fc15b
TL
1193
1194 let mut allow_id = false;
1195 if !allowed {
f418f4e4 1196 if let Ok(datastore) = DataStore::lookup_datastore(store, Some(Operation::Read)) {
de77a20d 1197 allow_id = can_access_any_namespace(datastore, &auth_id, &user_info);
7d6fc15b 1198 }
f418f4e4
TL
1199 // FIXME: check for any ACL on the datastore below in the error case, otherwise an offline
1200 // datastore will disappear for users that can only access a specific namespace
7d6fc15b
TL
1201 }
1202
1203 if allowed || allow_id {
dc7a5b34
TL
1204 list.push(DataStoreListItem {
1205 store: store.clone(),
7d6fc15b
TL
1206 comment: if !allowed {
1207 None
1208 } else {
1209 data["comment"].as_str().map(String::from)
1210 },
e022d13c 1211 maintenance: data["maintenance-mode"].as_str().map(String::from),
dc7a5b34 1212 });
30fb6025 1213 }
54552dda
DM
1214 }
1215
44288184 1216 Ok(list)
15e9b4ed
DM
1217}
1218
0ab08ac9
DM
1219#[sortable]
1220pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
1221 &ApiHandler::AsyncHttp(&download_file),
1222 &ObjectSchema::new(
1223 "Download single raw file from backup snapshot.",
1224 &sorted!([
66c49c21 1225 ("store", false, &DATASTORE_SCHEMA),
bc21ade2 1226 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
0ab08ac9 1227 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
dc7a5b34 1228 ("backup-id", false, &BACKUP_ID_SCHEMA),
0ab08ac9 1229 ("backup-time", false, &BACKUP_TIME_SCHEMA),
4191018c 1230 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
0ab08ac9 1231 ]),
dc7a5b34
TL
1232 ),
1233)
1234.access(
7d6fc15b
TL
1235 Some(
1236 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1237 DATASTORE_BACKUP and being the owner of the group",
dc7a5b34 1238 ),
7d6fc15b 1239 &Permission::Anybody,
54552dda 1240);
691c89a0 1241
bf78f708 1242pub fn download_file(
9e47c0a5
DM
1243 _parts: Parts,
1244 _req_body: Body,
1245 param: Value,
255f378a 1246 _info: &ApiMethod,
54552dda 1247 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 1248) -> ApiResponseFuture {
ad51d02a 1249 async move {
7d6fc15b 1250 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
3c8c2827 1251 let store = required_string_param(&param, "store")?;
133d718f 1252 let backup_ns = optional_ns_param(&param)?;
1afce610 1253
7d6fc15b 1254 let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
7a404dc5 1255 let datastore = check_privs_and_load_store(
abd82485
FG
1256 &store,
1257 &backup_ns,
7d6fc15b 1258 &auth_id,
2bc2435a
FG
1259 PRIV_DATASTORE_READ,
1260 PRIV_DATASTORE_BACKUP,
c9396984 1261 Some(Operation::Read),
c9396984
FG
1262 &backup_dir.group,
1263 )?;
1264
3c8c2827 1265 let file_name = required_string_param(&param, "file-name")?.to_owned();
9e47c0a5 1266
dc7a5b34
TL
1267 println!(
1268 "Download {} from {} ({}/{})",
abd82485
FG
1269 file_name,
1270 print_store_and_ns(&store, &backup_ns),
1271 backup_dir,
1272 file_name
dc7a5b34 1273 );
9e47c0a5 1274
1afce610
FG
1275 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
1276
ad51d02a
DM
1277 let mut path = datastore.base_path();
1278 path.push(backup_dir.relative_path());
1279 path.push(&file_name);
1280
ba694720 1281 let file = tokio::fs::File::open(&path)
8aa67ee7
WB
1282 .await
1283 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
ad51d02a 1284
dc7a5b34
TL
1285 let payload =
1286 tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
1287 .map_ok(|bytes| bytes.freeze())
1288 .map_err(move |err| {
1289 eprintln!("error during streaming of '{:?}' - {}", &path, err);
1290 err
1291 });
ad51d02a 1292 let body = Body::wrap_stream(payload);
9e47c0a5 1293
ad51d02a
DM
1294 // fixme: set other headers ?
1295 Ok(Response::builder()
dc7a5b34
TL
1296 .status(StatusCode::OK)
1297 .header(header::CONTENT_TYPE, "application/octet-stream")
1298 .body(body)
1299 .unwrap())
1300 }
1301 .boxed()
9e47c0a5
DM
1302}
1303
6ef9bb59
DC
1304#[sortable]
1305pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
1306 &ApiHandler::AsyncHttp(&download_file_decoded),
1307 &ObjectSchema::new(
1308 "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
1309 &sorted!([
1310 ("store", false, &DATASTORE_SCHEMA),
bc21ade2 1311 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
6ef9bb59 1312 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
dc7a5b34 1313 ("backup-id", false, &BACKUP_ID_SCHEMA),
6ef9bb59
DC
1314 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1315 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
1316 ]),
dc7a5b34
TL
1317 ),
1318)
1319.access(
7d6fc15b
TL
1320 Some(
1321 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1322 DATASTORE_BACKUP and being the owner of the group",
dc7a5b34 1323 ),
7d6fc15b 1324 &Permission::Anybody,
6ef9bb59
DC
1325);
1326
bf78f708 1327pub fn download_file_decoded(
6ef9bb59
DC
1328 _parts: Parts,
1329 _req_body: Body,
1330 param: Value,
1331 _info: &ApiMethod,
1332 rpcenv: Box<dyn RpcEnvironment>,
1333) -> ApiResponseFuture {
6ef9bb59 1334 async move {
7d6fc15b 1335 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
3c8c2827 1336 let store = required_string_param(&param, "store")?;
133d718f 1337 let backup_ns = optional_ns_param(&param)?;
abd82485 1338
1afce610 1339 let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
7a404dc5 1340 let datastore = check_privs_and_load_store(
abd82485
FG
1341 &store,
1342 &backup_ns,
7d6fc15b 1343 &auth_id,
2bc2435a
FG
1344 PRIV_DATASTORE_READ,
1345 PRIV_DATASTORE_BACKUP,
c9396984 1346 Some(Operation::Read),
1afce610 1347 &backup_dir_api.group,
c9396984 1348 )?;
a724f5fd 1349
3c8c2827 1350 let file_name = required_string_param(&param, "file-name")?.to_owned();
abd82485 1351 let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
6ef9bb59 1352
9ccf933b 1353 let (manifest, files) = read_backup_index(&backup_dir)?;
6ef9bb59 1354 for file in files {
f28d9088 1355 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
6ef9bb59
DC
1356 bail!("cannot decode '{}' - is encrypted", file_name);
1357 }
1358 }
1359
dc7a5b34
TL
1360 println!(
1361 "Download {} from {} ({}/{})",
abd82485
FG
1362 file_name,
1363 print_store_and_ns(&store, &backup_ns),
1364 backup_dir_api,
1365 file_name
dc7a5b34 1366 );
6ef9bb59
DC
1367
1368 let mut path = datastore.base_path();
1369 path.push(backup_dir.relative_path());
1370 path.push(&file_name);
1371
1372 let extension = file_name.rsplitn(2, '.').next().unwrap();
1373
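        // stream depending on the archive type: dynamic/fixed indexes are verified
        // against the manifest checksum and read chunk-wise; plain blobs are streamed
        // as-is (see the FIXME below regarding checksum verification)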
1374 let body = match extension {
1375 "didx" => {
dc7a5b34
TL
1376 let index = DynamicIndexReader::open(&path).map_err(|err| {
1377 format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
1378 })?;
2d55beec
FG
1379 let (csum, size) = index.compute_csum();
1380 manifest.verify_file(&file_name, &csum, size)?;
6ef9bb59 1381
14f6c9cb 1382 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1383 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1384 Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
1385 eprintln!("error during streaming of '{:?}' - {}", path, err);
1386 err
1387 }))
1388 }
6ef9bb59 1389 "fidx" => {
dc7a5b34
TL
1390 let index = FixedIndexReader::open(&path).map_err(|err| {
1391 format_err!("unable to read fixed index '{:?}' - {}", &path, err)
1392 })?;
6ef9bb59 1393
2d55beec
FG
1394 let (csum, size) = index.compute_csum();
1395 manifest.verify_file(&file_name, &csum, size)?;
1396
14f6c9cb 1397 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1398 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1399 Body::wrap_stream(
1400 AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
1401 move |err| {
1402 eprintln!("error during streaming of '{:?}' - {}", path, err);
1403 err
1404 },
1405 ),
1406 )
1407 }
6ef9bb59
DC
1408 "blob" => {
1409 let file = std::fs::File::open(&path)
8aa67ee7 1410 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
6ef9bb59 1411
2d55beec
FG
1412 // FIXME: load full blob to verify index checksum?
1413
6ef9bb59 1414 Body::wrap_stream(
dc7a5b34
TL
1415 WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
1416 move |err| {
6ef9bb59
DC
1417 eprintln!("error during streaming of '{:?}' - {}", path, err);
1418 err
dc7a5b34
TL
1419 },
1420 ),
6ef9bb59 1421 )
dc7a5b34 1422 }
6ef9bb59
DC
1423 extension => {
1424 bail!("cannot download '{}' files", extension);
dc7a5b34 1425 }
6ef9bb59
DC
1426 };
1427
1428 // fixme: set other headers ?
1429 Ok(Response::builder()
dc7a5b34
TL
1430 .status(StatusCode::OK)
1431 .header(header::CONTENT_TYPE, "application/octet-stream")
1432 .body(body)
1433 .unwrap())
1434 }
1435 .boxed()
6ef9bb59
DC
1436}
1437
552c2259 1438#[sortable]
0ab08ac9
DM
1439pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
1440 &ApiHandler::AsyncHttp(&upload_backup_log),
255f378a 1441 &ObjectSchema::new(
54552dda 1442 "Upload the client backup log file into a backup snapshot ('client.log.blob').",
552c2259 1443 &sorted!([
66c49c21 1444 ("store", false, &DATASTORE_SCHEMA),
bc21ade2 1445 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
255f378a 1446 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
0ab08ac9 1447 ("backup-id", false, &BACKUP_ID_SCHEMA),
255f378a 1448 ("backup-time", false, &BACKUP_TIME_SCHEMA),
552c2259 1449 ]),
dc7a5b34
TL
1450 ),
1451)
1452.access(
54552dda 1453 Some("Only the backup creator/owner is allowed to do this."),
7d6fc15b 1454 &Permission::Anybody,
54552dda 1455);
9e47c0a5 1456
bf78f708 1457pub fn upload_backup_log(
07ee2235
DM
1458 _parts: Parts,
1459 req_body: Body,
1460 param: Value,
255f378a 1461 _info: &ApiMethod,
54552dda 1462 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 1463) -> ApiResponseFuture {
ad51d02a 1464 async move {
7d6fc15b 1465 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
3c8c2827 1466 let store = required_string_param(&param, "store")?;
133d718f 1467 let backup_ns = optional_ns_param(&param)?;
abd82485 1468
1afce610 1469 let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
2bc2435a 1470
7a404dc5 1471 let datastore = check_privs_and_load_store(
abd82485
FG
1472 &store,
1473 &backup_ns,
c9396984 1474 &auth_id,
7a404dc5
FG
1475 0,
1476 PRIV_DATASTORE_BACKUP,
c9396984 1477 Some(Operation::Write),
1afce610 1478 &backup_dir_api.group,
c9396984 1479 )?;
abd82485 1480 let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
07ee2235 1481
dc7a5b34 1482 let file_name = CLIENT_LOG_BLOB_NAME;
07ee2235 1483
133d718f 1484 let mut path = backup_dir.full_path();
ad51d02a 1485 path.push(&file_name);
07ee2235 1486
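        // a snapshot may contain at most one client log; re-uploads are rejected below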
ad51d02a
DM
1487 if path.exists() {
1488 bail!("backup already contains a log.");
1489 }
e128d4e8 1490
abd82485
FG
1491 println!(
1492 "Upload backup log to {} {backup_dir_api}/{file_name}",
1493 print_store_and_ns(&store, &backup_ns),
1494 );
ad51d02a
DM
1495
1496 let data = req_body
1497 .map_err(Error::from)
1498 .try_fold(Vec::new(), |mut acc, chunk| {
1499 acc.extend_from_slice(&*chunk);
1500 future::ok::<_, Error>(acc)
1501 })
1502 .await?;
1503
39f18b30
DM
1504 // always verify blob/CRC at server side
1505 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1506
e0a19d33 1507 replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
ad51d02a
DM
1508
1509 // fixme: use correct formatter
53daae8e 1510 Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
dc7a5b34
TL
1511 }
1512 .boxed()
07ee2235
DM
1513}
1514
5b1cfa01
DC
1515#[api(
1516 input: {
1517 properties: {
988d575d 1518 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1519 ns: {
133d718f
WB
1520 type: BackupNamespace,
1521 optional: true,
1522 },
8c74349b
WB
1523 backup_dir: {
1524 type: pbs_api_types::BackupDir,
1525 flatten: true,
1526 },
5b1cfa01
DC
1527 "filepath": {
1528 description: "Base64 encoded path.",
1529 type: String,
1530 }
1531 },
1532 },
1533 access: {
7d6fc15b
TL
1534 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1535 DATASTORE_BACKUP and being the owner of the group",
1536 permission: &Permission::Anybody,
5b1cfa01
DC
1537 },
1538)]
1539/// Get the entries of the given path of the catalog
bf78f708 1540pub fn catalog(
5b1cfa01 1541 store: String,
bc21ade2 1542 ns: Option<BackupNamespace>,
8c74349b 1543 backup_dir: pbs_api_types::BackupDir,
5b1cfa01 1544 filepath: String,
5b1cfa01 1545 rpcenv: &mut dyn RpcEnvironment,
227501c0 1546) -> Result<Vec<ArchiveEntry>, Error> {
e6dc35ac 1547 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 1548 let ns = ns.unwrap_or_default();
ea2e91e5 1549
7a404dc5 1550 let datastore = check_privs_and_load_store(
abd82485
FG
1551 &store,
1552 &ns,
7d6fc15b 1553 &auth_id,
2bc2435a
FG
1554 PRIV_DATASTORE_READ,
1555 PRIV_DATASTORE_BACKUP,
c9396984 1556 Some(Operation::Read),
c9396984
FG
1557 &backup_dir.group,
1558 )?;
a724f5fd 1559
fbfb64a6 1560 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
5b1cfa01 1561
9238cdf5
FG
1562 let file_name = CATALOG_NAME;
1563
9ccf933b 1564 let (manifest, files) = read_backup_index(&backup_dir)?;
9238cdf5
FG
1565 for file in files {
1566 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1567 bail!("cannot decode '{}' - is encrypted", file_name);
1568 }
1569 }
1570
5b1cfa01
DC
1571 let mut path = datastore.base_path();
1572 path.push(backup_dir.relative_path());
9238cdf5 1573 path.push(file_name);
5b1cfa01
DC
1574
1575 let index = DynamicIndexReader::open(&path)
1576 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1577
2d55beec 1578 let (csum, size) = index.compute_csum();
9a37bd6c 1579 manifest.verify_file(file_name, &csum, size)?;
2d55beec 1580
14f6c9cb 1581 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
5b1cfa01
DC
1582 let reader = BufferedDynamicReader::new(index, chunk_reader);
1583
1584 let mut catalog_reader = CatalogReader::new(reader);
5b1cfa01 1585
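    // "root" (or "/") addresses the catalog root; anything else is treated as a
    // base64-encoded absolute path inside the catalog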
5279ee74 1586 let path = if filepath != "root" && filepath != "/" {
227501c0
DC
1587 base64::decode(filepath)?
1588 } else {
1589 vec![b'/']
1590 };
5b1cfa01 1591
86582454 1592 catalog_reader.list_dir_contents(&path)
5b1cfa01
DC
1593}
1594
d33d8f4e
DC
1595#[sortable]
1596pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1597 &ApiHandler::AsyncHttp(&pxar_file_download),
1598 &ObjectSchema::new(
1ffe0301 1599 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
d33d8f4e
DC
1600 &sorted!([
1601 ("store", false, &DATASTORE_SCHEMA),
bc21ade2 1602 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
d33d8f4e
DC
1603 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1604 ("backup-id", false, &BACKUP_ID_SCHEMA),
1605 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1606 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
984ddb2f 1607 ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
d33d8f4e
DC
1608 ]),
1609 )
7d6fc15b
TL
1610).access(
1611 Some(
1612 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1613 DATASTORE_BACKUP and being the owner of the group",
1614 ),
1615 &Permission::Anybody,
d33d8f4e
DC
1616);
1617
bf78f708 1618pub fn pxar_file_download(
d33d8f4e
DC
1619 _parts: Parts,
1620 _req_body: Body,
1621 param: Value,
1622 _info: &ApiMethod,
1623 rpcenv: Box<dyn RpcEnvironment>,
1624) -> ApiResponseFuture {
d33d8f4e 1625 async move {
7d6fc15b 1626 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
3c8c2827 1627 let store = required_string_param(&param, "store")?;
bc21ade2 1628 let ns = optional_ns_param(&param)?;
abd82485 1629
7d6fc15b 1630 let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
7a404dc5 1631 let datastore = check_privs_and_load_store(
abd82485
FG
1632 &store,
1633 &ns,
7d6fc15b 1634 &auth_id,
2bc2435a
FG
1635 PRIV_DATASTORE_READ,
1636 PRIV_DATASTORE_BACKUP,
c9396984 1637 Some(Operation::Read),
c9396984
FG
1638 &backup_dir.group,
1639 )?;
a724f5fd 1640
bc21ade2 1641 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
d33d8f4e 1642
3c8c2827 1643 let filepath = required_string_param(&param, "filepath")?.to_owned();
d33d8f4e 1644
984ddb2f
DC
1645 let tar = param["tar"].as_bool().unwrap_or(false);
1646
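        // `filepath` is base64("<archive-name>/<path inside the pxar archive>"); the
        // first component selects the .pxar index, the remainder is looked up inside it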
d33d8f4e 1647 let mut components = base64::decode(&filepath)?;
3984a5fd 1648 if !components.is_empty() && components[0] == b'/' {
d33d8f4e
DC
1649 components.remove(0);
1650 }
1651
d8d8af98 1652 let mut split = components.splitn(2, |c| *c == b'/');
9238cdf5 1653 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
0dfce17a 1654 let file_path = split.next().unwrap_or(b"/");
9ccf933b 1655 let (manifest, files) = read_backup_index(&backup_dir)?;
9238cdf5
FG
1656 for file in files {
1657 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1658 bail!("cannot decode '{}' - is encrypted", pxar_name);
1659 }
1660 }
d33d8f4e 1661
9238cdf5
FG
1662 let mut path = datastore.base_path();
1663 path.push(backup_dir.relative_path());
1664 path.push(pxar_name);
d33d8f4e
DC
1665
1666 let index = DynamicIndexReader::open(&path)
1667 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1668
2d55beec 1669 let (csum, size) = index.compute_csum();
9a37bd6c 1670 manifest.verify_file(pxar_name, &csum, size)?;
2d55beec 1671
14f6c9cb 1672 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
d33d8f4e
DC
1673 let reader = BufferedDynamicReader::new(index, chunk_reader);
1674 let archive_size = reader.archive_size();
1675 let reader = LocalDynamicReadAt::new(reader);
1676
1677 let decoder = Accessor::new(reader, archive_size).await?;
1678 let root = decoder.open_root().await?;
2e219481 1679 let path = OsStr::from_bytes(file_path).to_os_string();
d33d8f4e 1680 let file = root
dc7a5b34
TL
1681 .lookup(&path)
1682 .await?
2e219481 1683 .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
d33d8f4e 1684
804f6143
DC
1685 let body = match file.kind() {
1686 EntryKind::File { .. } => Body::wrap_stream(
1687 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1688 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1689 err
1690 }),
1691 ),
1692 EntryKind::Hardlink(_) => Body::wrap_stream(
1693 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1694 .map_err(move |err| {
dc7a5b34 1695 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
804f6143
DC
1696 err
1697 }),
1698 ),
1699 EntryKind::Directory => {
984ddb2f 1700 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
804f6143 1701 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
984ddb2f 1702 if tar {
dc7a5b34
TL
1703 proxmox_rest_server::spawn_internal_task(create_tar(
1704 channelwriter,
1705 decoder,
1706 path.clone(),
1707 false,
1708 ));
984ddb2f
DC
1709 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1710 Body::wrap_stream(zstdstream.map_err(move |err| {
1711 eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
1712 err
1713 }))
1714 } else {
dc7a5b34
TL
1715 proxmox_rest_server::spawn_internal_task(create_zip(
1716 channelwriter,
1717 decoder,
1718 path.clone(),
1719 false,
1720 ));
984ddb2f
DC
1721 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
1722 eprintln!("error during streaming of zip '{:?}' - {}", path, err);
1723 err
1724 }))
1725 }
804f6143
DC
1726 }
1727 other => bail!("cannot download file of type {:?}", other),
1728 };
d33d8f4e
DC
1729
1730 // fixme: set other headers ?
1731 Ok(Response::builder()
dc7a5b34
TL
1732 .status(StatusCode::OK)
1733 .header(header::CONTENT_TYPE, "application/octet-stream")
1734 .body(body)
1735 .unwrap())
1736 }
1737 .boxed()
d33d8f4e
DC
1738}
1739
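// Editor's note: the following test module is an illustrative sketch, not part
// of the original file. It only documents the layout of the base64 "filepath"
// parameter consumed by pxar_file_download() above: the decoded value is
// "<archive-name>/<path inside the archive>" (an optional leading '/' is
// stripped). The helper name `encode_filepath` is hypothetical.
#[cfg(test)]
mod pxar_filepath_example {
    /// Build a "filepath" parameter for the pxar-file-download endpoint.
    fn encode_filepath(pxar_name: &str, path_in_archive: &str) -> String {
        base64::encode(format!("{}/{}", pxar_name, path_in_archive))
    }

    #[test]
    fn filepath_round_trip() {
        let encoded = encode_filepath("root.pxar.didx", "etc/hostname");
        let decoded = base64::decode(&encoded).unwrap();
        // the first component (up to the first '/') names the pxar index file,
        // the remainder is the path looked up inside the archive
        let mut split = decoded.splitn(2, |c| *c == b'/');
        assert_eq!(split.next().unwrap(), b"root.pxar.didx");
        assert_eq!(split.next().unwrap(), b"etc/hostname");
    }
}
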
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrame,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrame,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let disk_manager = crate::tools::disks::DiskManage::new();

    let mut rrd_fields = vec![
        "total",
        "used",
        "read_ios",
        "read_bytes",
        "write_ios",
        "write_bytes",
    ];

    // we do not have io_ticks for zpools, so don't include them
    match disk_manager.find_mounted_device(&datastore.base_path()) {
        Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
        _ => rrd_fields.push("io_ticks"),
    };

    create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
}

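// Editor's note (illustrative; exact parameter spellings follow the
// RRDTimeFrame and RRDMode enums): these stats are exposed through the "rrd"
// subdir of the datastore router, e.g.
//   GET /api2/json/admin/datastore/{store}/rrd?timeframe=day&cf=AVERAGE
// and return a series of data points containing the fields selected in
// `rrd_fields` above.
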
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
    },
)]
/// Read active operations on a datastore
pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
    let active_operations = task_tracking::get_active_operations(&store)?;
    Ok(json!({
        "read": active_operations.read,
        "write": active_operations.write,
    }))
}

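// Editor's note: the object returned above has the shape
//   { "read": <number of active read operations>, "write": <number of active write operations> }
// e.g. {"read": 0, "write": 1} while a single backup is being written
// (illustrative values).
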
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Get "notes" for a backup group
pub fn get_group_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_group,
    )?;

    let note_path = get_group_note_path(&datastore, &ns, &backup_group);
    Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_group,
    )?;

    let note_path = get_group_note_path(&datastore, &ns, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;

    Ok(())
}

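// Editor's note: group notes are kept as a plain text file inside the group
// directory (the path returned by get_group_note_path()), whereas per-snapshot
// notes handled by get_notes()/set_notes() below live in the snapshot
// manifest's "unprotected" section.
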
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    let (manifest, _) = backup_dir.load_manifest()?;

    let notes = manifest.unprotected["notes"].as_str().unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    backup_dir
        .update_manifest(|manifest| {
            manifest.unprotected["notes"] = notes.into();
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

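// Editor's note: set_notes() stores the text under the manifest's unprotected
// section, so the snapshot's manifest blob ends up containing something like
//   "unprotected": { "notes": "my snapshot comment", ... }
// (illustrative excerpt). Unprotected data is not covered by the manifest
// signature, which is why it can be updated in place without re-signing.
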
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    Ok(backup_dir.is_protected())
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// En- or disable protection for a specific backup
pub fn set_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    datastore.update_protection(&backup_dir, protected)
}

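// Editor's note: update_protection() above persists the flag as an on-disk
// protection marker for the snapshot; while a snapshot is protected, prune and
// manual removal refuse to delete it.
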
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and \
            a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let owner_check_required = check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_group = datastore.backup_group(ns, backup_group);

    if owner_check_required {
        let owner = backup_group.get_owner()?;

        let allowed = match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            }
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
            }
            (false, true) => {
                // API token owner to API token
                owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
            }
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            }
        };

        if !allowed {
            return Err(http_err!(
                UNAUTHORIZED,
                "{} does not have permission to change owner of backup group '{}' to {}",
                auth_id,
                backup_group.group(),
                new_owner,
            ));
        }
    }

    let user_info = CachedUserInfo::new()?;

    if !user_info.is_active_auth_id(&new_owner) {
        bail!(
            "{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner
        );
    }

    backup_group.set_owner(&new_owner, true)?;

    Ok(())
}

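// Editor's note: summary of the ownership-change matrix enforced above when the
// caller lacks Datastore.Modify (owner_check_required == true):
//   token -> token   only between tokens belonging to the calling user
//   token -> user    only from one of the caller's own tokens to the caller
//   user  -> token   only from the caller to one of the caller's own tokens
//   user  -> user    never allowed without Datastore.Modify
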
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "namespace",
        // FIXME: move into datastore:: sub-module?!
        &crate::api2::admin::namespace::ROUTER,
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);
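
// Editor's note: with ROUTER mounted under the admin API (this file is
// api2/admin/datastore.rs), the subdirs above resolve to paths such as
//   /api2/json/admin/datastore/{store}/snapshots
//   /api2/json/admin/datastore/{store}/pxar-file-download
//   /api2/json/admin/datastore/{store}/rrd
// (illustrative; the exact prefix depends on where the parent router is mounted).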