//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreListItem,
    DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode,
    RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
    BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ,
    PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

// TODO: move somewhere we can reuse it from (namespace has its own copy atm.)
fn get_ns_privs(store: &str, ns: &BackupNamespace, auth_id: &Authid) -> Result<u64, Error> {
    let user_info = CachedUserInfo::new()?;

    Ok(if ns.is_root() {
        user_info.lookup_privs(auth_id, &["datastore", store])
    } else {
        user_info.lookup_privs(auth_id, &["datastore", store, &ns.to_string()])
    })
}

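// Illustrative sketch (not part of the original file) of the ACL object paths the lookup
// above resolves against, assuming a datastore "store1" and a namespace rendered as "a/b":
//
//     root namespace   -> ["datastore", "store1"]
//     namespace "a/b"  -> ["datastore", "store1", "a/b"]
//
// i.e. non-root namespaces get their own ACL path component below the datastore.
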
// asserts that either `full_access_privs` or `partial_access_privs` are fulfilled; the
// returned value indicates whether further checks like group ownership are required
fn check_ns_privs(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
) -> Result<bool, Error> {
    let privs = get_ns_privs(store, ns, auth_id)?;

    if full_access_privs != 0 && (privs & full_access_privs) != 0 {
        return Ok(false);
    }
    if partial_access_privs != 0 && (privs & partial_access_privs) != 0 {
        return Ok(true);
    }

    proxmox_router::http_bail!(FORBIDDEN, "permission check failed");
}

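// Illustrative outcomes (not part of the original file) of the bit tests above, using
// hypothetical flag values FULL = 0b0100 and PARTIAL = 0b0010:
//
//     privs = 0b0100 -> Ok(false): full access, caller may skip the owner check
//     privs = 0b0010 -> Ok(true):  limited access, caller must still verify group ownership
//     privs = 0b0001 -> http_bail!(FORBIDDEN, ...)
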
// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(&store, operation)?;

    if limited {
        let owner = datastore.get_owner(&ns, backup_group)?;
        check_backup_owner(&owner, &auth_id)?;
    }

    Ok(datastore)
}

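// Illustrative call site (a sketch, not part of the original file) mirroring how the API
// handlers below use this helper for prune-style permissions:
//
//     let datastore = check_privs_and_load_store(
//         &store,
//         &backup_ns,
//         &auth_id,
//         PRIV_DATASTORE_MODIFY, // full access: may touch any group
//         PRIV_DATASTORE_PRUNE,  // partial access: only owned groups
//         Some(Operation::Write),
//         &group,
//     )?;
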
fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    backup_ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();
    let list_all = !check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    datastore
        .iter_backup_groups(backup_ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;
            let owner = match datastore.get_owner(&backup_ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    let id = &store;
                    eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &backup_ns, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}

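// Rough shape of one entry returned by list_groups() (illustrative only; values are made
// up and field names approximate the serialized GroupListItem):
//
//     {
//       "backup-type": "vm",
//       "backup-id": "100",
//       "last-backup": 1650000000,
//       "backup-count": 3,
//       "files": ["drive-scsi0.img.fidx", "index.json.blob"],
//       "owner": "backup@pam",
//       "comment": "first line of the group's notes file"
//     }
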
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    backup_ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    if !datastore.remove_backup_group(&backup_ns, &group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(backup_ns, backup_dir)?;

    let info = BackupInfo::new(snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(backup_ns, backup_dir)?;

    snapshot.destroy(false)?;

    Ok(Value::Null)
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    let list_all = !check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(backup_ns, backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_groups_ok(backup_ns)?
            .filter(|group| group.backup_type() == backup_type)
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => datastore
            .iter_backup_groups_ok(backup_ns)?
            .filter(|group| group.backup_id() == backup_id)
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(backup_ns)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state : '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}/{}' - {}",
                    &store, group, err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

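// Illustrative mapping (not part of the original file) of how the (backup_type, backup_id)
// match above selects groups, e.g. for GET .../snapshots?backup-type=vm&backup-id=100:
//
//     (Some(vm), Some(100)) -> exactly one group: vm/100
//     (Some(vm), None)      -> every group of type "vm" in the namespace
//     (None, Some(100))     -> every group with id "100", regardless of type
//     (None, None)          -> all groups in the namespace
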
fn get_snapshots_count(
    store: &Arc<DataStore>,
    filter_owner: Option<&Authid>,
) -> Result<Counts, Error> {
    store
        .iter_backup_groups_ok(Default::default())? // FIXME: Recurse!
        .filter(|group| {
            // FIXME: namespace:
            let owner = match store.get_owner(&BackupNamespace::root(), group.as_ref()) {
                Ok(owner) => owner,
                Err(err) => {
                    let id = store.name();
                    eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
                    return false;
                }
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups()?.len() as u64;

            // only include groups with snapshots, counting/displaying empty groups can confuse
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                    BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                    BackupType::Host => counts.host.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
}

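// Sketch of the accumulated result (illustrative, not part of the original file; type
// names approximate): for groups vm/100 (2 snapshots), vm/101 (1) and ct/200 (3) the fold
// above yields roughly
//
//     Counts {
//         vm: Some(TypeCounts { groups: 2, snapshots: 3 }),
//         ct: Some(TypeCounts { groups: 1, snapshots: 3 }),
//         host: None,
//         ..
//     }
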
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    let owner_check_required = check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                backup_ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir = datastore.backup_dir_from_parts(
                backup_ns.clone(),
                backup_type,
                backup_id,
                backup_time,
            )?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                backup_ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&backup_ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(backup_ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if backup_ns.is_root() {
                store.clone()
            } else {
                format!("{store}:{}", backup_ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?;
                failed_dirs
            } else {
                let owner = if owner_check_required {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    backup_ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

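// Illustrative dispatch (not part of the original file) of the parameter match above:
//
//     (Some(ty), Some(id), Some(time)) -> verify a single snapshot ("verify_snapshot")
//     (Some(ty), Some(id), None)       -> verify one backup group  ("verify_group")
//     (None,     None,     None)       -> verify the whole (namespaced) datastore ("verify")
//     anything else                    -> bail!("parameters do not specify a backup group or snapshot")
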
#[api(
    input: {
        properties: {
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    backup_ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    let group = datastore.backup_group(backup_ns, group);

    let worker_id = format!("{}:{}", store, group);

    let mut prune_result = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let mut result = json!({
                "backup-type": info.backup_dir.backup_type(),
                "backup-id": info.backup_dir.backup_id(),
                "backup-time": info.backup_dir.backup_time(),
                "keep": keep,
                "protected": mark.protected(),
            });
            let ns = info.backup_dir.backup_ns();
            if !ns.is_root() {
                result["backup-ns"] = serde_json::to_value(ns)?;
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(
            worker,
            "retention options: {}",
            pbs_datastore::prune::cli_options_string(&prune_options)
        );
        task_log!(
            worker,
            "Starting prune on store \"{}\" group \"{}\"",
            store,
            group,
        );
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();

        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.ty,
            "backup-id": group.id,
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = info.backup_dir.destroy(false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

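// Rough shape of one dry-run result entry built above (illustrative values only):
//
//     {
//       "backup-type": "vm",
//       "backup-id": "100",
//       "backup-time": 1650000000,
//       "keep": false,
//       "protected": false,
//       "backup-ns": ...   // only present for non-root namespaces
//     }
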
#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    ns: Option<BackupNamespace>,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    // FIXME: also allow a per-namespace pruning with max-depth

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(store.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(
                worker,
                auth_id,
                prune_options,
                datastore,
                ns.unwrap_or_default(),
                dry_run,
            )
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

fn can_access_any_ns(store: Arc<DataStore>, auth_id: &Authid, user_info: &CachedUserInfo) -> bool {
    // NOTE: traversing the datastore could be avoided if we had an "ACL tree: is there any priv
    // below /datastore/{store}" helper
    let mut iter =
        if let Ok(iter) = store.recursive_iter_backup_ns_ok(BackupNamespace::root(), None) {
            iter
        } else {
            return false;
        };
    let wanted =
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
    let name = store.name();
    iter.any(|ns| -> bool {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", name, &ns.to_string()]);
        user_privs & wanted != 0
    })
}

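// Illustrative behaviour (not part of the original file): with an ACL granting only, say,
//     /datastore/store1/customerA -> PRIV_DATASTORE_BACKUP
// the root-namespace check in get_datastore_list() below fails, but this helper returns
// true as soon as the iterator reaches "customerA", so "store1" is still listed (with its
// comment withheld).
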
#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut allow_id = false;
        if !allowed {
            let scfg: pbs_api_types::DataStoreConfig = serde_json::from_value(data.to_owned())?;
            // safety: we just cannot go through lookup as we must avoid an operation check
            if let Ok(datastore) = unsafe { DataStore::open_from_config(scfg, None) } {
                allow_id = can_access_any_ns(datastore, &auth_id, &user_info);
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name, store, backup_dir, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name, store, backup_dir, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            0,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let mut path = backup_dir.full_path();
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {store}/{backup_dir}/{file_name}");

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}

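// Note (illustrative, not part of the original file): "filepath" is the base64-encoded
// path within the catalog; the literal values "root" and "/" are accepted unencoded and
// select the catalog's top level, as handled by the `filepath != "root"` check above.
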
d33d8f4e
DC
1604#[sortable]
1605pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1606 &ApiHandler::AsyncHttp(&pxar_file_download),
1607 &ObjectSchema::new(
1ffe0301 1608 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
d33d8f4e
DC
1609 &sorted!([
1610 ("store", false, &DATASTORE_SCHEMA),
33f2c2a1 1611 ("backup-ns", true, &BACKUP_NAMESPACE_SCHEMA),
d33d8f4e
DC
1612 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1613 ("backup-id", false, &BACKUP_ID_SCHEMA),
1614 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1615 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
984ddb2f 1616 ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
d33d8f4e
DC
1617 ]),
1618 )
7d6fc15b
TL
1619).access(
1620 Some(
1621 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1622 DATASTORE_BACKUP and being the owner of the group",
1623 ),
1624 &Permission::Anybody,
d33d8f4e
DC
1625);
1626
bf78f708 1627pub fn pxar_file_download(
d33d8f4e
DC
1628 _parts: Parts,
1629 _req_body: Body,
1630 param: Value,
1631 _info: &ApiMethod,
1632 rpcenv: Box<dyn RpcEnvironment>,
1633) -> ApiResponseFuture {
d33d8f4e 1634 async move {
7d6fc15b 1635 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
3c8c2827 1636 let store = required_string_param(&param, "store")?;
133d718f 1637 let backup_ns = optional_ns_param(&param)?;
7d6fc15b 1638 let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
7a404dc5 1639 let datastore = check_privs_and_load_store(
7d6fc15b 1640 &store,
133d718f 1641 &backup_ns,
7d6fc15b 1642 &auth_id,
2bc2435a
FG
1643 PRIV_DATASTORE_READ,
1644 PRIV_DATASTORE_BACKUP,
c9396984 1645 Some(Operation::Read),
c9396984
FG
1646 &backup_dir.group,
1647 )?;
a724f5fd 1648
133d718f 1649 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
d33d8f4e 1650
3c8c2827 1651 let filepath = required_string_param(&param, "filepath")?.to_owned();
d33d8f4e 1652
984ddb2f
DC
1653 let tar = param["tar"].as_bool().unwrap_or(false);
1654
d33d8f4e 1655 let mut components = base64::decode(&filepath)?;
3984a5fd 1656 if !components.is_empty() && components[0] == b'/' {
d33d8f4e
DC
1657 components.remove(0);
1658 }
1659
d8d8af98 1660 let mut split = components.splitn(2, |c| *c == b'/');
9238cdf5 1661 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
0dfce17a 1662 let file_path = split.next().unwrap_or(b"/");
9ccf933b 1663 let (manifest, files) = read_backup_index(&backup_dir)?;
9238cdf5
FG
1664 for file in files {
1665 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1666 bail!("cannot decode '{}' - is encrypted", pxar_name);
1667 }
1668 }
d33d8f4e 1669
9238cdf5
FG
1670 let mut path = datastore.base_path();
1671 path.push(backup_dir.relative_path());
1672 path.push(pxar_name);
d33d8f4e
DC
1673
1674 let index = DynamicIndexReader::open(&path)
1675 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1676
2d55beec 1677 let (csum, size) = index.compute_csum();
9a37bd6c 1678 manifest.verify_file(pxar_name, &csum, size)?;
2d55beec 1679
14f6c9cb 1680 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
d33d8f4e
DC
1681 let reader = BufferedDynamicReader::new(index, chunk_reader);
1682 let archive_size = reader.archive_size();
1683 let reader = LocalDynamicReadAt::new(reader);
1684
1685 let decoder = Accessor::new(reader, archive_size).await?;
1686 let root = decoder.open_root().await?;
2e219481 1687 let path = OsStr::from_bytes(file_path).to_os_string();
d33d8f4e 1688 let file = root
dc7a5b34
TL
1689 .lookup(&path)
1690 .await?
2e219481 1691 .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
d33d8f4e 1692
804f6143
DC
1693 let body = match file.kind() {
1694 EntryKind::File { .. } => Body::wrap_stream(
1695 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1696 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1697 err
1698 }),
1699 ),
1700 EntryKind::Hardlink(_) => Body::wrap_stream(
1701 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1702 .map_err(move |err| {
dc7a5b34 1703 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
804f6143
DC
1704 err
1705 }),
1706 ),
1707 EntryKind::Directory => {
984ddb2f 1708 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
804f6143 1709 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
984ddb2f 1710 if tar {
dc7a5b34
TL
1711 proxmox_rest_server::spawn_internal_task(create_tar(
1712 channelwriter,
1713 decoder,
1714 path.clone(),
1715 false,
1716 ));
984ddb2f
DC
1717 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1718 Body::wrap_stream(zstdstream.map_err(move |err| {
1719 eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
1720 err
1721 }))
1722 } else {
dc7a5b34
TL
1723 proxmox_rest_server::spawn_internal_task(create_zip(
1724 channelwriter,
1725 decoder,
1726 path.clone(),
1727 false,
1728 ));
984ddb2f
DC
1729 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
1730 eprintln!("error during streaming of zip '{:?}' - {}", path, err);
1731 err
1732 }))
1733 }
804f6143
DC
1734 }
1735 other => bail!("cannot download file of type {:?}", other),
1736 };
d33d8f4e
DC
1737
1738 // fixme: set other headers ?
1739 Ok(Response::builder()
dc7a5b34
TL
1740 .status(StatusCode::OK)
1741 .header(header::CONTENT_TYPE, "application/octet-stream")
1742 .body(body)
1743 .unwrap())
1744 }
1745 .boxed()
d33d8f4e
DC
1746}
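
// Illustrative sketch, not part of the original module: this mirrors how the handler
// above splits the decoded `filepath` into the archive name and the path inside the
// archive (leading '/' stripped, then split at the first '/').
fn split_pxar_filepath_example(decoded: &[u8]) -> (&[u8], &[u8]) {
    let decoded = if decoded.first() == Some(&b'/') {
        &decoded[1..]
    } else {
        decoded
    };
    let mut split = decoded.splitn(2, |c| *c == b'/');
    let pxar_name = split.next().unwrap_or(b"");
    let file_path = split.next().unwrap_or(b"/");
    (pxar_name, file_path)
}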
1747
1a0d3d11
DM
1748#[api(
1749 input: {
1750 properties: {
1751 store: {
1752 schema: DATASTORE_SCHEMA,
1753 },
1754 timeframe: {
c68fa58a 1755 type: RRDTimeFrame,
1a0d3d11
DM
1756 },
1757 cf: {
1758 type: RRDMode,
1759 },
1760 },
1761 },
1762 access: {
7d6fc15b
TL
1763 permission: &Permission::Privilege(
1764 &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1a0d3d11
DM
1765 },
1766)]
1767/// Read datastore RRD statistics
bf78f708 1768pub fn get_rrd_stats(
1a0d3d11 1769 store: String,
c68fa58a 1770 timeframe: RRDTimeFrame,
1a0d3d11
DM
1771 cf: RRDMode,
1772 _param: Value,
1773) -> Result<Value, Error> {
e9d2fc93 1774 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1775 let disk_manager = crate::tools::disks::DiskManage::new();
1776
1777 let mut rrd_fields = vec![
dc7a5b34
TL
1778 "total",
1779 "used",
1780 "read_ios",
1781 "read_bytes",
1782 "write_ios",
1783 "write_bytes",
f27b6086
DC
1784 ];
1785
1786 // we do not have io_ticks for zpools, so don't include them
1787 match disk_manager.find_mounted_device(&datastore.base_path()) {
dc7a5b34 1788 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
f27b6086
DC
1789 _ => rrd_fields.push("io_ticks"),
1790 };
1791
dc7a5b34 1792 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1a0d3d11
DM
1793}
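
// Illustrative sketch, not part of the original module: the field list above is pruned
// because zpools do not report io_ticks. A std-only helper mirroring that decision;
// the fs_type string is whatever the disk manager reports for the mounted device.
fn rrd_fields_for_fs_example(fs_type: &str) -> Vec<&'static str> {
    let mut fields = vec![
        "total", "used", "read_ios", "read_bytes", "write_ios", "write_bytes",
    ];
    if fs_type != "zfs" {
        fields.push("io_ticks");
    }
    fields
}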
1794
5fd823c3
HL
1795#[api(
1796 input: {
1797 properties: {
1798 store: {
1799 schema: DATASTORE_SCHEMA,
1800 },
1801 },
1802 },
1803 access: {
1804 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1805 },
1806)]
1807/// Read the number of active read and write operations on a datastore
dc7a5b34 1808pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
5fd823c3
HL
1809 let active_operations = task_tracking::get_active_operations(&store)?;
1810 Ok(json!({
1811 "read": active_operations.read,
1812 "write": active_operations.write,
1813 }))
1814}
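
// Illustrative sketch, not part of the original module: the endpoint above returns the
// per-datastore reader/writer counts as a small JSON object, e.g. {"read": 1, "write": 0}.
fn active_operations_json_example(read: i64, write: i64) -> serde_json::Value {
    serde_json::json!({ "read": read, "write": write })
}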
1815
d6688884
SR
1816#[api(
1817 input: {
1818 properties: {
988d575d 1819 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
1820 "backup-ns": {
1821 type: BackupNamespace,
1822 optional: true,
1823 },
8c74349b
WB
1824 backup_group: {
1825 type: pbs_api_types::BackupGroup,
1826 flatten: true,
1827 },
d6688884
SR
1828 },
1829 },
1830 access: {
7d6fc15b
TL
1831 permission: &Permission::Anybody,
1832 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1833 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1834 },
1835)]
1836/// Get "notes" for a backup group
1837pub fn get_group_notes(
1838 store: String,
133d718f 1839 backup_ns: Option<BackupNamespace>,
8c74349b 1840 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1841 rpcenv: &mut dyn RpcEnvironment,
1842) -> Result<String, Error> {
d6688884 1843 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 1844 let backup_ns = backup_ns.unwrap_or_default();
7a404dc5 1845 let datastore = check_privs_and_load_store(
7d6fc15b 1846 &store,
133d718f 1847 &backup_ns,
7d6fc15b 1848 &auth_id,
2bc2435a
FG
1849 PRIV_DATASTORE_AUDIT,
1850 PRIV_DATASTORE_BACKUP,
c9396984 1851 Some(Operation::Read),
c9396984
FG
1852 &backup_group,
1853 )?;
d6688884 1854
133d718f 1855 let note_path = get_group_note_path(&datastore, &backup_ns, &backup_group);
d6688884
SR
1856 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1857}
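
// Illustrative sketch, not part of the original module: group notes are read as an
// optional plain-text file, and a missing file simply yields an empty string. A
// std-only equivalent of that optional read, with the note path taken as given:
fn read_optional_notes_example(path: &std::path::Path) -> std::io::Result<String> {
    match std::fs::read_to_string(path) {
        Ok(notes) => Ok(notes),
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(String::new()),
        Err(err) => Err(err),
    }
}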
1858
1859#[api(
1860 input: {
1861 properties: {
988d575d 1862 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
1863 "backup-ns": {
1864 type: BackupNamespace,
1865 optional: true,
1866 },
8c74349b
WB
1867 backup_group: {
1868 type: pbs_api_types::BackupGroup,
1869 flatten: true,
1870 },
d6688884
SR
1871 notes: {
1872 description: "A multiline text.",
1873 },
1874 },
1875 },
1876 access: {
7d6fc15b
TL
1877 permission: &Permission::Anybody,
1878 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
1879 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1880 },
1881)]
1882/// Set "notes" for a backup group
1883pub fn set_group_notes(
1884 store: String,
133d718f 1885 backup_ns: Option<BackupNamespace>,
8c74349b 1886 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1887 notes: String,
1888 rpcenv: &mut dyn RpcEnvironment,
1889) -> Result<(), Error> {
d6688884 1890 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 1891 let backup_ns = backup_ns.unwrap_or_default();
7a404dc5 1892 let datastore = check_privs_and_load_store(
7d6fc15b 1893 &store,
133d718f 1894 &backup_ns,
7d6fc15b 1895 &auth_id,
2bc2435a
FG
1896 PRIV_DATASTORE_MODIFY,
1897 PRIV_DATASTORE_BACKUP,
c9396984 1898 Some(Operation::Write),
c9396984
FG
1899 &backup_group,
1900 )?;
d6688884 1901
133d718f 1902 let note_path = get_group_note_path(&datastore, &backup_ns, &backup_group);
e0a19d33 1903 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
d6688884
SR
1904
1905 Ok(())
1906}
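
// Illustrative sketch, not part of the original module: a simplified, std-only take on
// the kind of atomic note update `replace_file` is used for above (write a temporary
// file, then rename it over the target); ownership and permission handling is omitted.
fn write_notes_atomically_example(path: &std::path::Path, notes: &str) -> std::io::Result<()> {
    let tmp = path.with_extension("tmp");
    std::fs::write(&tmp, notes.as_bytes())?;
    std::fs::rename(&tmp, path)
}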
1907
912b3f5b
DM
1908#[api(
1909 input: {
1910 properties: {
988d575d 1911 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
1912 "backup-ns": {
1913 type: BackupNamespace,
1914 optional: true,
1915 },
8c74349b
WB
1916 backup_dir: {
1917 type: pbs_api_types::BackupDir,
1918 flatten: true,
1919 },
912b3f5b
DM
1920 },
1921 },
1922 access: {
7d6fc15b
TL
1923 permission: &Permission::Anybody,
1924 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1925 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
1926 },
1927)]
1928/// Get "notes" for a specific backup
bf78f708 1929pub fn get_notes(
912b3f5b 1930 store: String,
133d718f 1931 backup_ns: Option<BackupNamespace>,
8c74349b 1932 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
1933 rpcenv: &mut dyn RpcEnvironment,
1934) -> Result<String, Error> {
7d6fc15b 1935 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 1936 let backup_ns = backup_ns.unwrap_or_default();
7a404dc5 1937 let datastore = check_privs_and_load_store(
7d6fc15b 1938 &store,
133d718f 1939 &backup_ns,
7d6fc15b 1940 &auth_id,
2bc2435a
FG
1941 PRIV_DATASTORE_AUDIT,
1942 PRIV_DATASTORE_BACKUP,
c9396984 1943 Some(Operation::Read),
c9396984
FG
1944 &backup_dir.group,
1945 )?;
912b3f5b 1946
133d718f 1947 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
912b3f5b 1948
133d718f 1949 let (manifest, _) = backup_dir.load_manifest()?;
912b3f5b 1950
dc7a5b34 1951 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
912b3f5b
DM
1952
1953 Ok(String::from(notes))
1954}
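
// Illustrative sketch, not part of the original module: snapshot notes live in the
// manifest's "unprotected" section, so reading them is a plain JSON lookup. The
// manifest value here stands in for the part of the layout this handler relies on.
fn notes_from_manifest_example(manifest: &serde_json::Value) -> String {
    manifest["unprotected"]["notes"]
        .as_str()
        .unwrap_or("")
        .to_string()
}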
1955
1956#[api(
1957 input: {
1958 properties: {
988d575d 1959 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
1960 "backup-ns": {
1961 type: BackupNamespace,
1962 optional: true,
1963 },
8c74349b
WB
1964 backup_dir: {
1965 type: pbs_api_types::BackupDir,
1966 flatten: true,
1967 },
912b3f5b
DM
1968 notes: {
1969 description: "A multiline text.",
1970 },
1971 },
1972 },
1973 access: {
7d6fc15b
TL
1974 permission: &Permission::Anybody,
1975 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
1976 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
1977 },
1978)]
1979/// Set "notes" for a specific backup
bf78f708 1980pub fn set_notes(
912b3f5b 1981 store: String,
133d718f 1982 backup_ns: Option<BackupNamespace>,
8c74349b 1983 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
1984 notes: String,
1985 rpcenv: &mut dyn RpcEnvironment,
1986) -> Result<(), Error> {
7d6fc15b 1987 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 1988 let backup_ns = backup_ns.unwrap_or_default();
7a404dc5 1989 let datastore = check_privs_and_load_store(
7d6fc15b 1990 &store,
133d718f 1991 &backup_ns,
7d6fc15b 1992 &auth_id,
2bc2435a
FG
1993 PRIV_DATASTORE_MODIFY,
1994 PRIV_DATASTORE_BACKUP,
c9396984 1995 Some(Operation::Write),
c9396984
FG
1996 &backup_dir.group,
1997 )?;
912b3f5b 1998
133d718f 1999 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
912b3f5b 2000
133d718f
WB
2001 backup_dir
2002 .update_manifest(|manifest| {
dc7a5b34
TL
2003 manifest.unprotected["notes"] = notes.into();
2004 })
2005 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
2006
2007 Ok(())
2008}
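
// Illustrative sketch, not part of the original module: the write path mirrors the read
// path above; only the manifest's unprotected section is touched, which keeps the
// signed/protected part of the manifest unchanged.
fn set_notes_in_manifest_example(manifest: &mut serde_json::Value, notes: &str) {
    // assumes `manifest` is a JSON object, as backup manifests are
    manifest["unprotected"]["notes"] = serde_json::Value::from(notes);
}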
2009
8292d3d2
DC
2010#[api(
2011 input: {
2012 properties: {
988d575d 2013 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
2014 "backup-ns": {
2015 type: BackupNamespace,
2016 optional: true,
2017 },
8c74349b
WB
2018 backup_dir: {
2019 type: pbs_api_types::BackupDir,
2020 flatten: true,
2021 },
8292d3d2
DC
2022 },
2023 },
2024 access: {
7d6fc15b
TL
2025 permission: &Permission::Anybody,
2026 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2027 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2028 },
2029)]
2030/// Query protection for a specific backup
2031pub fn get_protection(
2032 store: String,
133d718f 2033 backup_ns: Option<BackupNamespace>,
8c74349b 2034 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2035 rpcenv: &mut dyn RpcEnvironment,
2036) -> Result<bool, Error> {
7d6fc15b 2037 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 2038 let backup_ns = backup_ns.unwrap_or_default();
7a404dc5 2039 let datastore = check_privs_and_load_store(
7d6fc15b 2040 &store,
133d718f 2041 &backup_ns,
7d6fc15b 2042 &auth_id,
2bc2435a
FG
2043 PRIV_DATASTORE_AUDIT,
2044 PRIV_DATASTORE_BACKUP,
c9396984 2045 Some(Operation::Read),
c9396984
FG
2046 &backup_dir.group,
2047 )?;
8292d3d2 2048
133d718f 2049 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
8292d3d2 2050
6da20161 2051 Ok(backup_dir.is_protected())
8292d3d2
DC
2052}
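
// Illustrative sketch, not part of the original module, assuming protection is tracked
// by a marker file inside the snapshot directory (the file name below is an assumption
// for this sketch, not taken from the code above):
fn is_protected_example(snapshot_dir: &std::path::Path) -> bool {
    snapshot_dir.join(".protected").exists()
}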
2053
2054#[api(
2055 input: {
2056 properties: {
988d575d 2057 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
2058 "backup-ns": {
2059 type: BackupNamespace,
2060 optional: true,
2061 },
8c74349b
WB
2062 backup_dir: {
2063 type: pbs_api_types::BackupDir,
2064 flatten: true,
2065 },
8292d3d2
DC
2066 protected: {
2067 description: "Enable/disable protection.",
2068 },
2069 },
2070 },
2071 access: {
7d6fc15b
TL
2072 permission: &Permission::Anybody,
2073 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2074 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2075 },
2076)]
2077/// Enable or disable protection for a specific backup
2078pub fn set_protection(
2079 store: String,
133d718f 2080 backup_ns: Option<BackupNamespace>,
8c74349b 2081 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2082 protected: bool,
2083 rpcenv: &mut dyn RpcEnvironment,
2084) -> Result<(), Error> {
7d6fc15b 2085 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 2086 let backup_ns = backup_ns.unwrap_or_default();
7a404dc5 2087 let datastore = check_privs_and_load_store(
7d6fc15b 2088 &store,
133d718f 2089 &backup_ns,
7d6fc15b 2090 &auth_id,
2bc2435a
FG
2091 PRIV_DATASTORE_MODIFY,
2092 PRIV_DATASTORE_BACKUP,
c9396984 2093 Some(Operation::Write),
c9396984
FG
2094 &backup_dir.group,
2095 )?;
8292d3d2 2096
133d718f 2097 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
8292d3d2 2098
8292d3d2
DC
2099 datastore.update_protection(&backup_dir, protected)
2100}
2101
72be0eb1 2102#[api(
4940012d 2103 input: {
72be0eb1 2104 properties: {
988d575d 2105 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
2106 "backup-ns": {
2107 type: BackupNamespace,
2108 optional: true,
2109 },
8c74349b
WB
2110 backup_group: {
2111 type: pbs_api_types::BackupGroup,
2112 flatten: true,
2113 },
72be0eb1 2114 "new-owner": {
e6dc35ac 2115 type: Authid,
72be0eb1
DW
2116 },
2117 },
4940012d
FG
2118 },
2119 access: {
bff85572 2120 permission: &Permission::Anybody,
7d6fc15b
TL
2121 description: "Requires Datastore.Modify on the whole datastore, or Datastore.Backup and \
2122 ownership of the group when moving ownership between a user and that user's own API tokens"
4940012d 2123 },
72be0eb1
DW
2124)]
2125/// Change owner of a backup group
bf78f708 2126pub fn set_backup_owner(
72be0eb1 2127 store: String,
133d718f 2128 backup_ns: Option<BackupNamespace>,
8c74349b 2129 backup_group: pbs_api_types::BackupGroup,
e6dc35ac 2130 new_owner: Authid,
bff85572 2131 rpcenv: &mut dyn RpcEnvironment,
72be0eb1 2132) -> Result<(), Error> {
bff85572 2133 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 2134 let backup_ns = backup_ns.unwrap_or_default();
2bc2435a 2135 let owner_check_required = check_ns_privs(
a724f5fd
FG
2136 &store,
2137 &backup_ns,
2138 &auth_id,
2bc2435a
FG
2139 PRIV_DATASTORE_MODIFY,
2140 PRIV_DATASTORE_BACKUP,
a724f5fd 2141 )?;
1909ece2
FG
2142
2143 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
2144
133d718f 2145 let backup_group = datastore.backup_group(backup_ns, backup_group);
bff85572 2146
2bc2435a 2147 if owner_check_required {
133d718f 2148 let owner = backup_group.get_owner()?;
bff85572 2149
2bc2435a 2150 let allowed = match (owner.is_token(), new_owner.is_token()) {
bff85572
FG
2151 (true, true) => {
2152 // API token to API token, owned by same user
2153 let owner = owner.user();
2154 let new_owner = new_owner.user();
2155 owner == new_owner && Authid::from(owner.clone()) == auth_id
dc7a5b34 2156 }
bff85572
FG
2157 (true, false) => {
2158 // API token to API token owner
dc7a5b34
TL
2159 Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
2160 }
bff85572
FG
2161 (false, true) => {
2162 // API token owner to API token
dc7a5b34
TL
2163 owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
2164 }
bff85572
FG
2165 (false, false) => {
2166 // User to User, not allowed for unprivileged users
2167 false
dc7a5b34 2168 }
2bc2435a 2169 };
bff85572 2170
2bc2435a
FG
2171 if !allowed {
2172 return Err(http_err!(
2173 UNAUTHORIZED,
2174 "{} does not have permission to change owner of backup group '{}' to {}",
2175 auth_id,
2176 backup_group,
2177 new_owner,
2178 ));
2179 }
bff85572
FG
2180 }
2181
7d6fc15b
TL
2182 let user_info = CachedUserInfo::new()?;
2183
e6dc35ac 2184 if !user_info.is_active_auth_id(&new_owner) {
dc7a5b34
TL
2185 bail!(
2186 "{} '{}' is inactive or non-existent",
2187 if new_owner.is_token() {
2188 "API token".to_string()
2189 } else {
2190 "user".to_string()
2191 },
2192 new_owner
2193 );
72be0eb1
DW
2194 }
2195
133d718f 2196 backup_group.set_owner(&new_owner, true)?;
72be0eb1
DW
2197
2198 Ok(())
2199}
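
// Illustrative sketch, not part of the original module: the four-case ownership matrix
// above reduced to a pure function over the same `Authid` type. Without Datastore.Modify,
// ownership may only move between the calling user and that user's own API tokens.
fn owner_change_allowed_example(owner: &Authid, new_owner: &Authid, auth_id: &Authid) -> bool {
    match (owner.is_token(), new_owner.is_token()) {
        // token -> token: both tokens must belong to the calling user
        (true, true) => {
            owner.user() == new_owner.user() && Authid::from(owner.user().clone()) == *auth_id
        }
        // token -> user: only the token's owner may reclaim it, and only for themselves
        (true, false) => Authid::from(owner.user().clone()) == *auth_id && new_owner == auth_id,
        // user -> token: the caller may hand an owned group to one of their own tokens
        (false, true) => owner == auth_id && Authid::from(new_owner.user().clone()) == *auth_id,
        // user -> user: never allowed without Datastore.Modify on the datastore
        (false, false) => false,
    }
}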
2200
552c2259 2201#[sortable]
255f378a 2202const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
5fd823c3
HL
2203 (
2204 "active-operations",
dc7a5b34 2205 &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
5b1cfa01 2206 ),
dc7a5b34 2207 ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
72be0eb1
DW
2208 (
2209 "change-owner",
dc7a5b34 2210 &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
72be0eb1 2211 ),
255f378a
DM
2212 (
2213 "download",
dc7a5b34 2214 &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
255f378a 2215 ),
6ef9bb59
DC
2216 (
2217 "download-decoded",
dc7a5b34 2218 &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
255f378a 2219 ),
dc7a5b34 2220 ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
255f378a
DM
2221 (
2222 "gc",
2223 &Router::new()
2224 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
dc7a5b34 2225 .post(&API_METHOD_START_GARBAGE_COLLECTION),
255f378a 2226 ),
d6688884
SR
2227 (
2228 "group-notes",
2229 &Router::new()
2230 .get(&API_METHOD_GET_GROUP_NOTES)
dc7a5b34 2231 .put(&API_METHOD_SET_GROUP_NOTES),
d6688884 2232 ),
255f378a
DM
2233 (
2234 "groups",
2235 &Router::new()
b31c8019 2236 .get(&API_METHOD_LIST_GROUPS)
dc7a5b34 2237 .delete(&API_METHOD_DELETE_GROUP),
255f378a 2238 ),
18934ae5
TL
2239 (
2240 "namespace",
2241 // FIXME: move into datastore:: sub-module?!
2242 &crate::api2::admin::namespace::ROUTER,
2243 ),
912b3f5b
DM
2244 (
2245 "notes",
2246 &Router::new()
2247 .get(&API_METHOD_GET_NOTES)
dc7a5b34 2248 .put(&API_METHOD_SET_NOTES),
912b3f5b 2249 ),
8292d3d2
DC
2250 (
2251 "protected",
2252 &Router::new()
2253 .get(&API_METHOD_GET_PROTECTION)
dc7a5b34 2254 .put(&API_METHOD_SET_PROTECTION),
255f378a 2255 ),
dc7a5b34 2256 ("prune", &Router::new().post(&API_METHOD_PRUNE)),
9805207a
DC
2257 (
2258 "prune-datastore",
dc7a5b34 2259 &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
9805207a 2260 ),
d33d8f4e
DC
2261 (
2262 "pxar-file-download",
dc7a5b34 2263 &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
1a0d3d11 2264 ),
dc7a5b34 2265 ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
255f378a
DM
2266 (
2267 "snapshots",
2268 &Router::new()
fc189b19 2269 .get(&API_METHOD_LIST_SNAPSHOTS)
dc7a5b34 2270 .delete(&API_METHOD_DELETE_SNAPSHOT),
255f378a 2271 ),
dc7a5b34 2272 ("status", &Router::new().get(&API_METHOD_STATUS)),
255f378a
DM
2273 (
2274 "upload-backup-log",
dc7a5b34 2275 &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
c2009e53 2276 ),
dc7a5b34 2277 ("verify", &Router::new().post(&API_METHOD_VERIFY)),
255f378a
DM
2278];
2279
ad51d02a 2280const DATASTORE_INFO_ROUTER: Router = Router::new()
255f378a
DM
2281 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2282 .subdirs(DATASTORE_INFO_SUBDIRS);
2283
255f378a 2284pub const ROUTER: Router = Router::new()
bb34b589 2285 .get(&API_METHOD_GET_DATASTORE_LIST)
255f378a 2286 .match_all("store", &DATASTORE_INFO_ROUTER);
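
// Illustrative sketch, not part of the original module: with `match_all("store", ...)`
// every subdir above ends up below a per-datastore path segment. Assuming the usual
// /api2/json/admin/datastore mount point (the mounting itself happens elsewhere),
// a client-side path builder looks like:
fn datastore_subpath_example(store: &str, subdir: &str) -> String {
    // e.g. subdir = "snapshots", "notes", "change-owner", ...
    format!("/api2/json/admin/datastore/{}/{}", store, subdir)
}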