//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreListItem,
    DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode,
    RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
    BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

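// Look up the privileges `auth_id` has on the datastore itself (root namespace) or on the
// given namespace below it.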
// TODO: move somewhere we can reuse it from (namespace has its own copy atm.)
fn get_ns_privs(store: &str, ns: &BackupNamespace, auth_id: &Authid) -> Result<u64, Error> {
    let user_info = CachedUserInfo::new()?;

    Ok(if ns.is_root() {
        user_info.lookup_privs(auth_id, &["datastore", store])
    } else {
        user_info.lookup_privs(auth_id, &["datastore", store, &ns.to_string()])
    })
}

// asserts that either `full_access_privs` or `partial_access_privs` are fulfilled; the returned
// value indicates whether further checks like group ownership are required
fn check_ns_privs(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
) -> Result<bool, Error> {
    let privs = get_ns_privs(store, ns, auth_id)?;

    if full_access_privs != 0 && (privs & full_access_privs) != 0 {
        return Ok(false);
    }
    if partial_access_privs != 0 && (privs & partial_access_privs) != 0 {
        return Ok(true);
    }

    proxmox_router::http_bail!(FORBIDDEN, "permission check failed");
}

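/// Load the datastore and, if the preceding privilege check requires it, verify that `auth_id`
/// is the owner of `backup_group`.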
fn check_owner_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    operation: Option<Operation>,
    owner_check_required: bool,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let datastore = DataStore::lookup_datastore(&store, operation)?;

    if owner_check_required {
        let owner = datastore.get_owner(&ns, backup_group)?;
        check_backup_owner(&owner, &auth_id)?;
    }

    Ok(datastore)
}

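/// Read the manifest of a snapshot and return it together with the list of contained files,
/// including an entry for the manifest blob itself.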
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

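/// Return the manifest and the complete file list of a snapshot, adding files found on disk but
/// missing from the manifest without size or crypt-mode information.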
fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    backup_ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();
    let list_all = !check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    datastore
        .iter_backup_groups(backup_ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;
            let owner = match datastore.get_owner(&backup_ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    let id = &store;
                    eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &backup_ns, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    backup_ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    let owner_check_required = check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
    )?;

    let datastore = check_owner_load_store(
        &store,
        &backup_ns,
        &auth_id,
        Some(Operation::Write),
        owner_check_required,
        &group,
    )?;

    if !datastore.remove_backup_group(&backup_ns, &group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    let owner_check_required = check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = check_owner_load_store(
        &store,
        &backup_ns,
        &auth_id,
        Some(Operation::Read),
        owner_check_required,
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(backup_ns, backup_dir)?;

    let info = BackupInfo::new(snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    let owner_check_required = check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
    )?;

    let datastore = check_owner_load_store(
        &store,
        &backup_ns,
        &auth_id,
        Some(Operation::Write),
        owner_check_required,
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(backup_ns, backup_dir)?;

    snapshot.destroy(false)?;

    Ok(Value::Null)
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    let list_all = !check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(backup_ns, backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_groups_ok(backup_ns)?
            .filter(|group| group.backup_type() == backup_type)
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => datastore
            .iter_backup_groups_ok(backup_ns)?
            .filter(|group| group.backup_id() == backup_id)
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(backup_ns)?,
    };

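    // Map one BackupInfo to a SnapshotListItem; if reading the manifest fails, fall back to a
    // reduced item that only lists the file names.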
    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state: '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}/{}' - {}",
                    &store, group, err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

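/// Count groups and snapshots per backup type (ct/vm/host), optionally restricted to groups
/// owned by `filter_owner`.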
fn get_snapshots_count(
    store: &Arc<DataStore>,
    filter_owner: Option<&Authid>,
) -> Result<Counts, Error> {
    store
        .iter_backup_groups_ok(Default::default())? // FIXME: Recurse!
        .filter(|group| {
            // FIXME: namespace:
            let owner = match store.get_owner(&BackupNamespace::root(), group.as_ref()) {
                Ok(owner) => owner,
                Err(err) => {
                    let id = store.name();
                    eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
                    return false;
                }
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups()?.len() as u64;

            // only include groups with snapshots, counting/displaying empty groups can confuse
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                    BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                    BackupType::Host => counts.host.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
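    // Snapshot counts and GC status are only gathered for verbose requests; without
    // DATASTORE_AUDIT the counts are restricted to backups owned by the requesting user.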
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    let owner_check_required = check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    // FIXME: Recursion
    // FIXME: Namespaces and worker ID, could this be an issue?
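    // Select the verification scope from the given parameters: a single snapshot, a whole
    // backup group, or (if neither type nor id is given) all backups of the datastore.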
    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                backup_ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                backup_ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&backup_ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(backup_ns, group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?;
                failed_dirs
            } else {
                let owner = if owner_check_required {
                    Some(auth_id)
                } else {
                    None
                };

                // FIXME namespace missing here..

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    backup_ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    let owner_check_required = check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
    )?;

    let datastore = check_owner_load_store(
        &store,
        &backup_ns,
        &auth_id,
        Some(Operation::Write),
        owner_check_required,
        &group,
    )?;

    let group = datastore.backup_group(backup_ns, group);

    let worker_id = format!("{}:{}", store, group);

    let mut prune_result = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

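    // A dry run only reports which snapshots would be kept or removed, without deleting anything.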
    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let mut result = json!({
                "backup-type": info.backup_dir.backup_type(),
                "backup-id": info.backup_dir.backup_id(),
                "backup-time": info.backup_dir.backup_time(),
                "keep": keep,
                "protected": mark.protected(),
            });
            let ns = info.backup_dir.backup_ns();
            if !ns.is_root() {
                result["backup-ns"] = serde_json::to_value(ns)?;
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(
            worker,
            "retention options: {}",
            pbs_datastore::prune::cli_options_string(&prune_options)
        );
        task_log!(
            worker,
            "Starting prune on store \"{}\" group \"{}\"",
            store,
            group,
        );
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();

        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.ty,
            "backup-id": group.id,
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = info.backup_dir.destroy(false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    ns: Option<BackupNamespace>,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    // FIXME: also allow a per-namespace pruning with max-depth

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(store.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(
                worker,
                auth_id,
                prune_options,
                datastore,
                ns.unwrap_or_default(),
                dry_run,
            )
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

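/// Check whether `auth_id` has any datastore privilege on at least one namespace of the store.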
fn can_access_any_ns(store: Arc<DataStore>, auth_id: &Authid, user_info: &CachedUserInfo) -> bool {
    // NOTE: traversing the datastore could be avoided if we had an "ACL tree: is there any priv
    // below /datastore/{store}" helper
    let mut iter =
        if let Ok(iter) = store.recursive_iter_backup_ns_ok(BackupNamespace::root(), None) {
            iter
        } else {
            return false;
        };
    let wanted =
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
    let name = store.name();
    iter.any(|ns| -> bool {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", name, &ns.to_string()]);
        user_privs & wanted != 0
    })
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

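        // Users without privileges on the datastore itself may still see its name if they have
        // access to at least one namespace below it; the comment is then omitted.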
        let mut allow_id = false;
        if !allowed {
            let scfg: pbs_api_types::DataStoreConfig = serde_json::from_value(data.to_owned())?;
            // safety: we just cannot go through lookup as we must avoid an operation check
            if let Ok(datastore) = unsafe { DataStore::open_from_config(scfg, None) } {
                allow_id = can_access_any_ns(datastore, &auth_id, &user_info);
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let owner_check_required = check_ns_privs(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
        )?;

        let datastore = check_owner_load_store(
            store,
            &backup_ns,
            &auth_id,
            Some(Operation::Read),
            owner_check_required,
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name, store, backup_dir, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let owner_check_required = check_ns_privs(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
        )?;
        let datastore = check_owner_load_store(
            store,
            &backup_ns,
            &auth_id,
            Some(Operation::Read),
            owner_check_required,
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name, store, backup_dir, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        check_ns_privs(&store, &backup_ns, &auth_id, PRIV_DATASTORE_BACKUP, 0).map_err(|_| {
            http_err!(
                UNAUTHORIZED,
                "{} does not have permission to upload log for backup snapshot '{}'",
                auth_id,
                backup_dir,
            )
        })?;

        let datastore = check_owner_load_store(
            store,
            &backup_ns,
            &auth_id,
            Some(Operation::Write),
            true,
            &backup_dir.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let owner = backup_dir.get_owner()?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = backup_dir.full_path();
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {store}/{backup_dir}/{file_name}");

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    let owner_check_required = check_ns_privs(
        &store,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = check_owner_load_store(
        &store,
        &backup_ns,
        &auth_id,
        Some(Operation::Read),
        owner_check_required,
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

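    // `filepath` is base64 encoded; "root" or "/" selects the root of the catalog.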
    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
            ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
        ]),
    )
).access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let owner_check_required = check_ns_privs(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
        )?;
        let datastore = check_owner_load_store(
            &store,
            &backup_ns,
            &auth_id,
            Some(Operation::Read),
            owner_check_required,
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path)
            .await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

804f6143
DC
1744 let body = match file.kind() {
1745 EntryKind::File { .. } => Body::wrap_stream(
1746 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1747 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1748 err
1749 }),
1750 ),
1751 EntryKind::Hardlink(_) => Body::wrap_stream(
1752 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1753 .map_err(move |err| {
dc7a5b34 1754 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
804f6143
DC
1755 err
1756 }),
1757 ),
1758 EntryKind::Directory => {
984ddb2f 1759 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
804f6143 1760 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
984ddb2f 1761 if tar {
dc7a5b34
TL
1762 proxmox_rest_server::spawn_internal_task(create_tar(
1763 channelwriter,
1764 decoder,
1765 path.clone(),
1766 false,
1767 ));
984ddb2f
DC
1768 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1769 Body::wrap_stream(zstdstream.map_err(move |err| {
1770 eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
1771 err
1772 }))
1773 } else {
dc7a5b34
TL
1774 proxmox_rest_server::spawn_internal_task(create_zip(
1775 channelwriter,
1776 decoder,
1777 path.clone(),
1778 false,
1779 ));
984ddb2f
DC
1780 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
1781 eprintln!("error during streaming of zip '{:?}' - {}", path, err);
1782 err
1783 }))
1784 }
804f6143
DC
1785 }
1786 other => bail!("cannot download file of type {:?}", other),
1787 };
d33d8f4e
DC
1788
1789 // fixme: set other headers ?
1790 Ok(Response::builder()
dc7a5b34
TL
1791 .status(StatusCode::OK)
1792 .header(header::CONTENT_TYPE, "application/octet-stream")
1793 .body(body)
1794 .unwrap())
1795 }
1796 .boxed()
d33d8f4e
DC
1797}
1798
1a0d3d11
DM
1799#[api(
1800 input: {
1801 properties: {
1802 store: {
1803 schema: DATASTORE_SCHEMA,
1804 },
1805 timeframe: {
c68fa58a 1806 type: RRDTimeFrame,
1a0d3d11
DM
1807 },
1808 cf: {
1809 type: RRDMode,
1810 },
1811 },
1812 },
1813 access: {
7d6fc15b
TL
1814 permission: &Permission::Privilege(
1815 &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1a0d3d11
DM
1816 },
1817)]
1818 /// Read datastore RRD statistics
bf78f708 1819pub fn get_rrd_stats(
1a0d3d11 1820 store: String,
c68fa58a 1821 timeframe: RRDTimeFrame,
1a0d3d11
DM
1822 cf: RRDMode,
1823 _param: Value,
1824) -> Result<Value, Error> {
e9d2fc93 1825 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1826 let disk_manager = crate::tools::disks::DiskManage::new();
1827
1828 let mut rrd_fields = vec![
dc7a5b34
TL
1829 "total",
1830 "used",
1831 "read_ios",
1832 "read_bytes",
1833 "write_ios",
1834 "write_bytes",
f27b6086
DC
1835 ];
1836
1837 // we do not have io_ticks for zpools, so don't include them
1838 match disk_manager.find_mounted_device(&datastore.base_path()) {
dc7a5b34 1839 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
f27b6086
DC
1840 _ => rrd_fields.push("io_ticks"),
1841 };
1842
dc7a5b34 1843 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1a0d3d11
DM
1844}
1845
5fd823c3
HL
1846#[api(
1847 input: {
1848 properties: {
1849 store: {
1850 schema: DATASTORE_SCHEMA,
1851 },
1852 },
1853 },
1854 access: {
1855 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1856 },
1857)]
1858 /// Read active operations on a datastore
dc7a5b34 1859pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
5fd823c3
HL
1860 let active_operations = task_tracking::get_active_operations(&store)?;
1861 Ok(json!({
1862 "read": active_operations.read,
1863 "write": active_operations.write,
1864 }))
1865}
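The reply only carries the two counters gathered by task_tracking. A caller-side sketch of unpacking them, with field names taken from the json! block above:

// Illustrative sketch (not part of the original file): extract the counters
// from the handler's JSON reply.
fn unpack_active_operations(reply: &Value) -> (i64, i64) {
    let read = reply["read"].as_i64().unwrap_or(0);
    let write = reply["write"].as_i64().unwrap_or(0);
    (read, write)
}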
1866
d6688884
SR
1867#[api(
1868 input: {
1869 properties: {
988d575d 1870 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
1871 "backup-ns": {
1872 type: BackupNamespace,
1873 optional: true,
1874 },
8c74349b
WB
1875 backup_group: {
1876 type: pbs_api_types::BackupGroup,
1877 flatten: true,
1878 },
d6688884
SR
1879 },
1880 },
1881 access: {
7d6fc15b
TL
1882 permission: &Permission::Anybody,
1883 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1884 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1885 },
1886)]
1887/// Get "notes" for a backup group
1888pub fn get_group_notes(
1889 store: String,
133d718f 1890 backup_ns: Option<BackupNamespace>,
8c74349b 1891 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1892 rpcenv: &mut dyn RpcEnvironment,
1893) -> Result<String, Error> {
d6688884 1894 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 1895 let backup_ns = backup_ns.unwrap_or_default();
2bc2435a 1896 let owner_check_required = check_ns_privs(
7d6fc15b 1897 &store,
133d718f 1898 &backup_ns,
7d6fc15b 1899 &auth_id,
2bc2435a
FG
1900 PRIV_DATASTORE_AUDIT,
1901 PRIV_DATASTORE_BACKUP,
7d6fc15b 1902 )?;
c9396984
FG
1903 let datastore = check_owner_load_store(
1904 &store,
1905 &backup_ns,
1906 &auth_id,
1907 Some(Operation::Read),
1908 owner_check_required,
1909 &backup_group,
1910 )?;
d6688884 1911
133d718f 1912 let note_path = get_group_note_path(&datastore, &backup_ns, &backup_group);
d6688884
SR
1913 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1914}
1915
1916#[api(
1917 input: {
1918 properties: {
988d575d 1919 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
1920 "backup-ns": {
1921 type: BackupNamespace,
1922 optional: true,
1923 },
8c74349b
WB
1924 backup_group: {
1925 type: pbs_api_types::BackupGroup,
1926 flatten: true,
1927 },
d6688884
SR
1928 notes: {
1929 description: "A multiline text.",
1930 },
1931 },
1932 },
1933 access: {
7d6fc15b
TL
1934 permission: &Permission::Anybody,
1935 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
1936 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
1937 },
1938)]
1939/// Set "notes" for a backup group
1940pub fn set_group_notes(
1941 store: String,
133d718f 1942 backup_ns: Option<BackupNamespace>,
8c74349b 1943 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1944 notes: String,
1945 rpcenv: &mut dyn RpcEnvironment,
1946) -> Result<(), Error> {
d6688884 1947 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 1948 let backup_ns = backup_ns.unwrap_or_default();
2bc2435a 1949 let owner_check_required = check_ns_privs(
7d6fc15b 1950 &store,
133d718f 1951 &backup_ns,
7d6fc15b 1952 &auth_id,
2bc2435a
FG
1953 PRIV_DATASTORE_MODIFY,
1954 PRIV_DATASTORE_BACKUP,
7d6fc15b 1955 )?;
c9396984
FG
1956 let datastore = check_owner_load_store(
1957 &store,
1958 &backup_ns,
1959 &auth_id,
1960 Some(Operation::Write),
1961 owner_check_required,
1962 &backup_group,
1963 )?;
d6688884 1964
133d718f 1965 let note_path = get_group_note_path(&datastore, &backup_ns, &backup_group);
e0a19d33 1966 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
d6688884
SR
1967
1968 Ok(())
1969}
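Both group-note handlers resolve the same on-disk note file via get_group_note_path and differ only in the privilege pair checked (AUDIT vs. MODIFY) and the direction of the file access. A sketch of the body a PUT to the group-notes subdirectory would carry; parameter names follow the schemas above, all values are placeholders:

// Illustrative sketch (not part of the original file): parameters for setting
// a group note. "backup-type"/"backup-id" come from the flattened
// pbs_api_types::BackupGroup; the optional "backup-ns" is omitted here.
fn example_group_notes_params() -> Value {
    json!({
        "backup-type": "vm",
        "backup-id": "100",
        "notes": "nightly VM backups\nretention handled by the prune job",
    })
}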
1970
912b3f5b
DM
1971#[api(
1972 input: {
1973 properties: {
988d575d 1974 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
1975 "backup-ns": {
1976 type: BackupNamespace,
1977 optional: true,
1978 },
8c74349b
WB
1979 backup_dir: {
1980 type: pbs_api_types::BackupDir,
1981 flatten: true,
1982 },
912b3f5b
DM
1983 },
1984 },
1985 access: {
7d6fc15b
TL
1986 permission: &Permission::Anybody,
1987 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
1988 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
1989 },
1990)]
1991/// Get "notes" for a specific backup
bf78f708 1992pub fn get_notes(
912b3f5b 1993 store: String,
133d718f 1994 backup_ns: Option<BackupNamespace>,
8c74349b 1995 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
1996 rpcenv: &mut dyn RpcEnvironment,
1997) -> Result<String, Error> {
7d6fc15b 1998 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 1999 let backup_ns = backup_ns.unwrap_or_default();
2bc2435a 2000 let owner_check_required = check_ns_privs(
7d6fc15b 2001 &store,
133d718f 2002 &backup_ns,
7d6fc15b 2003 &auth_id,
2bc2435a
FG
2004 PRIV_DATASTORE_AUDIT,
2005 PRIV_DATASTORE_BACKUP,
7d6fc15b 2006 )?;
c9396984
FG
2007 let datastore = check_owner_load_store(
2008 &store,
2009 &backup_ns,
2010 &auth_id,
2011 Some(Operation::Read),
2012 owner_check_required,
2013 &backup_dir.group,
2014 )?;
912b3f5b 2015
133d718f 2016 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
912b3f5b 2017
133d718f 2018 let (manifest, _) = backup_dir.load_manifest()?;
912b3f5b 2019
dc7a5b34 2020 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
912b3f5b
DM
2021
2022 Ok(String::from(notes))
2023}
2024
2025#[api(
2026 input: {
2027 properties: {
988d575d 2028 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
2029 "backup-ns": {
2030 type: BackupNamespace,
2031 optional: true,
2032 },
8c74349b
WB
2033 backup_dir: {
2034 type: pbs_api_types::BackupDir,
2035 flatten: true,
2036 },
912b3f5b
DM
2037 notes: {
2038 description: "A multiline text.",
2039 },
2040 },
2041 },
2042 access: {
7d6fc15b
TL
2043 permission: &Permission::Anybody,
2044 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2045 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
2046 },
2047)]
2048/// Set "notes" for a specific backup
bf78f708 2049pub fn set_notes(
912b3f5b 2050 store: String,
133d718f 2051 backup_ns: Option<BackupNamespace>,
8c74349b 2052 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
2053 notes: String,
2054 rpcenv: &mut dyn RpcEnvironment,
2055) -> Result<(), Error> {
7d6fc15b 2056 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 2057 let backup_ns = backup_ns.unwrap_or_default();
2bc2435a 2058 let owner_check_required = check_ns_privs(
7d6fc15b 2059 &store,
133d718f 2060 &backup_ns,
7d6fc15b 2061 &auth_id,
2bc2435a
FG
2062 PRIV_DATASTORE_MODIFY,
2063 PRIV_DATASTORE_BACKUP,
7d6fc15b 2064 )?;
c9396984
FG
2065 let datastore = check_owner_load_store(
2066 &store,
2067 &backup_ns,
2068 &auth_id,
2069 Some(Operation::Write),
2070 owner_check_required,
2071 &backup_dir.group,
2072 )?;
912b3f5b 2073
133d718f 2074 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
912b3f5b 2075
133d718f
WB
2076 backup_dir
2077 .update_manifest(|manifest| {
dc7a5b34
TL
2078 manifest.unprotected["notes"] = notes.into();
2079 })
2080 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
2081
2082 Ok(())
2083}
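Snapshot notes, unlike group notes, are not a separate file: set_notes writes the text into the manifest's unprotected section and get_notes reads it back from there. A sketch of recovering the note from an already parsed manifest JSON value:

// Illustrative sketch (not part of the original file): the note is stored at
// manifest.unprotected["notes"], so any reader of the unprotected section can
// extract it like this.
fn notes_from_manifest_json(manifest_json: &Value) -> String {
    manifest_json["unprotected"]["notes"]
        .as_str()
        .unwrap_or("")
        .to_owned()
}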
2084
8292d3d2
DC
2085#[api(
2086 input: {
2087 properties: {
988d575d 2088 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
2089 "backup-ns": {
2090 type: BackupNamespace,
2091 optional: true,
2092 },
8c74349b
WB
2093 backup_dir: {
2094 type: pbs_api_types::BackupDir,
2095 flatten: true,
2096 },
8292d3d2
DC
2097 },
2098 },
2099 access: {
7d6fc15b
TL
2100 permission: &Permission::Anybody,
2101 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2102 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2103 },
2104)]
2105/// Query protection for a specific backup
2106pub fn get_protection(
2107 store: String,
133d718f 2108 backup_ns: Option<BackupNamespace>,
8c74349b 2109 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2110 rpcenv: &mut dyn RpcEnvironment,
2111) -> Result<bool, Error> {
7d6fc15b 2112 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 2113 let backup_ns = backup_ns.unwrap_or_default();
2bc2435a 2114 let owner_check_required = check_ns_privs(
7d6fc15b 2115 &store,
133d718f 2116 &backup_ns,
7d6fc15b 2117 &auth_id,
2bc2435a
FG
2118 PRIV_DATASTORE_AUDIT,
2119 PRIV_DATASTORE_BACKUP,
7d6fc15b 2120 )?;
c9396984
FG
2121 let datastore = check_owner_load_store(
2122 &store,
2123 &backup_ns,
2124 &auth_id,
2125 Some(Operation::Read),
2126 owner_check_required,
2127 &backup_dir.group,
2128 )?;
8292d3d2 2129
133d718f 2130 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
8292d3d2 2131
6da20161 2132 Ok(backup_dir.is_protected())
8292d3d2
DC
2133}
2134
2135#[api(
2136 input: {
2137 properties: {
988d575d 2138 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
2139 "backup-ns": {
2140 type: BackupNamespace,
2141 optional: true,
2142 },
8c74349b
WB
2143 backup_dir: {
2144 type: pbs_api_types::BackupDir,
2145 flatten: true,
2146 },
8292d3d2
DC
2147 protected: {
2148 description: "Enable/disable protection.",
2149 },
2150 },
2151 },
2152 access: {
7d6fc15b
TL
2153 permission: &Permission::Anybody,
2154 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2155 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2156 },
2157)]
2158 /// Enable or disable protection for a specific backup
2159pub fn set_protection(
2160 store: String,
133d718f 2161 backup_ns: Option<BackupNamespace>,
8c74349b 2162 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2163 protected: bool,
2164 rpcenv: &mut dyn RpcEnvironment,
2165) -> Result<(), Error> {
7d6fc15b 2166 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 2167 let backup_ns = backup_ns.unwrap_or_default();
2bc2435a 2168 let owner_check_required = check_ns_privs(
7d6fc15b 2169 &store,
133d718f 2170 &backup_ns,
7d6fc15b 2171 &auth_id,
2bc2435a
FG
2172 PRIV_DATASTORE_MODIFY,
2173 PRIV_DATASTORE_BACKUP,
7d6fc15b 2174 )?;
c9396984
FG
2175 let datastore = check_owner_load_store(
2176 &store,
2177 &backup_ns,
2178 &auth_id,
2179 Some(Operation::Write),
2180 owner_check_required,
2181 &backup_dir.group,
2182 )?;
8292d3d2 2183
133d718f 2184 let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
8292d3d2 2185
8292d3d2
DC
2186 datastore.update_protection(&backup_dir, protected)
2187}
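A sketch of the parameters a PUT to the protected subdirectory would carry; the snapshot is addressed by the flattened pbs_api_types::BackupDir fields from the schema above, and all values are placeholders:

// Illustrative sketch (not part of the original file): mark one snapshot as
// protected. "backup-time" is the snapshot's epoch timestamp.
fn example_set_protection_params() -> Value {
    json!({
        "backup-type": "vm",
        "backup-id": "100",
        "backup-time": 1_650_000_000,
        "protected": true,
    })
}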
2188
72be0eb1 2189#[api(
4940012d 2190 input: {
72be0eb1 2191 properties: {
988d575d 2192 store: { schema: DATASTORE_SCHEMA },
133d718f
WB
2193 "backup-ns": {
2194 type: BackupNamespace,
2195 optional: true,
2196 },
8c74349b
WB
2197 backup_group: {
2198 type: pbs_api_types::BackupGroup,
2199 flatten: true,
2200 },
72be0eb1 2201 "new-owner": {
e6dc35ac 2202 type: Authid,
72be0eb1
DW
2203 },
2204 },
4940012d
FG
2205 },
2206 access: {
bff85572 2207 permission: &Permission::Anybody,
7d6fc15b
TL
2208 description: "Datastore.Modify on the whole datastore, or Datastore.Backup on owned backups \
2209 when changing ownership between a user and that user's own API tokens"
4940012d 2210 },
72be0eb1
DW
2211)]
2212/// Change owner of a backup group
bf78f708 2213pub fn set_backup_owner(
72be0eb1 2214 store: String,
133d718f 2215 backup_ns: Option<BackupNamespace>,
8c74349b 2216 backup_group: pbs_api_types::BackupGroup,
e6dc35ac 2217 new_owner: Authid,
bff85572 2218 rpcenv: &mut dyn RpcEnvironment,
72be0eb1 2219) -> Result<(), Error> {
bff85572 2220 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
133d718f 2221 let backup_ns = backup_ns.unwrap_or_default();
2bc2435a 2222 let owner_check_required = check_ns_privs(
a724f5fd
FG
2223 &store,
2224 &backup_ns,
2225 &auth_id,
2bc2435a
FG
2226 PRIV_DATASTORE_MODIFY,
2227 PRIV_DATASTORE_BACKUP,
a724f5fd 2228 )?;
1909ece2
FG
2229
2230 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
2231
133d718f 2232 let backup_group = datastore.backup_group(backup_ns, backup_group);
bff85572 2233
2bc2435a 2234 if owner_check_required {
133d718f 2235 let owner = backup_group.get_owner()?;
bff85572 2236
2bc2435a 2237 let allowed = match (owner.is_token(), new_owner.is_token()) {
bff85572
FG
2238 (true, true) => {
2239 // API token to API token, owned by same user
2240 let owner = owner.user();
2241 let new_owner = new_owner.user();
2242 owner == new_owner && Authid::from(owner.clone()) == auth_id
dc7a5b34 2243 }
bff85572
FG
2244 (true, false) => {
2245 // API token to API token owner
dc7a5b34
TL
2246 Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
2247 }
bff85572
FG
2248 (false, true) => {
2249 // API token owner to API token
dc7a5b34
TL
2250 owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
2251 }
bff85572
FG
2252 (false, false) => {
2253 // User to User, not allowed for unprivileged users
2254 false
dc7a5b34 2255 }
2bc2435a 2256 };
bff85572 2257
2bc2435a
FG
2258 if !allowed {
2259 return Err(http_err!(
2260 UNAUTHORIZED,
2261 "{} does not have permission to change owner of backup group '{}' to {}",
2262 auth_id,
2263 backup_group,
2264 new_owner,
2265 ));
2266 }
bff85572
FG
2267 }
2268
7d6fc15b
TL
2269 let user_info = CachedUserInfo::new()?;
2270
e6dc35ac 2271 if !user_info.is_active_auth_id(&new_owner) {
dc7a5b34
TL
2272 bail!(
2273 "{} '{}' is inactive or non-existent",
2274 if new_owner.is_token() {
2275 "API token".to_string()
2276 } else {
2277 "user".to_string()
2278 },
2279 new_owner
2280 );
72be0eb1
DW
2281 }
2282
133d718f 2283 backup_group.set_owner(&new_owner, true)?;
72be0eb1
DW
2284
2285 Ok(())
2286}
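For callers without Datastore.Modify, the match above only permits moving a group between identities belonging to the calling user. A standalone sketch of that matrix with worked examples; the user names are made up:

// Illustrative sketch (not part of the original file) mirroring the ownership
// matrix above. For a caller without PRIV_DATASTORE_MODIFY on the namespace:
//   alice@pbs!t1 -> alice@pbs!t2  allowed only if alice@pbs herself is the caller
//   alice@pbs!t1 -> alice@pbs     allowed only if alice@pbs herself is the caller
//   alice@pbs    -> alice@pbs!t1  allowed only if alice@pbs herself is the caller
//   alice@pbs    -> bob@pbs       never allowed without Datastore.Modify
fn ownership_change_allowed(caller: &Authid, owner: &Authid, new_owner: &Authid) -> bool {
    match (owner.is_token(), new_owner.is_token()) {
        (true, true) => {
            owner.user() == new_owner.user() && Authid::from(owner.user().clone()) == *caller
        }
        (true, false) => Authid::from(owner.user().clone()) == *caller && new_owner == caller,
        (false, true) => owner == caller && Authid::from(new_owner.user().clone()) == *caller,
        (false, false) => false,
    }
}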
2287
552c2259 2288#[sortable]
255f378a 2289const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
5fd823c3
HL
2290 (
2291 "active-operations",
dc7a5b34 2292 &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
5b1cfa01 2293 ),
dc7a5b34 2294 ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
72be0eb1
DW
2295 (
2296 "change-owner",
dc7a5b34 2297 &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
72be0eb1 2298 ),
255f378a
DM
2299 (
2300 "download",
dc7a5b34 2301 &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
255f378a 2302 ),
6ef9bb59
DC
2303 (
2304 "download-decoded",
dc7a5b34 2305 &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
255f378a 2306 ),
dc7a5b34 2307 ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
255f378a
DM
2308 (
2309 "gc",
2310 &Router::new()
2311 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
dc7a5b34 2312 .post(&API_METHOD_START_GARBAGE_COLLECTION),
255f378a 2313 ),
d6688884
SR
2314 (
2315 "group-notes",
2316 &Router::new()
2317 .get(&API_METHOD_GET_GROUP_NOTES)
dc7a5b34 2318 .put(&API_METHOD_SET_GROUP_NOTES),
d6688884 2319 ),
255f378a
DM
2320 (
2321 "groups",
2322 &Router::new()
b31c8019 2323 .get(&API_METHOD_LIST_GROUPS)
dc7a5b34 2324 .delete(&API_METHOD_DELETE_GROUP),
255f378a 2325 ),
18934ae5
TL
2326 (
2327 "namespace",
2328 // FIXME: move into datastore:: sub-module?!
2329 &crate::api2::admin::namespace::ROUTER,
2330 ),
912b3f5b
DM
2331 (
2332 "notes",
2333 &Router::new()
2334 .get(&API_METHOD_GET_NOTES)
dc7a5b34 2335 .put(&API_METHOD_SET_NOTES),
912b3f5b 2336 ),
8292d3d2
DC
2337 (
2338 "protected",
2339 &Router::new()
2340 .get(&API_METHOD_GET_PROTECTION)
dc7a5b34 2341 .put(&API_METHOD_SET_PROTECTION),
255f378a 2342 ),
dc7a5b34 2343 ("prune", &Router::new().post(&API_METHOD_PRUNE)),
9805207a
DC
2344 (
2345 "prune-datastore",
dc7a5b34 2346 &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
9805207a 2347 ),
d33d8f4e
DC
2348 (
2349 "pxar-file-download",
dc7a5b34 2350 &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
1a0d3d11 2351 ),
dc7a5b34 2352 ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
255f378a
DM
2353 (
2354 "snapshots",
2355 &Router::new()
fc189b19 2356 .get(&API_METHOD_LIST_SNAPSHOTS)
dc7a5b34 2357 .delete(&API_METHOD_DELETE_SNAPSHOT),
255f378a 2358 ),
dc7a5b34 2359 ("status", &Router::new().get(&API_METHOD_STATUS)),
255f378a
DM
2360 (
2361 "upload-backup-log",
dc7a5b34 2362 &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
c2009e53 2363 ),
dc7a5b34 2364 ("verify", &Router::new().post(&API_METHOD_VERIFY)),
255f378a
DM
2365];
2366
ad51d02a 2367const DATASTORE_INFO_ROUTER: Router = Router::new()
255f378a
DM
2368 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2369 .subdirs(DATASTORE_INFO_SUBDIRS);
2370
255f378a 2371pub const ROUTER: Router = Router::new()
bb34b589 2372 .get(&API_METHOD_GET_DATASTORE_LIST)
255f378a 2373 .match_all("store", &DATASTORE_INFO_ROUTER);