//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    print_ns_and_snapshot, privs_to_priv_names, Authid, BackupContent, BackupNamespace, BackupType,
    Counts, CryptMode, DataStoreListItem, DataStoreStatus, DatastoreWithNamespace,
    GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
    SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
    BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
    IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ,
    PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
    ListAccessibleBackupGroups,
};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(
    store: &DataStore,
    ns: &BackupNamespace,
    group: &pbs_api_types::BackupGroup,
) -> PathBuf {
    let mut note_path = store.group_path(ns, group);
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

// TODO: move somewhere we can reuse it from (namespace has its own copy atm.)
fn get_ns_privs(store_with_ns: &DatastoreWithNamespace, auth_id: &Authid) -> Result<u64, Error> {
    let user_info = CachedUserInfo::new()?;

    Ok(user_info.lookup_privs(auth_id, &store_with_ns.acl_path()))
}

// asserts that either `full_access_privs` or `partial_access_privs` are fulfilled,
// the returned value indicates whether further checks (e.g. group ownership) are required
fn check_ns_privs(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
) -> Result<bool, Error> {
    let store_with_ns = DatastoreWithNamespace {
        store: store.to_string(),
        ns: ns.clone(),
    };
    let privs = get_ns_privs(&store_with_ns, auth_id)?;

    if full_access_privs != 0 && (privs & full_access_privs) != 0 {
        return Ok(false);
    }
    if partial_access_privs != 0 && (privs & partial_access_privs) != 0 {
        return Ok(true);
    }

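    // neither full nor partial access privileges matched - deny and list what is missing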
    let priv_names = privs_to_priv_names(full_access_privs | partial_access_privs).join("|");
    let path = format!("/{}", store_with_ns.acl_path().join("/"));

    proxmox_router::http_bail!(
        FORBIDDEN,
        "permission check failed - missing {priv_names} on {path}"
    );
}

// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    let limited = check_ns_privs(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(&store, operation)?;

    if limited {
        let owner = datastore.get_owner(&ns, backup_group)?;
        check_backup_owner(&owner, &auth_id)?;
    }

    Ok(datastore)
}

fn read_backup_index(
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = backup_dir.load_manifest()?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(&info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();
    let list_all = !check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let store_with_ns = DatastoreWithNamespace {
        store: store.to_owned(),
        ns: ns.clone(),
    };

    datastore
        .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            let owner = match datastore.get_owner(&ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        store_with_ns,
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    if !datastore.remove_backup_group(&ns, &group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();

    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(ns, backup_dir)?;

    let info = BackupInfo::new(snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let snapshot = datastore.backup_dir(ns, backup_dir)?;

    snapshot.destroy(false)?;

    Ok(Value::Null)
}

fc189b19 446#[api(
b7c3eaa9 447 streaming: true,
fc189b19
DM
448 input: {
449 properties: {
988d575d 450 store: { schema: DATASTORE_SCHEMA },
bc21ade2 451 ns: {
8c74349b
WB
452 type: BackupNamespace,
453 optional: true,
454 },
fc189b19
DM
455 "backup-type": {
456 optional: true,
988d575d 457 type: BackupType,
fc189b19
DM
458 },
459 "backup-id": {
460 optional: true,
461 schema: BACKUP_ID_SCHEMA,
462 },
463 },
464 },
7b570c17 465 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
bb34b589 466 access: {
7d6fc15b
TL
467 permission: &Permission::Anybody,
468 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
469 or DATASTORE_BACKUP and being the owner of the group",
bb34b589 470 },
fc189b19
DM
471)]
472/// List backup snapshots.
dc7a5b34 473pub fn list_snapshots(
54552dda 474 store: String,
bc21ade2 475 ns: Option<BackupNamespace>,
988d575d 476 backup_type: Option<BackupType>,
54552dda
DM
477 backup_id: Option<String>,
478 _param: Value,
184f17af 479 _info: &ApiMethod,
54552dda 480 rpcenv: &mut dyn RpcEnvironment,
fc189b19 481) -> Result<Vec<SnapshotListItem>, Error> {
e6dc35ac 482 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
7d6fc15b 483
bc21ade2 484 let ns = ns.unwrap_or_default();
7d6fc15b 485
2bc2435a 486 let list_all = !check_ns_privs(
7d6fc15b 487 &store,
bc21ade2 488 &ns,
7d6fc15b 489 &auth_id,
2bc2435a
FG
490 PRIV_DATASTORE_AUDIT,
491 PRIV_DATASTORE_BACKUP,
7d6fc15b 492 )?;
184f17af 493
e9d2fc93 494 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
e13303fc
FG
495 let store_with_ns = DatastoreWithNamespace {
496 store: store.to_owned(),
497 ns: ns.clone(),
498 };
184f17af 499
249dde8b
TL
500 // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
501 // backup group and provide an error free (Err -> None) accessor
0d08fcee 502 let groups = match (backup_type, backup_id) {
db87d93e 503 (Some(backup_type), Some(backup_id)) => {
bc21ade2 504 vec![datastore.backup_group_from_parts(ns, backup_type, backup_id)]
db87d93e 505 }
8c74349b 506 // FIXME: Recursion
7d9cb8c4 507 (Some(backup_type), None) => datastore
bc21ade2 508 .iter_backup_groups_ok(ns)?
dc7a5b34
TL
509 .filter(|group| group.backup_type() == backup_type)
510 .collect(),
8c74349b 511 // FIXME: Recursion
7d9cb8c4 512 (None, Some(backup_id)) => datastore
bc21ade2 513 .iter_backup_groups_ok(ns)?
dc7a5b34
TL
514 .filter(|group| group.backup_id() == backup_id)
515 .collect(),
8c74349b 516 // FIXME: Recursion
bc21ade2 517 (None, None) => datastore.list_backup_groups(ns)?,
0d08fcee 518 };
54552dda 519
0d08fcee 520 let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
988d575d
WB
521 let backup = pbs_api_types::BackupDir {
522 group: group.into(),
523 time: info.backup_dir.backup_time(),
524 };
6da20161 525 let protected = info.backup_dir.is_protected();
1c090810 526
9ccf933b 527 match get_all_snapshot_files(&info) {
70030b43 528 Ok((manifest, files)) => {
70030b43
DM
529 // extract the first line from notes
530 let comment: Option<String> = manifest.unprotected["notes"]
531 .as_str()
532 .and_then(|notes| notes.lines().next())
533 .map(String::from);
534
035c40e6
FG
535 let fingerprint = match manifest.fingerprint() {
536 Ok(fp) => fp,
537 Err(err) => {
538 eprintln!("error parsing fingerprint: '{}'", err);
539 None
dc7a5b34 540 }
035c40e6
FG
541 };
542
79c53595 543 let verification = manifest.unprotected["verify_state"].clone();
dc7a5b34
TL
544 let verification: Option<SnapshotVerifyState> =
545 match serde_json::from_value(verification) {
546 Ok(verify) => verify,
547 Err(err) => {
548 eprintln!("error parsing verification state : '{}'", err);
549 None
550 }
551 };
3b2046d2 552
0d08fcee
FG
553 let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
554
79c53595 555 SnapshotListItem {
988d575d 556 backup,
79c53595
FG
557 comment,
558 verification,
035c40e6 559 fingerprint,
79c53595
FG
560 files,
561 size,
562 owner,
02db7267 563 protected,
79c53595 564 }
dc7a5b34 565 }
1c090810
DC
566 Err(err) => {
567 eprintln!("error during snapshot file listing: '{}'", err);
79c53595 568 let files = info
dc7a5b34
TL
569 .files
570 .into_iter()
571 .map(|filename| BackupContent {
572 filename,
573 size: None,
574 crypt_mode: None,
575 })
576 .collect();
79c53595
FG
577
578 SnapshotListItem {
988d575d 579 backup,
79c53595
FG
580 comment: None,
581 verification: None,
035c40e6 582 fingerprint: None,
79c53595
FG
583 files,
584 size: None,
585 owner,
02db7267 586 protected,
79c53595 587 }
dc7a5b34 588 }
0d08fcee
FG
589 }
590 };
184f17af 591
dc7a5b34 592 groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
133d718f 593 let owner = match group.get_owner() {
dc7a5b34
TL
594 Ok(auth_id) => auth_id,
595 Err(err) => {
596 eprintln!(
e13303fc 597 "Failed to get owner of group '{}' in {} - {}",
e13303fc 598 group.group(),
4a4dd66c 599 &store_with_ns,
e13303fc 600 err
dc7a5b34 601 );
0d08fcee
FG
602 return Ok(snapshots);
603 }
dc7a5b34 604 };
0d08fcee 605
dc7a5b34
TL
606 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
607 return Ok(snapshots);
608 }
0d08fcee 609
6da20161 610 let group_backups = group.list_backups()?;
0d08fcee 611
dc7a5b34
TL
612 snapshots.extend(
613 group_backups
614 .into_iter()
615 .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
616 );
617
618 Ok(snapshots)
619 })
184f17af
DM
620}
621
22cfad13 622fn get_snapshots_count(store: &Arc<DataStore>, owner: Option<&Authid>) -> Result<Counts, Error> {
8122eaad 623 let root_ns = Default::default();
f12f408e
TL
624 ListAccessibleBackupGroups::new_with_privs(
625 store,
626 root_ns,
627 MAX_NAMESPACE_DEPTH,
628 Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
629 None,
630 owner,
631 )?
632 .try_fold(Counts::default(), |mut counts, group| {
633 let group = match group {
634 Ok(group) => group,
635 Err(_) => return Ok(counts), // TODO: add this as error counts?
636 };
637 let snapshot_count = group.list_backups()?.len() as u64;
638
        // only include groups with snapshots; counting/displaying empty groups can be confusing
        if snapshot_count > 0 {
            let type_count = match group.backup_type() {
                BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                BackupType::Host => counts.host.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;
        }

        Ok(counts)
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let owner_check_required = check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

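    // the scope of the verification depends on which parameters were given: a single
    // snapshot, a whole backup group, or all backups below the requested namespace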
    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = if ns.is_root() {
                store.clone()
            } else {
                format!("{store}:{}", ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?;
                failed_dirs
            } else {
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;
    let store_with_ns = DatastoreWithNamespace {
        store: store.to_owned(),
        ns: ns.clone(),
    };

    let worker_id = format!("{}:{}:{}", store, ns, group);
    let group = datastore.backup_group(ns, group);

    let mut prune_result = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let mut result = json!({
                "backup-type": info.backup_dir.backup_type(),
                "backup-id": info.backup_dir.backup_id(),
                "backup-time": info.backup_dir.backup_time(),
                "keep": keep,
                "protected": mark.protected(),
            });
            let prune_ns = info.backup_dir.backup_ns();
            if !prune_ns.is_root() {
                result["ns"] = serde_json::to_value(prune_ns)?;
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

163e9bbe 997 // We use a WorkerTask just to have a task log, but run synchrounously
049a22a3 998 let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
dda70154 999
f1539300 1000 if keep_all {
1ec0d70d 1001 task_log!(worker, "No prune selection - keeping all files.");
f1539300 1002 } else {
dc7a5b34
TL
1003 task_log!(
1004 worker,
1005 "retention options: {}",
1006 pbs_datastore::prune::cli_options_string(&prune_options)
1007 );
1008 task_log!(
1009 worker,
e13303fc
FG
1010 "Starting prune on {} group \"{}\"",
1011 store_with_ns,
1012 group.group(),
dc7a5b34 1013 );
f1539300 1014 }
3b03abfe 1015
02db7267
DC
1016 for (info, mark) in prune_info {
1017 let keep = keep_all || mark.keep();
dda70154 1018
f1539300
SR
1019 let backup_time = info.backup_dir.backup_time();
1020 let timestamp = info.backup_dir.backup_time_string();
db87d93e
WB
1021 let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();
1022
1023 let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);
f1539300 1024
1ec0d70d 1025 task_log!(worker, "{}", msg);
f1539300 1026
133d718f 1027 prune_result.push(json!({
db87d93e
WB
1028 "backup-type": group.ty,
1029 "backup-id": group.id,
f1539300
SR
1030 "backup-time": backup_time,
1031 "keep": keep,
02db7267 1032 "protected": mark.protected(),
133d718f 1033 }));
f1539300
SR
1034
1035 if !(dry_run || keep) {
133d718f 1036 if let Err(err) = info.backup_dir.destroy(false) {
1ec0d70d
DM
1037 task_warn!(
1038 worker,
1039 "failed to remove dir {:?}: {}",
1040 info.backup_dir.relative_path(),
1041 err,
f1539300 1042 );
8f0b4c1f 1043 }
8f579717 1044 }
f1539300 1045 }
dd8e744f 1046
f1539300 1047 worker.log_result(&Ok(()));
83b7db02 1048
dda70154 1049 Ok(json!(prune_result))
83b7db02
DM
1050}
1051
9805207a
DC
1052#[api(
1053 input: {
1054 properties: {
1055 "dry-run": {
1056 optional: true,
1057 type: bool,
1058 default: false,
1059 description: "Just show what prune would do, but do not delete anything.",
1060 },
1061 "prune-options": {
1062 type: PruneOptions,
1063 flatten: true,
1064 },
1065 store: {
1066 schema: DATASTORE_SCHEMA,
1067 },
2f5417f8
TL
1068 ns: {
1069 type: BackupNamespace,
1070 optional: true,
1071 },
e3c26aea
TL
1072 "max-depth": {
1073 schema: NS_MAX_DEPTH_SCHEMA,
1074 optional: true,
1075 },
9805207a
DC
1076 },
1077 },
1078 returns: {
1079 schema: UPID_SCHEMA,
1080 },
1081 access: {
7d6fc15b
TL
1082 permission: &Permission::Privilege(
1083 &["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
9805207a
DC
1084 },
1085)]
1086/// Prune the datastore
1087pub fn prune_datastore(
1088 dry_run: bool,
1089 prune_options: PruneOptions,
1090 store: String,
2f5417f8 1091 ns: Option<BackupNamespace>,
e3c26aea 1092 max_depth: Option<usize>,
9805207a
DC
1093 _param: Value,
1094 rpcenv: &mut dyn RpcEnvironment,
1095) -> Result<String, Error> {
9805207a
DC
1096 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1097
e9d2fc93 1098 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
36971618
FG
1099 let ns = ns.unwrap_or_default();
1100 let worker_id = format!("{}:{}", store, ns);
9805207a 1101
bfa942c0
DC
1102 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
1103
9805207a
DC
1104 let upid_str = WorkerTask::new_thread(
1105 "prune",
36971618 1106 Some(worker_id),
049a22a3 1107 auth_id.to_string(),
bfa942c0 1108 to_stdout,
dc7a5b34 1109 move |worker| {
e3c26aea
TL
1110 crate::server::prune_datastore(
1111 worker,
1112 auth_id,
1113 prune_options,
1114 datastore,
1115 ns,
                max_depth.unwrap_or(MAX_NAMESPACE_DEPTH), // cannot rely on schema default
                dry_run,
            )
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

fn can_access_any_ns(store: Arc<DataStore>, auth_id: &Authid, user_info: &CachedUserInfo) -> bool {
    // NOTE: traversing the datastore could be avoided if we had an "ACL tree: is there any priv
    // below /datastore/{store}" helper
    let mut iter =
        if let Ok(iter) = store.recursive_iter_backup_ns_ok(BackupNamespace::root(), None) {
            iter
        } else {
            return false;
        };
    let wanted =
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
    let name = store.name();
    iter.any(|ns| -> bool {
        let store_with_ns = DatastoreWithNamespace {
            store: name.to_string(),
            ns: ns,
        };
        let user_privs = user_info.lookup_privs(&auth_id, &store_with_ns.acl_path());
        user_privs & wanted != 0
    })
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;

        let mut allow_id = false;
        if !allowed {
            let scfg: pbs_api_types::DataStoreConfig = serde_json::from_value(data.to_owned())?;
            // safety: we just cannot go through lookup as we must avoid an operation check
            if let Ok(datastore) = unsafe { DataStore::open_from_config(scfg, None) } {
                allow_id = can_access_any_ns(datastore, &auth_id, &user_info);
            }
        }

        if allowed || allow_id {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: if !allowed {
                    None
                } else {
                    data["comment"].as_str().map(String::from)
                },
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name, store_with_ns, backup_dir, file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let store_with_ns = DatastoreWithNamespace {
            store: store.to_owned(),
            ns: backup_ns.clone(),
        };
        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir_api.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;

        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name, store_with_ns, backup_dir_api, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

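        // pick a streaming strategy based on the archive type: dynamic index, fixed index
        // or plain blob; anything else cannot be served in decoded form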
1425 let body = match extension {
1426 "didx" => {
dc7a5b34
TL
1427 let index = DynamicIndexReader::open(&path).map_err(|err| {
1428 format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
1429 })?;
2d55beec
FG
1430 let (csum, size) = index.compute_csum();
1431 manifest.verify_file(&file_name, &csum, size)?;
6ef9bb59 1432
14f6c9cb 1433 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1434 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1435 Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
1436 eprintln!("error during streaming of '{:?}' - {}", path, err);
1437 err
1438 }))
1439 }
6ef9bb59 1440 "fidx" => {
dc7a5b34
TL
1441 let index = FixedIndexReader::open(&path).map_err(|err| {
1442 format_err!("unable to read fixed index '{:?}' - {}", &path, err)
1443 })?;
6ef9bb59 1444
2d55beec
FG
1445 let (csum, size) = index.compute_csum();
1446 manifest.verify_file(&file_name, &csum, size)?;
1447
14f6c9cb 1448 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1449 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1450 Body::wrap_stream(
1451 AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
1452 move |err| {
1453 eprintln!("error during streaming of '{:?}' - {}", path, err);
1454 err
1455 },
1456 ),
1457 )
1458 }
6ef9bb59
DC
1459 "blob" => {
1460 let file = std::fs::File::open(&path)
8aa67ee7 1461 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
6ef9bb59 1462
2d55beec
FG
1463 // FIXME: load full blob to verify index checksum?
1464
6ef9bb59 1465 Body::wrap_stream(
dc7a5b34
TL
1466 WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
1467 move |err| {
6ef9bb59
DC
1468 eprintln!("error during streaming of '{:?}' - {}", path, err);
1469 err
dc7a5b34
TL
1470 },
1471 ),
6ef9bb59 1472 )
dc7a5b34 1473 }
6ef9bb59
DC
1474 extension => {
1475 bail!("cannot download '{}' files", extension);
dc7a5b34 1476 }
6ef9bb59
DC
1477 };
1478
1479 // fixme: set other headers ?
1480 Ok(Response::builder()
dc7a5b34
TL
1481 .status(StatusCode::OK)
1482 .header(header::CONTENT_TYPE, "application/octet-stream")
1483 .body(body)
1484 .unwrap())
1485 }
1486 .boxed()
6ef9bb59
DC
1487}
1488
552c2259 1489#[sortable]
0ab08ac9
DM
1490pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
1491 &ApiHandler::AsyncHttp(&upload_backup_log),
255f378a 1492 &ObjectSchema::new(
54552dda 1493 "Upload the client backup log file into a backup snapshot ('client.log.blob').",
552c2259 1494 &sorted!([
66c49c21 1495 ("store", false, &DATASTORE_SCHEMA),
bc21ade2 1496 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
255f378a 1497 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
0ab08ac9 1498 ("backup-id", false, &BACKUP_ID_SCHEMA),
255f378a 1499 ("backup-time", false, &BACKUP_TIME_SCHEMA),
552c2259 1500 ]),
dc7a5b34
TL
1501 ),
1502)
1503.access(
54552dda 1504 Some("Only the backup creator/owner is allowed to do this."),
7d6fc15b 1505 &Permission::Anybody,
54552dda 1506);
9e47c0a5 1507
bf78f708 1508pub fn upload_backup_log(
07ee2235
DM
1509 _parts: Parts,
1510 req_body: Body,
1511 param: Value,
255f378a 1512 _info: &ApiMethod,
54552dda 1513 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 1514) -> ApiResponseFuture {
ad51d02a 1515 async move {
7d6fc15b 1516 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
3c8c2827 1517 let store = required_string_param(&param, "store")?;
133d718f 1518 let backup_ns = optional_ns_param(&param)?;
1afce610
FG
1519 let store_with_ns = DatastoreWithNamespace {
1520 store: store.to_owned(),
1521 ns: backup_ns.clone(),
1522 };
1523 let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
2bc2435a 1524
7a404dc5
FG
1525 let datastore = check_privs_and_load_store(
1526 &store,
c9396984
FG
1527 &backup_ns,
1528 &auth_id,
7a404dc5
FG
1529 0,
1530 PRIV_DATASTORE_BACKUP,
c9396984 1531 Some(Operation::Write),
1afce610 1532 &backup_dir_api.group,
c9396984 1533 )?;
1afce610 1534 let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;
07ee2235 1535
dc7a5b34 1536 let file_name = CLIENT_LOG_BLOB_NAME;
07ee2235 1537
133d718f 1538 let mut path = backup_dir.full_path();
ad51d02a 1539 path.push(&file_name);
07ee2235 1540
ad51d02a
DM
1541 if path.exists() {
1542 bail!("backup already contains a log.");
1543 }
e128d4e8 1544
1afce610 1545 println!("Upload backup log to {store_with_ns} {backup_dir_api}/{file_name}");
ad51d02a
DM
1546
1547 let data = req_body
1548 .map_err(Error::from)
1549 .try_fold(Vec::new(), |mut acc, chunk| {
1550 acc.extend_from_slice(&*chunk);
1551 future::ok::<_, Error>(acc)
1552 })
1553 .await?;
1554
39f18b30
DM
1555 // always verify blob/CRC at server side
1556 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1557
e0a19d33 1558 replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
ad51d02a
DM
1559
1560 // fixme: use correct formatter
53daae8e 1561 Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
dc7a5b34
TL
1562 }
1563 .boxed()
07ee2235
DM
1564}
1565
5b1cfa01
DC
1566#[api(
1567 input: {
1568 properties: {
988d575d 1569 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1570 ns: {
133d718f
WB
1571 type: BackupNamespace,
1572 optional: true,
1573 },
8c74349b
WB
1574 backup_dir: {
1575 type: pbs_api_types::BackupDir,
1576 flatten: true,
1577 },
5b1cfa01
DC
1578 "filepath": {
1579 description: "Base64 encoded path.",
1580 type: String,
1581 }
1582 },
1583 },
1584 access: {
7d6fc15b
TL
1585 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1586 DATASTORE_BACKUP and being the owner of the group",
1587 permission: &Permission::Anybody,
5b1cfa01
DC
1588 },
1589)]
1590/// Get the entries of the given path of the catalog
bf78f708 1591pub fn catalog(
5b1cfa01 1592 store: String,
bc21ade2 1593 ns: Option<BackupNamespace>,
8c74349b 1594 backup_dir: pbs_api_types::BackupDir,
5b1cfa01 1595 filepath: String,
5b1cfa01 1596 rpcenv: &mut dyn RpcEnvironment,
227501c0 1597) -> Result<Vec<ArchiveEntry>, Error> {
e6dc35ac 1598 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
bc21ade2 1599 let ns = ns.unwrap_or_default();
7a404dc5 1600 let datastore = check_privs_and_load_store(
7d6fc15b 1601 &store,
bc21ade2 1602 &ns,
7d6fc15b 1603 &auth_id,
2bc2435a
FG
1604 PRIV_DATASTORE_READ,
1605 PRIV_DATASTORE_BACKUP,
c9396984 1606 Some(Operation::Read),
c9396984
FG
1607 &backup_dir.group,
1608 )?;
a724f5fd 1609
bc21ade2 1610 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
5b1cfa01 1611
9238cdf5
FG
1612 let file_name = CATALOG_NAME;
1613
9ccf933b 1614 let (manifest, files) = read_backup_index(&backup_dir)?;
9238cdf5
FG
1615 for file in files {
1616 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1617 bail!("cannot decode '{}' - is encrypted", file_name);
1618 }
1619 }
1620
5b1cfa01
DC
1621 let mut path = datastore.base_path();
1622 path.push(backup_dir.relative_path());
9238cdf5 1623 path.push(file_name);
5b1cfa01
DC
1624
1625 let index = DynamicIndexReader::open(&path)
1626 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1627
2d55beec 1628 let (csum, size) = index.compute_csum();
9a37bd6c 1629 manifest.verify_file(file_name, &csum, size)?;
2d55beec 1630
14f6c9cb 1631 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
5b1cfa01
DC
1632 let reader = BufferedDynamicReader::new(index, chunk_reader);
1633
1634 let mut catalog_reader = CatalogReader::new(reader);
5b1cfa01 1635
5279ee74 1636 let path = if filepath != "root" && filepath != "/" {
227501c0
DC
1637 base64::decode(filepath)?
1638 } else {
1639 vec![b'/']
1640 };
5b1cfa01 1641
86582454 1642 catalog_reader.list_dir_contents(&path)
5b1cfa01
DC
1643}
1644
d33d8f4e
DC
1645#[sortable]
1646pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1647 &ApiHandler::AsyncHttp(&pxar_file_download),
1648 &ObjectSchema::new(
1ffe0301 1649 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
d33d8f4e
DC
1650 &sorted!([
1651 ("store", false, &DATASTORE_SCHEMA),
bc21ade2 1652 ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
d33d8f4e
DC
1653 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1654 ("backup-id", false, &BACKUP_ID_SCHEMA),
1655 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1656 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
984ddb2f 1657 ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
d33d8f4e
DC
1658 ]),
1659 )
7d6fc15b
TL
1660).access(
1661 Some(
1662 "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
1663 DATASTORE_BACKUP and being the owner of the group",
1664 ),
1665 &Permission::Anybody,
d33d8f4e
DC
1666);
1667
pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
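        // `pxar_name` now holds the archive file name within the snapshot and `file_path`
        // the remaining path inside that archive; an empty remainder falls back to "/",
        // i.e. the archive root.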
        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path)
            .await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
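                // A background task produces the zip/tar stream and writes it into the
                // bounded channel through the AsyncChannelWriter; the HTTP body is fed from
                // the receiving end, so the archive is streamed instead of being buffered
                // in memory.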
                if tar {
                    proxmox_rest_server::spawn_internal_task(create_tar(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
                    Body::wrap_stream(zstdstream.map_err(move |err| {
                        eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
                        err
                    }))
                } else {
                    proxmox_rest_server::spawn_internal_task(create_zip(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                        eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                        err
                    }))
                }
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrame,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrame,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let disk_manager = crate::tools::disks::DiskManage::new();

    let mut rrd_fields = vec![
        "total",
        "used",
        "read_ios",
        "read_bytes",
        "write_ios",
        "write_bytes",
    ];

    // we do not have io_ticks for zpools, so don't include them
    match disk_manager.find_mounted_device(&datastore.base_path()) {
        Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
        _ => rrd_fields.push("io_ticks"),
    };

    create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
}
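
// Illustrative request against this endpoint (path prefix assumed from this module's
// location under /api2/json/admin/datastore; parameter values are hypothetical):
//
//     GET /api2/json/admin/datastore/{store}/rrd?timeframe=day&cf=average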

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
    },
)]
/// Read the number of active read and write operations on a datastore
pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
    let active_operations = task_tracking::get_active_operations(&store)?;
    Ok(json!({
        "read": active_operations.read,
        "write": active_operations.write,
    }))
}
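
// The response mirrors the task-tracking counters, e.g. (illustrative):
//
//     { "read": 0, "write": 2 }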

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Get "notes" for a backup group
pub fn get_group_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_group,
    )?;

    let note_path = get_group_note_path(&datastore, &ns, &backup_group);
    Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_group,
    )?;

    let note_path = get_group_note_path(&datastore, &ns, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;

    Ok(())
}
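
// Group notes are stored as a separate plain-text file (resolved via `get_group_note_path`),
// while the per-snapshot notes handled below live in the snapshot manifest instead.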

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    let (manifest, _) = backup_dir.load_manifest()?;

    let notes = manifest.unprotected["notes"].as_str().unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    backup_dir
        .update_manifest(|manifest| {
            manifest.unprotected["notes"] = notes.into();
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}
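
// Snapshot notes live in the `unprotected` section of the manifest; updating them only
// rewrites the manifest blob, no archive data is touched.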

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Read),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    Ok(backup_dir.is_protected())
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Enable or disable protection for a specific backup
pub fn set_protection(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
        Some(Operation::Write),
        &backup_dir.group,
    )?;

    let backup_dir = datastore.backup_dir(ns, backup_dir)?;

    datastore.update_protection(&backup_dir, protected)
}
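
// Illustrative call (hypothetical identifiers; maps to the "protected" sub-route below):
//
//     PUT /api2/json/admin/datastore/{store}/protected
//         { "backup-type": "vm", "backup-id": "100", "backup-time": 1660000000, "protected": true }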

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on the whole datastore, or changing ownership between a user \
            and that user's tokens for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();
    let owner_check_required = check_ns_privs(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_group = datastore.backup_group(ns, backup_group);

    if owner_check_required {
        let owner = backup_group.get_owner()?;

        let allowed = match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            }
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
            }
            (false, true) => {
                // API token owner to API token
                owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
            }
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            }
        };

        if !allowed {
            return Err(http_err!(
                UNAUTHORIZED,
                "{} does not have permission to change owner of backup group '{}' to {}",
                auth_id,
                backup_group.group(),
                new_owner,
            ));
        }
    }

    let user_info = CachedUserInfo::new()?;

    if !user_info.is_active_auth_id(&new_owner) {
        bail!(
            "{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner
        );
    }

    backup_group.set_owner(&new_owner, true)?;

    Ok(())
}
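
// Summary of the checks above: without Datastore.Modify on the namespace, ownership may only
// move between an auth id and its own API tokens (token -> token of the same user, token ->
// its owning user, user -> one of that user's tokens); plain user-to-user transfers always
// need the privileged path.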

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "namespace",
        // FIXME: move into datastore:: sub-module?!
        &crate::api2::admin::namespace::ROUTER,
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);
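
// With ROUTER mounted under the admin datastore API (this file lives in src/api2/admin/), the
// sub-directories above resolve to paths such as /api2/json/admin/datastore/{store}/snapshots
// or .../pxar-file-download (illustrative, assuming the default /api2/json prefix).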