]> git.proxmox.com Git - proxmox-backup.git/blame - src/api2/admin/datastore.rs
GC: flatten existing status into job status
[proxmox-backup.git] / src / api2 / admin / datastore.rs
CommitLineData
bf78f708
DM
1//! Datastore Management
2
0d08fcee 3use std::collections::HashSet;
d33d8f4e
DC
4use std::ffi::OsStr;
5use std::os::unix::ffi::OsStrExt;
d6688884 6use std::path::PathBuf;
6da20161 7use std::sync::Arc;
cad540e9 8
6ef9bb59 9use anyhow::{bail, format_err, Error};
9e47c0a5 10use futures::*;
cad540e9
WB
11use hyper::http::request::Parts;
12use hyper::{header, Body, Response, StatusCode};
8c74349b 13use serde::Deserialize;
15e9b4ed 14use serde_json::{json, Value};
7c667013 15use tokio_stream::wrappers::ReceiverStream;
15e9b4ed 16
dc7a5b34
TL
17use proxmox_async::blocking::WrappedReaderStream;
18use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
984ddb2f 19use proxmox_compression::zstd::ZstdEncoder;
6ef1b649 20use proxmox_router::{
dc7a5b34
TL
21 http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
22 Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
6ef1b649
WB
23};
24use proxmox_schema::*;
20ecaad1 25use proxmox_sortable_macro::sortable;
dc7a5b34
TL
26use proxmox_sys::fs::{
27 file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
28};
d5790a9f 29use proxmox_sys::{task_log, task_warn};
fe1d34d2 30use proxmox_time::CalendarEvent;
e18a6c9e 31
2e219481 32use pxar::accessor::aio::Accessor;
d33d8f4e
DC
33use pxar::EntryKind;
34
dc7a5b34 35use pbs_api_types::{
abd82485 36 print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
fe1d34d2 37 Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus,
3ae21d87
FG
38 GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation,
39 PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
40 BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
41 BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
42 NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
43 PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
44 VERIFICATION_OUTDATED_AFTER_SCHEMA,
b2065dc7 45};
984ddb2f 46use pbs_client::pxar::{create_tar, create_zip};
dc7a5b34 47use pbs_config::CachedUserInfo;
b2065dc7
WB
48use pbs_datastore::backup_info::BackupInfo;
49use pbs_datastore::cached_chunk_reader::CachedChunkReader;
013b1e8b 50use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
b2065dc7
WB
51use pbs_datastore::data_blob::DataBlob;
52use pbs_datastore::data_blob_reader::DataBlobReader;
53use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
dc7a5b34 54use pbs_datastore::fixed_index::FixedIndexReader;
b2065dc7
WB
55use pbs_datastore::index::IndexFile;
56use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
89725197 57use pbs_datastore::prune::compute_prune_info;
dc7a5b34
TL
58use pbs_datastore::{
59 check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
60 StoreProgress, CATALOG_NAME,
61};
8c74349b 62use pbs_tools::json::required_string_param;
dc7a5b34 63use proxmox_rest_server::{formatter, WorkerTask};
2b7f8dd5 64
133d718f 65use crate::api2::backup::optional_ns_param;
431cc7b1 66use crate::api2::node::rrd::create_value_from_rrd;
22cfad13 67use crate::backup::{
2981cdd4
TL
68 check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
69 ListAccessibleBackupGroups, NS_PRIVS_OK,
22cfad13 70};
54552dda 71
fe1d34d2 72use crate::server::jobstate::{compute_schedule_status, Job, JobState};
804f6143 73
d6688884
SR
74const GROUP_NOTES_FILE_NAME: &str = "notes";
75
133d718f
WB
76fn get_group_note_path(
77 store: &DataStore,
78 ns: &BackupNamespace,
79 group: &pbs_api_types::BackupGroup,
80) -> PathBuf {
81 let mut note_path = store.group_path(ns, group);
d6688884
SR
82 note_path.push(GROUP_NOTES_FILE_NAME);
83 note_path
84}
85
7a404dc5
FG
// helper to unify common sequence of checks:
// 1. check privs on NS (full or limited access)
// 2. load datastore
// 3. if needed (only limited access), check owner of group
//
// Returns the opened datastore on success; callers with only the
// `partial_access_privs` must additionally own `backup_group`.
fn check_privs_and_load_store(
    store: &str,
    ns: &BackupNamespace,
    auth_id: &Authid,
    full_access_privs: u64,
    partial_access_privs: u64,
    operation: Option<Operation>,
    backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
    // `limited` is true when only the partial privilege matched, so an
    // ownership check is still required below.
    let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;

    let datastore = DataStore::lookup_datastore(store, operation)?;

    if limited {
        let owner = datastore.get_owner(ns, backup_group)?;
        check_backup_owner(&owner, auth_id)?;
    }

    Ok(datastore)
}
110
e7cb4dc5 111fn read_backup_index(
e7cb4dc5
WB
112 backup_dir: &BackupDir,
113) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
9ccf933b 114 let (manifest, index_size) = backup_dir.load_manifest()?;
8c70e3eb 115
09b1f7b2
DM
116 let mut result = Vec::new();
117 for item in manifest.files() {
118 result.push(BackupContent {
119 filename: item.filename.clone(),
f28d9088 120 crypt_mode: Some(item.crypt_mode),
09b1f7b2
DM
121 size: Some(item.size),
122 });
8c70e3eb
DM
123 }
124
09b1f7b2 125 result.push(BackupContent {
96d65fbc 126 filename: MANIFEST_BLOB_NAME.to_string(),
882c0823
FG
127 crypt_mode: match manifest.signature {
128 Some(_) => Some(CryptMode::SignOnly),
129 None => Some(CryptMode::None),
130 },
09b1f7b2
DM
131 size: Some(index_size),
132 });
4f1e40a2 133
70030b43 134 Ok((manifest, result))
8c70e3eb
DM
135}
136
1c090810 137fn get_all_snapshot_files(
1c090810 138 info: &BackupInfo,
70030b43 139) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
9ccf933b 140 let (manifest, mut files) = read_backup_index(&info.backup_dir)?;
1c090810
DC
141
142 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
143 acc.insert(item.filename.clone());
144 acc
145 });
146
147 for file in &info.files {
dc7a5b34
TL
148 if file_set.contains(file) {
149 continue;
150 }
f28d9088
WB
151 files.push(BackupContent {
152 filename: file.to_string(),
153 size: None,
154 crypt_mode: None,
155 });
1c090810
DC
156 }
157
70030b43 158 Ok((manifest, files))
1c090810
DC
159}
160
b31c8019
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires DATASTORE_AUDIT for all or DATASTORE_BACKUP for owned groups on \
            /datastore/{store}[/{namespace}]",
    },
)]
/// List backup groups.
///
/// With DATASTORE_AUDIT all groups in the namespace are returned; with only
/// DATASTORE_BACKUP the listing is restricted to groups owned by the caller.
/// Empty groups and groups whose owner cannot be determined are skipped.
pub fn list_groups(
    store: String,
    ns: Option<BackupNamespace>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    // list_all == full audit access; otherwise filter by ownership below.
    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    datastore
        .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;

            // An unreadable owner file is logged and the group skipped rather
            // than failing the whole listing.
            let owner = match datastore.get_owner(&ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!(
                        "Failed to get owner of group '{}' in {} - {}",
                        group.group(),
                        print_store_and_ns(&store, &ns),
                        err
                    );
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            // Pick the newest *finished* snapshot as the group's "last backup".
            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
            // Missing/unreadable notes file simply means no comment.
            let comment = file_read_firstline(note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}
8f579717 256
f32791b4
DC
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup group including all snapshots.
///
/// Fails (after removing what it could) if any snapshot in the group is
/// protected. The filesystem work runs on a blocking task off the executor.
pub async fn delete_group(
    store: String,
    ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &group,
        )?;

        let delete_stats = datastore.remove_backup_group(&ns, &group)?;
        if !delete_stats.all_removed() {
            // protected snapshots are never removed; surface that as an error
            bail!("group only partially deleted due to protected snapshots");
        }

        Ok(Value::Null)
    })
    .await?
}
308
09b1f7b2
DM
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT or \
            DATASTORE_READ for any or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// List snapshot files.
///
/// Returns manifest-derived file entries plus any extra files found on disk;
/// runs on a blocking task since it reads the snapshot directory.
pub async fn list_snapshot_files(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        let info = BackupInfo::new(snapshot)?;

        let (_manifest, files) = get_all_snapshot_files(&info)?;

        Ok(files)
    })
    .await?
}
363
68a6a0ee
DM
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Delete backup snapshot.
///
/// Destroys a single snapshot directory (non-forced, so protected snapshots
/// are refused); runs on a blocking task.
pub async fn delete_snapshot(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_PRUNE,
            Some(Operation::Write),
            &backup_dir.group,
        )?;

        let snapshot = datastore.backup_dir(ns, backup_dir)?;

        // `false` = do not force-remove a protected snapshot
        snapshot.destroy(false)?;

        Ok(Value::Null)
    })
    .await?
}
415
fc189b19 416#[api(
b7c3eaa9 417 streaming: true,
fc189b19
DM
418 input: {
419 properties: {
988d575d 420 store: { schema: DATASTORE_SCHEMA },
bc21ade2 421 ns: {
8c74349b
WB
422 type: BackupNamespace,
423 optional: true,
424 },
fc189b19
DM
425 "backup-type": {
426 optional: true,
988d575d 427 type: BackupType,
fc189b19
DM
428 },
429 "backup-id": {
430 optional: true,
431 schema: BACKUP_ID_SCHEMA,
432 },
433 },
434 },
7b570c17 435 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
bb34b589 436 access: {
7d6fc15b
TL
437 permission: &Permission::Anybody,
438 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
439 or DATASTORE_BACKUP and being the owner of the group",
bb34b589 440 },
fc189b19
DM
441)]
442/// List backup snapshots.
a577d7d8 443pub async fn list_snapshots(
54552dda 444 store: String,
bc21ade2 445 ns: Option<BackupNamespace>,
988d575d 446 backup_type: Option<BackupType>,
54552dda
DM
447 backup_id: Option<String>,
448 _param: Value,
184f17af 449 _info: &ApiMethod,
54552dda 450 rpcenv: &mut dyn RpcEnvironment,
fc189b19 451) -> Result<Vec<SnapshotListItem>, Error> {
e6dc35ac 452 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
7d6fc15b 453
a577d7d8
WB
454 tokio::task::spawn_blocking(move || unsafe {
455 list_snapshots_blocking(store, ns, backup_type, backup_id, auth_id)
456 })
457 .await
458 .map_err(|err| format_err!("failed to await blocking task: {err}"))?
459}
460
/// This must not run in a main worker thread as it potentially does tons of I/O.
///
/// # Safety
///
/// NOTE(review): the function body contains no visible unsafe operations; the
/// `unsafe` marker appears to only enforce that callers think about the
/// blocking-I/O requirement above — confirm against the call site.
unsafe fn list_snapshots_blocking(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    auth_id: Authid,
) -> Result<Vec<SnapshotListItem>, Error> {
    let ns = ns.unwrap_or_default();

    // full audit access lists everything; otherwise filter to owned groups
    let list_all = !check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_type_ok(ns.clone(), backup_type)?
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => BackupType::iter()
            .filter_map(|backup_type| {
                let group =
                    datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id.clone());
                group.exists().then_some(group)
            })
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(ns.clone())?,
    };

    // Map one BackupInfo to the API item; degrades gracefully if the
    // manifest/file listing fails (entry without sizes/comment/verification).
    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state : '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        // unreadable owner: log and skip the group instead of failing the call
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}' in {} - {}",
                    group.group(),
                    print_store_and_ns(&store, &ns),
                    err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}
604
5a2f7ea7
WB
605async fn get_snapshots_count(
606 store: &Arc<DataStore>,
607 owner: Option<&Authid>,
608) -> Result<Counts, Error> {
609 let store = Arc::clone(store);
610 let owner = owner.cloned();
611 tokio::task::spawn_blocking(move || {
612 let root_ns = Default::default();
613 ListAccessibleBackupGroups::new_with_privs(
614 &store,
615 root_ns,
616 MAX_NAMESPACE_DEPTH,
617 Some(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ),
618 None,
619 owner.as_ref(),
620 )?
621 .try_fold(Counts::default(), |mut counts, group| {
622 let group = match group {
623 Ok(group) => group,
624 Err(_) => return Ok(counts), // TODO: add this as error counts?
22cfad13 625 };
5a2f7ea7
WB
626 let snapshot_count = group.list_backups()?.len() as u64;
627
628 // only include groups with snapshots, counting/displaying empty groups can confuse
629 if snapshot_count > 0 {
630 let type_count = match group.backup_type() {
631 BackupType::Ct => counts.ct.get_or_insert(Default::default()),
632 BackupType::Vm => counts.vm.get_or_insert(Default::default()),
633 BackupType::Host => counts.host.get_or_insert(Default::default()),
634 };
14e08625 635
5a2f7ea7
WB
636 type_count.groups += 1;
637 type_count.snapshots += snapshot_count;
638 }
16f9f244 639
5a2f7ea7
WB
640 Ok(counts)
641 })
f12f408e 642 })
5a2f7ea7 643 .await?
16f9f244
DC
644}
645
1dc117bb
DM
646#[api(
647 input: {
648 properties: {
649 store: {
650 schema: DATASTORE_SCHEMA,
651 },
98afc7b1
FG
652 verbose: {
653 type: bool,
654 default: false,
655 optional: true,
656 description: "Include additional information like snapshot counts and GC status.",
657 },
1dc117bb 658 },
98afc7b1 659
1dc117bb
DM
660 },
661 returns: {
14e08625 662 type: DataStoreStatus,
1dc117bb 663 },
bb34b589 664 access: {
84de1012
TL
665 permission: &Permission::Anybody,
666 description: "Requires on /datastore/{store} either DATASTORE_AUDIT or DATASTORE_BACKUP for \
667 the full statistics. Counts of accessible groups are always returned, if any",
bb34b589 668 },
1dc117bb
DM
669)]
670/// Get datastore status.
143ac7e6 671pub async fn status(
1dc117bb 672 store: String,
98afc7b1 673 verbose: bool,
0eecf38f 674 _info: &ApiMethod,
fdfcb74d 675 rpcenv: &mut dyn RpcEnvironment,
14e08625 676) -> Result<DataStoreStatus, Error> {
84de1012
TL
677 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
678 let user_info = CachedUserInfo::new()?;
679 let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
680
681 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read));
682
683 let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 {
684 true
685 } else if store_privs & PRIV_DATASTORE_READ != 0 {
686 false // allow at least counts, user can read groups anyway..
84de1012 687 } else {
2981cdd4 688 match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) {
d20137e5 689 // avoid leaking existence info if users hasn't at least any priv. below
2981cdd4
TL
690 Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")),
691 _ => false,
692 }
84de1012 693 };
d20137e5 694 let datastore = datastore?; // only unwrap no to avoid leaking existence info
fdfcb74d 695
84de1012 696 let (counts, gc_status) = if verbose {
fdfcb74d
FG
697 let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
698 None
699 } else {
700 Some(&auth_id)
701 };
702
5a2f7ea7 703 let counts = Some(get_snapshots_count(&datastore, filter_owner).await?);
84de1012
TL
704 let gc_status = if store_stats {
705 Some(datastore.last_gc_status())
706 } else {
707 None
708 };
fdfcb74d
FG
709
710 (counts, gc_status)
711 } else {
712 (None, None)
98afc7b1 713 };
16f9f244 714
84de1012 715 Ok(if store_stats {
143ac7e6 716 let storage = crate::tools::fs::fs_info(datastore.base_path()).await?;
84de1012
TL
717 DataStoreStatus {
718 total: storage.total,
719 used: storage.used,
1cc73a43 720 avail: storage.available,
84de1012
TL
721 gc_status,
722 counts,
723 }
724 } else {
725 DataStoreStatus {
726 total: 0,
727 used: 0,
728 avail: 0,
729 gc_status,
730 counts,
731 }
14e08625 732 })
0eecf38f
DM
733}
734
c2009e53
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "max-depth": {
                schema: NS_MAX_DEPTH_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_VERIFY for any \
            or DATASTORE_BACKUP and being the owner of the group",
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backup from a backup group,
/// or all backups in the datastore.
///
/// Dispatch on (backup-type, backup-id, backup-time):
/// all three given -> one snapshot; type+id -> one group; none -> whole
/// datastore/namespace; any other combination is rejected. Returns the UPID
/// of the spawned worker task.
#[allow(clippy::too_many_arguments)]
pub fn verify(
    store: String,
    ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    max_depth: Option<usize>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let ns = ns.unwrap_or_default();

    // true when the caller only has BACKUP privs and must own the group(s)
    let owner_check_required = check_ns_privs_full(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_VERIFY,
        PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            // single snapshot
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(ns.clone(), backup_type, backup_id, backup_time)?;

            if owner_check_required {
                let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            // whole group
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            if owner_check_required {
                let owner = datastore.get_owner(&ns, &group)?;
                check_backup_owner(&owner, &auth_id)?;
            }

            backup_group = Some(datastore.backup_group(ns.clone(), group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            // whole datastore (optionally scoped to a namespace)
            worker_id = if ns.is_root() {
                store
            } else {
                format!("{}:{}", store, ns.display_as_path())
            };
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            // collect the identifiers of snapshots that failed verification
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(print_ns_and_snapshot(
                        backup_dir.backup_ns(),
                        backup_dir.as_ref(),
                    ));
                }
                res
            } else if let Some(backup_group) = backup_group {
                verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            } else {
                // datastore-wide: restrict to owned groups if required
                let owner = if owner_check_required {
                    Some(&auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    ns,
                    max_depth,
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
925
0a240aaa
DC
#[api(
    input: {
        properties: {
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "keep-options": {
                type: KeepOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            "use-task": {
                type: bool,
                default: false,
                optional: true,
                description: "Spins up an asynchronous task that does the work.",
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Anybody,
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\
            or DATASTORE_PRUNE and being the owner of the group",
    },
)]
/// Prune a group on the datastore
pub fn prune(
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    keep_options: KeepOptions,
    store: String,
    ns: Option<BackupNamespace>,
    param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    // No explicit namespace means the root namespace.
    let ns = ns.unwrap_or_default();
    // Access check: DATASTORE_MODIFY on the namespace, or DATASTORE_PRUNE
    // combined with group ownership (enforced by the helper).
    let datastore = check_privs_and_load_store(
        &store,
        &ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
        PRIV_DATASTORE_PRUNE,
        Some(Operation::Write),
        &group,
    )?;

    let worker_id = format!("{}:{}:{}", store, ns, group);
    let group = datastore.backup_group(ns.clone(), group);

    // Per-snapshot entry of the prune result; serialized shape matches
    // ADMIN_DATASTORE_PRUNE_RETURN_TYPE.
    #[derive(Debug, serde::Serialize)]
    struct PruneResult {
        #[serde(rename = "backup-type")]
        backup_type: BackupType,
        #[serde(rename = "backup-id")]
        backup_id: String,
        #[serde(rename = "backup-time")]
        backup_time: i64,
        keep: bool,
        protected: bool,
        #[serde(skip_serializing_if = "Option::is_none")]
        ns: Option<BackupNamespace>,
    }
    let mut prune_result: Vec<PruneResult> = Vec::new();

    let list = group.list_backups()?;

    // Apply the keep-* options to mark each snapshot keep/remove.
    let mut prune_info = compute_prune_info(list, &keep_options)?;

    prune_info.reverse(); // delete older snapshots first

    // No keep-* option set at all means nothing gets removed.
    let keep_all = !keep_options.keeps_something();

    if dry_run {
        // Only report what would happen; nothing is deleted.
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();
            let backup_dir = &info.backup_dir;

            let mut result = PruneResult {
                backup_type: backup_dir.backup_type(),
                backup_id: backup_dir.backup_id().to_owned(),
                backup_time: backup_dir.backup_time(),
                keep,
                protected: mark.protected(),
                ns: None,
            };
            let prune_ns = backup_dir.backup_ns();
            // Only include the namespace when it is not the root one.
            if !prune_ns.is_root() {
                result.ns = Some(prune_ns.to_owned());
            }
            prune_result.push(result);
        }
        return Ok(json!(prune_result));
    }

    // The actual prune work, shared by the synchronous and the task-based
    // code paths below.
    let prune_group = move |worker: Arc<WorkerTask>| {
        if keep_all {
            task_log!(worker, "No prune selection - keeping all files.");
        } else {
            // Log the effective retention options for the task log.
            let mut opts = Vec::new();
            if !ns.is_root() {
                opts.push(format!("--ns {ns}"));
            }
            crate::server::cli_keep_options(&mut opts, &keep_options);

            task_log!(worker, "retention options: {}", opts.join(" "));
            task_log!(
                worker,
                "Starting prune on {} group \"{}\"",
                print_store_and_ns(&store, &ns),
                group.group(),
            );
        }

        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();
            let backup_dir = &info.backup_dir;

            let backup_time = backup_dir.backup_time();
            let timestamp = backup_dir.backup_time_string();
            let group: &pbs_api_types::BackupGroup = backup_dir.as_ref();

            let msg = format!("{}/{}/{timestamp} {mark}", group.ty, group.id);

            task_log!(worker, "{msg}");

            prune_result.push(PruneResult {
                backup_type: group.ty,
                backup_id: group.id.clone(),
                backup_time,
                keep,
                protected: mark.protected(),
                ns: None,
            });

            if !keep {
                // Removal errors are logged but do not abort the prune run.
                if let Err(err) = backup_dir.destroy(false) {
                    task_warn!(
                        worker,
                        "failed to remove dir {:?}: {}",
                        backup_dir.relative_path(),
                        err,
                    );
                }
            }
        }
        prune_result
    };

    if param["use-task"].as_bool().unwrap_or(false) {
        // Asynchronous mode: return the UPID immediately, results only end up
        // in the task log.
        let upid = WorkerTask::spawn(
            "prune",
            Some(worker_id),
            auth_id.to_string(),
            true,
            move |worker| async move {
                let _ = prune_group(worker.clone());
                Ok(())
            },
        )?;
        Ok(json!(upid))
    } else {
        // Synchronous mode: run inline and return the per-snapshot results.
        let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
        let result = prune_group(worker.clone());
        worker.log_result(&Ok(()));
        Ok(json!(result))
    }
}
1108
9805207a
DC
1109#[api(
1110 input: {
1111 properties: {
1112 "dry-run": {
1113 optional: true,
1114 type: bool,
1115 default: false,
1116 description: "Just show what prune would do, but do not delete anything.",
1117 },
1118 "prune-options": {
dba37e21 1119 type: PruneJobOptions,
9805207a
DC
1120 flatten: true,
1121 },
1122 store: {
1123 schema: DATASTORE_SCHEMA,
1124 },
1125 },
1126 },
1127 returns: {
1128 schema: UPID_SCHEMA,
1129 },
1130 access: {
dba37e21
WB
1131 permission: &Permission::Anybody,
1132 description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
9805207a
DC
1133 },
1134)]
1135/// Prune the datastore
1136pub fn prune_datastore(
1137 dry_run: bool,
dba37e21 1138 prune_options: PruneJobOptions,
9805207a
DC
1139 store: String,
1140 _param: Value,
1141 rpcenv: &mut dyn RpcEnvironment,
1142) -> Result<String, Error> {
dba37e21
WB
1143 let user_info = CachedUserInfo::new()?;
1144
9805207a
DC
1145 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1146
dba37e21
WB
1147 user_info.check_privs(
1148 &auth_id,
1149 &prune_options.acl_path(&store),
1150 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
1151 true,
1152 )?;
1153
e9d2fc93 1154 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
dba37e21 1155 let ns = prune_options.ns.clone().unwrap_or_default();
36971618 1156 let worker_id = format!("{}:{}", store, ns);
9805207a 1157
bfa942c0
DC
1158 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
1159
9805207a
DC
1160 let upid_str = WorkerTask::new_thread(
1161 "prune",
36971618 1162 Some(worker_id),
049a22a3 1163 auth_id.to_string(),
bfa942c0 1164 to_stdout,
dc7a5b34 1165 move |worker| {
dba37e21 1166 crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
dc7a5b34 1167 },
9805207a
DC
1168 )?;
1169
1170 Ok(upid_str)
1171}
1172
dfc58d47
DM
1173#[api(
1174 input: {
1175 properties: {
1176 store: {
1177 schema: DATASTORE_SCHEMA,
1178 },
1179 },
1180 },
1181 returns: {
1182 schema: UPID_SCHEMA,
1183 },
bb34b589 1184 access: {
54552dda 1185 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
bb34b589 1186 },
dfc58d47
DM
1187)]
1188/// Start garbage collection.
bf78f708 1189pub fn start_garbage_collection(
dfc58d47 1190 store: String,
6049b71f 1191 _info: &ApiMethod,
dd5495d6 1192 rpcenv: &mut dyn RpcEnvironment,
6049b71f 1193) -> Result<Value, Error> {
e9d2fc93 1194 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
e6dc35ac 1195 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
15e9b4ed 1196
dc7a5b34 1197 let job = Job::new("garbage_collection", &store)
4fdf5ddf 1198 .map_err(|_| format_err!("garbage collection already running"))?;
15e9b4ed 1199
39735609 1200 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
15e9b4ed 1201
dc7a5b34
TL
1202 let upid_str =
1203 crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
1204 .map_err(|err| {
1205 format_err!(
1206 "unable to start garbage collection job on datastore {} - {}",
1207 store,
1208 err
1209 )
1210 })?;
0f778e06
DM
1211
1212 Ok(json!(upid_str))
15e9b4ed
DM
1213}
1214
a92830dc
DM
1215#[api(
1216 input: {
1217 properties: {
1218 store: {
1219 schema: DATASTORE_SCHEMA,
1220 },
1221 },
1222 },
1223 returns: {
1224 type: GarbageCollectionStatus,
bb34b589
DM
1225 },
1226 access: {
1227 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
1228 },
a92830dc
DM
1229)]
1230/// Garbage collection status.
5eeea607 1231pub fn garbage_collection_status(
a92830dc 1232 store: String,
6049b71f 1233 _info: &ApiMethod,
dd5495d6 1234 _rpcenv: &mut dyn RpcEnvironment,
a92830dc 1235) -> Result<GarbageCollectionStatus, Error> {
e9d2fc93 1236 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f2b99c34 1237
f2b99c34 1238 let status = datastore.last_gc_status();
691c89a0 1239
a92830dc 1240 Ok(status)
691c89a0
DM
1241}
1242
fe1d34d2
SL
1243#[api(
1244 input: {
1245 properties: {
1246 store: {
1247 schema: DATASTORE_SCHEMA,
1248 },
1249 },
1250 },
1251 returns: {
1252 type: GarbageCollectionJobStatus,
1253 },
1254 access: {
1255 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
1256 },
1257)]
1258/// Garbage collection status.
1259pub fn garbage_collection_job_status(
1260 store: String,
1261 _info: &ApiMethod,
1262 _rpcenv: &mut dyn RpcEnvironment,
1263) -> Result<GarbageCollectionJobStatus, Error> {
1264 let (config, _) = pbs_config::datastore::config()?;
1265 let store_config: DataStoreConfig = config.lookup("datastore", &store)?;
1266
1267 let mut info = GarbageCollectionJobStatus {
1268 store: store.clone(),
1269 schedule: store_config.gc_schedule,
1270 ..Default::default()
1271 };
1272
1273 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
1274 let status_in_memory = datastore.last_gc_status();
1275 let state_file = JobState::load("garbage_collection", &store)
3ae21d87 1276 .map_err(|err| log::error!("could not open GC statefile for {store}: {err}"))
fe1d34d2
SL
1277 .ok();
1278
3ae21d87
FG
1279 match status_in_memory.upid {
1280 Some(ref upid) => {
fe1d34d2
SL
1281 let mut computed_schedule: JobScheduleStatus = JobScheduleStatus::default();
1282 let mut duration = None;
1283 if let Some(state) = state_file {
3ae21d87 1284 if let Ok(cs) = compute_schedule_status(&state, Some(&upid)) {
fe1d34d2
SL
1285 computed_schedule = cs;
1286 }
1287 }
1288
1289 if let Some(endtime) = computed_schedule.last_run_endtime {
1290 computed_schedule.next_run = info
1291 .schedule
1292 .as_ref()
1293 .and_then(|s| {
1294 s.parse::<CalendarEvent>()
1295 .map_err(|err| log::error!("{err}"))
1296 .ok()
1297 })
1298 .and_then(|e| {
1299 e.compute_next_event(endtime)
1300 .map_err(|err| log::error!("{err}"))
1301 .ok()
1302 })
1303 .and_then(|ne| ne);
1304
1305 if let Ok(parsed_upid) = upid.parse::<UPID>() {
1306 duration = Some(endtime - parsed_upid.starttime);
1307 }
1308 }
1309
3ae21d87 1310 info.status = status_in_memory;
fe1d34d2
SL
1311 info.next_run = computed_schedule.next_run;
1312 info.last_run_endtime = computed_schedule.last_run_endtime;
1313 info.last_run_state = computed_schedule.last_run_state;
1314 info.duration = duration;
1315 }
1316 None => {
1317 if let Some(schedule) = &info.schedule {
1318 info.next_run = schedule
1319 .parse::<CalendarEvent>()
1320 .map_err(|err| log::error!("{err}"))
1321 .ok()
1322 .and_then(|e| {
1323 e.compute_next_event(proxmox_time::epoch_i64())
1324 .map_err(|err| log::error!("{err}"))
1325 .ok()
1326 })
1327 .and_then(|ne| ne);
1328
1329 if let Ok(event) = schedule.parse::<CalendarEvent>() {
1330 if let Ok(next_event) = event.compute_next_event(proxmox_time::epoch_i64()) {
1331 info.next_run = next_event;
1332 }
1333 }
1334 } else {
1335 return Ok(info);
1336 }
1337 }
1338 }
1339
1340 Ok(info)
1341}
1342
bb34b589 1343#[api(
30fb6025
DM
1344 returns: {
1345 description: "List the accessible datastores.",
1346 type: Array,
9b93c620 1347 items: { type: DataStoreListItem },
30fb6025 1348 },
bb34b589 1349 access: {
54552dda 1350 permission: &Permission::Anybody,
bb34b589
DM
1351 },
1352)]
1353/// Datastore list
bf78f708 1354pub fn get_datastore_list(
6049b71f
DM
1355 _param: Value,
1356 _info: &ApiMethod,
54552dda 1357 rpcenv: &mut dyn RpcEnvironment,
455e5f71 1358) -> Result<Vec<DataStoreListItem>, Error> {
e7d4be9d 1359 let (config, _digest) = pbs_config::datastore::config()?;
15e9b4ed 1360
e6dc35ac 1361 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda
DM
1362 let user_info = CachedUserInfo::new()?;
1363
30fb6025 1364 let mut list = Vec::new();
54552dda 1365
30fb6025 1366 for (store, (_, data)) in &config.sections {
8c9c6c07
TL
1367 let acl_path = &["datastore", store];
1368 let user_privs = user_info.lookup_privs(&auth_id, acl_path);
dc7a5b34 1369 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
7d6fc15b
TL
1370
1371 let mut allow_id = false;
1372 if !allowed {
8c9c6c07
TL
1373 if let Ok(any_privs) = user_info.any_privs_below(&auth_id, acl_path, NS_PRIVS_OK) {
1374 allow_id = any_privs;
7d6fc15b
TL
1375 }
1376 }
1377
1378 if allowed || allow_id {
dc7a5b34
TL
1379 list.push(DataStoreListItem {
1380 store: store.clone(),
7d6fc15b
TL
1381 comment: if !allowed {
1382 None
1383 } else {
1384 data["comment"].as_str().map(String::from)
1385 },
e022d13c 1386 maintenance: data["maintenance-mode"].as_str().map(String::from),
dc7a5b34 1387 });
30fb6025 1388 }
54552dda
DM
1389 }
1390
44288184 1391 Ok(list)
15e9b4ed
DM
1392}
1393
0ab08ac9
DM
/// Endpoint definition: download a single raw (still encoded, possibly
/// encrypted) file from a backup snapshot, handled by [`download_file`].
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);
691c89a0 1416
/// HTTP handler behind [`API_METHOD_DOWNLOAD_FILE`]: stream a snapshot file
/// to the client as-is (no decoding, no decryption).
pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        // backup-type/backup-id/backup-time come flattened in `param`.
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        // DATASTORE_READ, or DATASTORE_BACKUP plus group ownership.
        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        println!(
            "Download {} from {} ({}/{})",
            file_name,
            print_store_and_ns(store, &backup_ns),
            backup_dir,
            file_name
        );

        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        // Absolute path of the requested file inside the snapshot directory.
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // Stream the file in chunks; errors during streaming can only be
        // logged since the response status was already sent.
        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}
1478
6ef9bb59
DC
/// Endpoint definition: download a single file from a snapshot in decoded
/// form (refuses encrypted files), handled by [`download_file_decoded`].
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);
1501
bf78f708 1502pub fn download_file_decoded(
6ef9bb59
DC
1503 _parts: Parts,
1504 _req_body: Body,
1505 param: Value,
1506 _info: &ApiMethod,
1507 rpcenv: Box<dyn RpcEnvironment>,
1508) -> ApiResponseFuture {
6ef9bb59 1509 async move {
7d6fc15b 1510 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
3c8c2827 1511 let store = required_string_param(&param, "store")?;
133d718f 1512 let backup_ns = optional_ns_param(&param)?;
abd82485 1513
1afce610 1514 let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
7a404dc5 1515 let datastore = check_privs_and_load_store(
e1db0670 1516 store,
abd82485 1517 &backup_ns,
7d6fc15b 1518 &auth_id,
2bc2435a
FG
1519 PRIV_DATASTORE_READ,
1520 PRIV_DATASTORE_BACKUP,
c9396984 1521 Some(Operation::Read),
1afce610 1522 &backup_dir_api.group,
c9396984 1523 )?;
a724f5fd 1524
3c8c2827 1525 let file_name = required_string_param(&param, "file-name")?.to_owned();
abd82485 1526 let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
6ef9bb59 1527
9ccf933b 1528 let (manifest, files) = read_backup_index(&backup_dir)?;
6ef9bb59 1529 for file in files {
f28d9088 1530 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
6ef9bb59
DC
1531 bail!("cannot decode '{}' - is encrypted", file_name);
1532 }
1533 }
1534
dc7a5b34
TL
1535 println!(
1536 "Download {} from {} ({}/{})",
abd82485 1537 file_name,
e1db0670 1538 print_store_and_ns(store, &backup_ns),
abd82485
FG
1539 backup_dir_api,
1540 file_name
dc7a5b34 1541 );
6ef9bb59
DC
1542
1543 let mut path = datastore.base_path();
1544 path.push(backup_dir.relative_path());
1545 path.push(&file_name);
1546
e1db0670 1547 let (_, extension) = file_name.rsplit_once('.').unwrap();
6ef9bb59
DC
1548
1549 let body = match extension {
1550 "didx" => {
dc7a5b34
TL
1551 let index = DynamicIndexReader::open(&path).map_err(|err| {
1552 format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
1553 })?;
2d55beec
FG
1554 let (csum, size) = index.compute_csum();
1555 manifest.verify_file(&file_name, &csum, size)?;
6ef9bb59 1556
14f6c9cb 1557 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1558 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1559 Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
1560 eprintln!("error during streaming of '{:?}' - {}", path, err);
1561 err
1562 }))
1563 }
6ef9bb59 1564 "fidx" => {
dc7a5b34
TL
1565 let index = FixedIndexReader::open(&path).map_err(|err| {
1566 format_err!("unable to read fixed index '{:?}' - {}", &path, err)
1567 })?;
6ef9bb59 1568
2d55beec
FG
1569 let (csum, size) = index.compute_csum();
1570 manifest.verify_file(&file_name, &csum, size)?;
1571
14f6c9cb 1572 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1573 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1574 Body::wrap_stream(
1575 AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
1576 move |err| {
1577 eprintln!("error during streaming of '{:?}' - {}", path, err);
1578 err
1579 },
1580 ),
1581 )
1582 }
6ef9bb59
DC
1583 "blob" => {
1584 let file = std::fs::File::open(&path)
8aa67ee7 1585 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
6ef9bb59 1586
2d55beec
FG
1587 // FIXME: load full blob to verify index checksum?
1588
6ef9bb59 1589 Body::wrap_stream(
dc7a5b34
TL
1590 WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
1591 move |err| {
6ef9bb59
DC
1592 eprintln!("error during streaming of '{:?}' - {}", path, err);
1593 err
dc7a5b34
TL
1594 },
1595 ),
6ef9bb59 1596 )
dc7a5b34 1597 }
6ef9bb59
DC
1598 extension => {
1599 bail!("cannot download '{}' files", extension);
dc7a5b34 1600 }
6ef9bb59
DC
1601 };
1602
1603 // fixme: set other headers ?
1604 Ok(Response::builder()
dc7a5b34
TL
1605 .status(StatusCode::OK)
1606 .header(header::CONTENT_TYPE, "application/octet-stream")
1607 .body(body)
1608 .unwrap())
1609 }
1610 .boxed()
6ef9bb59
DC
1611}
1612
/// Endpoint definition: upload the client's log blob into an existing
/// snapshot, handled by [`upload_backup_log`].
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Anybody,
);
9e47c0a5 1631
/// HTTP handler behind [`API_METHOD_UPLOAD_BACKUP_LOG`]: store the uploaded
/// client log blob next to the snapshot. Fails if a log already exists.
pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;

        // backup-type/backup-id/backup-time come flattened in `param`.
        let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

        // No "any"-privilege here (first priv argument 0): only
        // DATASTORE_BACKUP plus group ownership grants access.
        let datastore = check_privs_and_load_store(
            store,
            &backup_ns,
            &auth_id,
            0,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Write),
            &backup_dir_api.group,
        )?;
        let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let mut path = backup_dir.full_path();
        path.push(file_name);

        // The log may only be uploaded once per snapshot.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!(
            "Upload backup log to {} {backup_dir_api}/{file_name}",
            print_store_and_ns(store, &backup_ns),
        );

        // Collect the whole request body into memory before validating it.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}
1689
5b1cfa01
DC
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
            DATASTORE_BACKUP and being the owner of the group",
        permission: &Permission::Anybody,
    },
)]
/// Get the entries of the given path of the catalog
pub async fn catalog(
    store: String,
    ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    // Catalog access does blocking file I/O, so run it off the executor.
    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();

        // DATASTORE_READ, or DATASTORE_BACKUP plus group ownership.
        let datastore = check_privs_and_load_store(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let file_name = CATALOG_NAME;

        // An encrypted catalog cannot be read server-side.
        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(file_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        // Verify against the manifest before handing out data.
        let (csum, size) = index.compute_csum();
        manifest.verify_file(file_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);

        let mut catalog_reader = CatalogReader::new(reader);

        // "root" and "/" both address the catalog root; everything else is a
        // base64-encoded path.
        let path = if filepath != "root" && filepath != "/" {
            base64::decode(filepath)?
        } else {
            vec![b'/']
        };

        catalog_reader.list_dir_contents(&path)
    })
    .await?
}
1773
d33d8f4e
DC
/// Endpoint definition: extract a single entry (file, directory as zip or
/// tar.zst) from a pxar archive, handled by [`pxar_file_download`].
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
            ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
        ]),
    )
).access(
    Some(
        "Requires on /datastore/{store}[/{namespace}] either DATASTORE_READ for any or \
        DATASTORE_BACKUP and being the owner of the group",
    ),
    &Permission::Anybody,
);
1796
/// HTTP handler behind [`API_METHOD_PXAR_FILE_DOWNLOAD`]: look up an entry
/// inside a pxar archive of the snapshot and stream it back. Plain files and
/// hardlink targets are streamed raw; directories are packed on the fly as
/// zip or (with `tar=true`) tar.zst.
pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let ns = optional_ns_param(&param)?;

        // backup-type/backup-id/backup-time come flattened in `param`.
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        // DATASTORE_READ, or DATASTORE_BACKUP plus group ownership.
        let datastore = check_privs_and_load_store(
            store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_READ,
            PRIV_DATASTORE_BACKUP,
            Some(Operation::Read),
            &backup_dir.group,
        )?;

        let backup_dir = datastore.backup_dir(ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        // The decoded filepath is "<pxar-archive-name>/<path-inside-archive>";
        // strip a leading '/' before splitting off the archive name.
        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        // An encrypted archive cannot be read server-side.
        let (manifest, files) = read_backup_index(&backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        // Verify against the manifest before handing out data.
        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        // Open the pxar archive and resolve the requested entry.
        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path)
            .await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            // For hardlinks, stream the link target's contents.
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
                        err
                    }),
            ),
            EntryKind::Directory => {
                // Pack the directory in a background task and stream the
                // archive through a bounded channel.
                let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                if tar {
                    proxmox_rest_server::spawn_internal_task(create_tar(
                        channelwriter,
                        decoder,
                        path.clone(),
                    ));
                    let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
                    Body::wrap_stream(zstdstream.map_err(move |err| {
                        log::error!("error during streaming of tar.zst '{:?}' - {}", path, err);
                        err
                    }))
                } else {
                    proxmox_rest_server::spawn_internal_task(create_zip(
                        channelwriter,
                        decoder,
                        path.clone(),
                    ));
                    Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                        log::error!("error during streaming of zip '{:?}' - {}", path, err);
                        err
                    }))
                }
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}
1916
1a0d3d11
DM
1917#[api(
1918 input: {
1919 properties: {
1920 store: {
1921 schema: DATASTORE_SCHEMA,
1922 },
1923 timeframe: {
c68fa58a 1924 type: RRDTimeFrame,
1a0d3d11
DM
1925 },
1926 cf: {
1927 type: RRDMode,
1928 },
1929 },
1930 },
1931 access: {
7d6fc15b
TL
1932 permission: &Permission::Privilege(
1933 &["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1a0d3d11
DM
1934 },
1935)]
1936/// Read datastore stats
bf78f708 1937pub fn get_rrd_stats(
1a0d3d11 1938 store: String,
c68fa58a 1939 timeframe: RRDTimeFrame,
1a0d3d11
DM
1940 cf: RRDMode,
1941 _param: Value,
1942) -> Result<Value, Error> {
e9d2fc93 1943 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1944 let disk_manager = crate::tools::disks::DiskManage::new();
1945
1946 let mut rrd_fields = vec![
dc7a5b34 1947 "total",
de923258 1948 "available",
dc7a5b34
TL
1949 "used",
1950 "read_ios",
1951 "read_bytes",
1952 "write_ios",
1953 "write_bytes",
f27b6086
DC
1954 ];
1955
1956 // we do not have io_ticks for zpools, so don't include them
1957 match disk_manager.find_mounted_device(&datastore.base_path()) {
dc7a5b34 1958 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
f27b6086
DC
1959 _ => rrd_fields.push("io_ticks"),
1960 };
1961
dc7a5b34 1962 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1a0d3d11
DM
1963}
1964
5fd823c3
HL
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
    },
)]
/// Read the number of active read and write operations for a datastore.
pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
    // Counts are maintained elsewhere by the task tracking module; this
    // endpoint only reads and reports them.
    let active_operations = task_tracking::get_active_operations(&store)?;
    Ok(json!({
        "read": active_operations.read,
        "write": active_operations.write,
    }))
}
1985
d6688884
SR
1986#[api(
1987 input: {
1988 properties: {
988d575d 1989 store: { schema: DATASTORE_SCHEMA },
bc21ade2 1990 ns: {
133d718f
WB
1991 type: BackupNamespace,
1992 optional: true,
1993 },
8c74349b
WB
1994 backup_group: {
1995 type: pbs_api_types::BackupGroup,
1996 flatten: true,
1997 },
d6688884
SR
1998 },
1999 },
2000 access: {
7d6fc15b
TL
2001 permission: &Permission::Anybody,
2002 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2003 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
2004 },
2005)]
2006/// Get "notes" for a backup group
2007pub fn get_group_notes(
2008 store: String,
bc21ade2 2009 ns: Option<BackupNamespace>,
8c74349b 2010 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
2011 rpcenv: &mut dyn RpcEnvironment,
2012) -> Result<String, Error> {
d6688884 2013 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2014 let ns = ns.unwrap_or_default();
ea2e91e5 2015
7a404dc5 2016 let datastore = check_privs_and_load_store(
abd82485
FG
2017 &store,
2018 &ns,
7d6fc15b 2019 &auth_id,
2bc2435a
FG
2020 PRIV_DATASTORE_AUDIT,
2021 PRIV_DATASTORE_BACKUP,
c9396984 2022 Some(Operation::Read),
c9396984
FG
2023 &backup_group,
2024 )?;
d6688884 2025
abd82485 2026 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
d6688884
SR
2027 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
2028}
2029
2030#[api(
2031 input: {
2032 properties: {
988d575d 2033 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2034 ns: {
133d718f
WB
2035 type: BackupNamespace,
2036 optional: true,
2037 },
8c74349b
WB
2038 backup_group: {
2039 type: pbs_api_types::BackupGroup,
2040 flatten: true,
2041 },
d6688884
SR
2042 notes: {
2043 description: "A multiline text.",
2044 },
2045 },
2046 },
2047 access: {
7d6fc15b
TL
2048 permission: &Permission::Anybody,
2049 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2050 or DATASTORE_BACKUP and being the owner of the group",
d6688884
SR
2051 },
2052)]
2053/// Set "notes" for a backup group
2054pub fn set_group_notes(
2055 store: String,
bc21ade2 2056 ns: Option<BackupNamespace>,
8c74349b 2057 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
2058 notes: String,
2059 rpcenv: &mut dyn RpcEnvironment,
2060) -> Result<(), Error> {
d6688884 2061 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485
FG
2062 let ns = ns.unwrap_or_default();
2063
7a404dc5 2064 let datastore = check_privs_and_load_store(
abd82485
FG
2065 &store,
2066 &ns,
7d6fc15b 2067 &auth_id,
2bc2435a
FG
2068 PRIV_DATASTORE_MODIFY,
2069 PRIV_DATASTORE_BACKUP,
c9396984 2070 Some(Operation::Write),
c9396984
FG
2071 &backup_group,
2072 )?;
d6688884 2073
abd82485 2074 let note_path = get_group_note_path(&datastore, &ns, &backup_group);
e0a19d33 2075 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
d6688884
SR
2076
2077 Ok(())
2078}
2079
912b3f5b
DM
2080#[api(
2081 input: {
2082 properties: {
988d575d 2083 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2084 ns: {
133d718f
WB
2085 type: BackupNamespace,
2086 optional: true,
2087 },
8c74349b
WB
2088 backup_dir: {
2089 type: pbs_api_types::BackupDir,
2090 flatten: true,
2091 },
912b3f5b
DM
2092 },
2093 },
2094 access: {
7d6fc15b
TL
2095 permission: &Permission::Anybody,
2096 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2097 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
2098 },
2099)]
2100/// Get "notes" for a specific backup
bf78f708 2101pub fn get_notes(
912b3f5b 2102 store: String,
bc21ade2 2103 ns: Option<BackupNamespace>,
8c74349b 2104 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
2105 rpcenv: &mut dyn RpcEnvironment,
2106) -> Result<String, Error> {
7d6fc15b 2107 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2108 let ns = ns.unwrap_or_default();
ea2e91e5 2109
7a404dc5 2110 let datastore = check_privs_and_load_store(
abd82485
FG
2111 &store,
2112 &ns,
7d6fc15b 2113 &auth_id,
2bc2435a
FG
2114 PRIV_DATASTORE_AUDIT,
2115 PRIV_DATASTORE_BACKUP,
c9396984 2116 Some(Operation::Read),
c9396984
FG
2117 &backup_dir.group,
2118 )?;
912b3f5b 2119
fbfb64a6 2120 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
912b3f5b 2121
133d718f 2122 let (manifest, _) = backup_dir.load_manifest()?;
912b3f5b 2123
dc7a5b34 2124 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
912b3f5b
DM
2125
2126 Ok(String::from(notes))
2127}
2128
2129#[api(
2130 input: {
2131 properties: {
988d575d 2132 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2133 ns: {
133d718f
WB
2134 type: BackupNamespace,
2135 optional: true,
2136 },
8c74349b
WB
2137 backup_dir: {
2138 type: pbs_api_types::BackupDir,
2139 flatten: true,
2140 },
912b3f5b
DM
2141 notes: {
2142 description: "A multiline text.",
2143 },
2144 },
2145 },
2146 access: {
7d6fc15b
TL
2147 permission: &Permission::Anybody,
2148 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2149 or DATASTORE_BACKUP and being the owner of the group",
912b3f5b
DM
2150 },
2151)]
2152/// Set "notes" for a specific backup
bf78f708 2153pub fn set_notes(
912b3f5b 2154 store: String,
bc21ade2 2155 ns: Option<BackupNamespace>,
8c74349b 2156 backup_dir: pbs_api_types::BackupDir,
912b3f5b
DM
2157 notes: String,
2158 rpcenv: &mut dyn RpcEnvironment,
2159) -> Result<(), Error> {
7d6fc15b 2160 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2161 let ns = ns.unwrap_or_default();
ea2e91e5 2162
7a404dc5 2163 let datastore = check_privs_and_load_store(
abd82485
FG
2164 &store,
2165 &ns,
7d6fc15b 2166 &auth_id,
2bc2435a
FG
2167 PRIV_DATASTORE_MODIFY,
2168 PRIV_DATASTORE_BACKUP,
c9396984 2169 Some(Operation::Write),
c9396984
FG
2170 &backup_dir.group,
2171 )?;
912b3f5b 2172
fbfb64a6 2173 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
912b3f5b 2174
133d718f
WB
2175 backup_dir
2176 .update_manifest(|manifest| {
dc7a5b34
TL
2177 manifest.unprotected["notes"] = notes.into();
2178 })
2179 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
2180
2181 Ok(())
2182}
2183
8292d3d2
DC
2184#[api(
2185 input: {
2186 properties: {
988d575d 2187 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2188 ns: {
133d718f
WB
2189 type: BackupNamespace,
2190 optional: true,
2191 },
8c74349b
WB
2192 backup_dir: {
2193 type: pbs_api_types::BackupDir,
2194 flatten: true,
2195 },
8292d3d2
DC
2196 },
2197 },
2198 access: {
7d6fc15b
TL
2199 permission: &Permission::Anybody,
2200 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_AUDIT for any \
2201 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2202 },
2203)]
2204/// Query protection for a specific backup
2205pub fn get_protection(
2206 store: String,
bc21ade2 2207 ns: Option<BackupNamespace>,
8c74349b 2208 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2209 rpcenv: &mut dyn RpcEnvironment,
2210) -> Result<bool, Error> {
7d6fc15b 2211 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
abd82485 2212 let ns = ns.unwrap_or_default();
7a404dc5 2213 let datastore = check_privs_and_load_store(
abd82485
FG
2214 &store,
2215 &ns,
7d6fc15b 2216 &auth_id,
2bc2435a
FG
2217 PRIV_DATASTORE_AUDIT,
2218 PRIV_DATASTORE_BACKUP,
c9396984 2219 Some(Operation::Read),
c9396984
FG
2220 &backup_dir.group,
2221 )?;
8292d3d2 2222
fbfb64a6 2223 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
8292d3d2 2224
6da20161 2225 Ok(backup_dir.is_protected())
8292d3d2
DC
2226}
2227
2228#[api(
2229 input: {
2230 properties: {
988d575d 2231 store: { schema: DATASTORE_SCHEMA },
bc21ade2 2232 ns: {
133d718f
WB
2233 type: BackupNamespace,
2234 optional: true,
2235 },
8c74349b
WB
2236 backup_dir: {
2237 type: pbs_api_types::BackupDir,
2238 flatten: true,
2239 },
8292d3d2
DC
2240 protected: {
2241 description: "Enable/disable protection.",
2242 },
2243 },
2244 },
2245 access: {
7d6fc15b
TL
2246 permission: &Permission::Anybody,
2247 description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \
2248 or DATASTORE_BACKUP and being the owner of the group",
8292d3d2
DC
2249 },
2250)]
2251/// En- or disable protection for a specific backup
67d7a59d 2252pub async fn set_protection(
8292d3d2 2253 store: String,
bc21ade2 2254 ns: Option<BackupNamespace>,
8c74349b 2255 backup_dir: pbs_api_types::BackupDir,
8292d3d2
DC
2256 protected: bool,
2257 rpcenv: &mut dyn RpcEnvironment,
2258) -> Result<(), Error> {
7d6fc15b 2259 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
8292d3d2 2260
67d7a59d
WB
2261 tokio::task::spawn_blocking(move || {
2262 let ns = ns.unwrap_or_default();
2263 let datastore = check_privs_and_load_store(
2264 &store,
2265 &ns,
2266 &auth_id,
2267 PRIV_DATASTORE_MODIFY,
2268 PRIV_DATASTORE_BACKUP,
2269 Some(Operation::Write),
2270 &backup_dir.group,
2271 )?;
2272
2273 let backup_dir = datastore.backup_dir(ns, backup_dir)?;
8292d3d2 2274
67d7a59d
WB
2275 datastore.update_protection(&backup_dir, protected)
2276 })
2277 .await?
8292d3d2
DC
2278}
2279
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            ns: {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and \
            a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
///
/// With `Datastore.Modify` on the namespace the owner can be set freely;
/// otherwise (`Datastore.Backup` only) transfers are restricted to moving
/// ownership between a user and that same user's API tokens.
pub async fn set_backup_owner(
    store: String,
    ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    // Datastore access is blocking work, so run it on the blocking pool.
    tokio::task::spawn_blocking(move || {
        let ns = ns.unwrap_or_default();
        // Returns whether the caller only has the weaker (owner-bound)
        // privilege, i.e. the current-owner check below is required.
        let owner_check_required = check_ns_privs_full(
            &store,
            &ns,
            &auth_id,
            PRIV_DATASTORE_MODIFY,
            PRIV_DATASTORE_BACKUP,
        )?;

        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

        let backup_group = datastore.backup_group(ns, backup_group);

        if owner_check_required {
            let owner = backup_group.get_owner()?;

            // Unprivileged callers may only shuffle ownership between
            // themselves and their own API tokens; see the match arms.
            let allowed = match (owner.is_token(), new_owner.is_token()) {
                (true, true) => {
                    // API token to API token, owned by same user
                    let owner = owner.user();
                    let new_owner = new_owner.user();
                    owner == new_owner && Authid::from(owner.clone()) == auth_id
                }
                (true, false) => {
                    // API token to API token owner
                    Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
                }
                (false, true) => {
                    // API token owner to API token
                    owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
                }
                (false, false) => {
                    // User to User, not allowed for unprivileged users
                    false
                }
            };

            if !allowed {
                return Err(http_err!(
                    UNAUTHORIZED,
                    "{} does not have permission to change owner of backup group '{}' to {}",
                    auth_id,
                    backup_group.group(),
                    new_owner,
                ));
            }
        }

        let user_info = CachedUserInfo::new()?;

        // Refuse to hand ownership to disabled or unknown auth ids.
        if !user_info.is_active_auth_id(&new_owner) {
            bail!(
                "{} '{}' is inactive or non-existent",
                if new_owner.is_token() {
                    "API token".to_string()
                } else {
                    "user".to_string()
                },
                new_owner
            );
        }

        backup_group.set_owner(&new_owner, true)?;

        Ok(())
    })
    .await?
}
2382
#[sortable]
// Subdirectories of the per-datastore API (`/admin/datastore/{store}/...`);
// the `sortable` attribute takes care of keeping the entries ordered by name.
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "gc-job-status",
        &Router::new().get(&API_METHOD_GARBAGE_COLLECTION_JOB_STATUS),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "namespace",
        // FIXME: move into datastore:: sub-module?!
        &crate::api2::admin::namespace::ROUTER,
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];
2465
// Router for a single datastore: GET lists the available subdirectories,
// everything else dispatches into `DATASTORE_INFO_SUBDIRS`.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
2469
// Top-level datastore router: GET lists all datastores; everything below a
// concrete `{store}` is handled by the per-datastore router.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);