//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreListItem,
    DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode,
    RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
    IGNORE_VERIFIED_BACKUPS_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(store: &DataStore, group: &pbs_api_types::BackupGroup) -> PathBuf {
    let mut note_path = store.base_path();
    note_path.push(group.to_string());
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

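/// Check that `auth_id` either holds one of the bits in `required_privs` on
/// `/datastore/{store}`, or is the owner of `group`.
///
/// Typical use in this module (illustrative, mirroring the calls below):
/// `check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_READ)?;`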
bff85572 76fn check_priv_or_backup_owner(
e7cb4dc5 77 store: &DataStore,
db87d93e 78 group: &pbs_api_types::BackupGroup,
e6dc35ac 79 auth_id: &Authid,
bff85572
FG
80 required_privs: u64,
81) -> Result<(), Error> {
82 let user_info = CachedUserInfo::new()?;
9a37bd6c 83 let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);
bff85572
FG
84
85 if privs & required_privs == 0 {
86 let owner = store.get_owner(group)?;
87 check_backup_owner(&owner, auth_id)?;
88 }
89 Ok(())
90}
91
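/// Load the manifest of `backup_dir` and return it together with one
/// `BackupContent` entry per file referenced by the manifest, plus a
/// synthetic entry for the manifest blob itself (its crypt mode is derived
/// from whether the manifest carries a signature).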
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

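// Listing visibility: with DATASTORE_AUDIT on the datastore every group is
// returned; otherwise only groups owned by the requesting auth id are listed
// (see `list_all` / `check_backup_owner` below). Groups without snapshots are
// skipped.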
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    datastore
        .iter_backup_groups(Default::default())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;
            let owner = match datastore.get_owner(group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    let id = &store;
                    eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
                    return Ok(group_info);
                }
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(group_info);
            }

            let snapshots = match group.list_backups() {
                Ok(snapshots) => snapshots,
                Err(_) => return Ok(group_info),
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return Ok(group_info);
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |a, b| {
                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
                        a
                    } else {
                        b
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup: group.into(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            Ok(group_info)
        })
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    group: pbs_api_types::BackupGroup,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    if !datastore.remove_backup_group(&group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let snapshot = datastore.backup_dir(backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        snapshot.as_ref(),
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
    )?;

    let info = BackupInfo::new(snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let snapshot = datastore.backup_dir(backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        snapshot.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore.remove_backup_dir(snapshot.as_ref(), false)?;

    Ok(Value::Null)
}

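// The optional "backup-type"/"backup-id" parameters narrow the set of groups
// scanned below; without the DATASTORE_AUDIT privilege only snapshots of
// groups owned by the requesting auth id are returned.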
#[api(
    streaming: true,
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                optional: true,
                type: BackupType,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let backup_ns = backup_ns.unwrap_or_default();

    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
    // backup group and provide an error free (Err -> None) accessor
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            vec![datastore.backup_group_from_parts(backup_ns, backup_type, backup_id)]
        }
        // FIXME: Recursion
        (Some(backup_type), None) => datastore
            .iter_backup_groups_ok(backup_ns)?
            .filter(|group| group.backup_type() == backup_type)
            .collect(),
        // FIXME: Recursion
        (None, Some(backup_id)) => datastore
            .iter_backup_groups_ok(backup_ns)?
            .filter(|group| group.backup_id() == backup_id)
            .collect(),
        // FIXME: Recursion
        (None, None) => datastore.list_backup_groups(backup_ns)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup = pbs_api_types::BackupDir {
            group: group.into(),
            time: info.backup_dir.backup_time(),
        };
        let protected = info.backup_dir.is_protected();

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state: '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match datastore.get_owner(group.as_ref()) {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}/{}' - {}",
                    &store, group, err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups()?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

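/// Count backup groups and snapshots per type (ct/vm/host), optionally
/// restricted to groups owned by `filter_owner`. Groups without snapshots are
/// not counted.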
fn get_snapshots_count(
    store: &Arc<DataStore>,
    filter_owner: Option<&Authid>,
) -> Result<Counts, Error> {
    store
        .iter_backup_groups_ok(Default::default())? // FIXME: Recurse!
        .filter(|group| {
            let owner = match store.get_owner(group.as_ref()) {
                Ok(owner) => owner,
                Err(err) => {
                    let id = store.name();
                    eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
                    return false;
                }
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups()?.len() as u64;

            // only include groups with snapshots, counting/displaying empty groups can be confusing
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    BackupType::Ct => counts.ct.get_or_insert(Default::default()),
                    BackupType::Vm => counts.vm.get_or_insert(Default::default()),
                    BackupType::Host => counts.host.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

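// Parameter dispatch for the verify worker below: (type, id, time) verifies a
// single snapshot, (type, id) a whole group, and no selector at all verifies
// the complete datastore; any other combination is rejected.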
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            "backup-type": {
                type: BackupType,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_type: Option<BackupType>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    // FIXME: Recursion
    // FIXME: Namespaces and worker ID, could this be an issue?
    let backup_ns = backup_ns.unwrap_or_default();

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{}/{:08X}",
                store,
                backup_ns.display_as_path(),
                backup_type,
                backup_id,
                backup_time
            );
            let dir =
                datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!(
                "{}:{}/{}/{}",
                store,
                backup_ns.display_as_path(),
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_ns, backup_type, backup_id));

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(datastore.backup_group(group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?;
                failed_dirs
            } else {
                let privs = CachedUserInfo::new()?.lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

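// With "dry-run" the prune selection is only computed and returned; otherwise
// snapshots that are neither marked "keep" nor protected are removed, and the
// per-snapshot result is logged through a synchronous WorkerTask.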
#[api(
    input: {
        properties: {
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune a group on the datastore
pub fn prune(
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let group = datastore.backup_group(group);

    check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    let worker_id = format!("{}:{}", store, group);

    let mut prune_result = Vec::new();

    let list = group.list_backups()?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            prune_result.push(json!({
                "backup-type": info.backup_dir.backup_type(),
                "backup-id": info.backup_dir.backup_id(),
                "backup-time": info.backup_dir.backup_time(),
                "keep": keep,
                "protected": mark.protected(),
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(
            worker,
            "retention options: {}",
            pbs_datastore::prune::cli_options_string(&prune_options)
        );
        task_log!(
            worker,
            "Starting prune on store \"{}\" group \"{}\"",
            store,
            group,
        );
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();

        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark);

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.ty,
            "backup-id": group.id,
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

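// Unlike `prune` above, this prunes the whole datastore in a background
// worker thread and only returns the task UPID.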
#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(store.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(
                worker,
                auth_id,
                prune_options,
                &store,
                datastore,
                dry_run,
            )
        },
    )?;

    Ok(upid_str)
}

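// `Job::new` takes the per-datastore "garbage_collection" job lock, so a
// concurrent request fails early with "garbage collection already running".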
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: data["comment"].as_str().map(String::from),
                maintenance: data["maintenance-mode"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

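// The two download endpoints below differ in what they stream: `download_file`
// returns the stored file as-is (raw blob/index data), while
// `download_file_decoded` reassembles the chunked content and refuses
// encrypted files.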
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    None,
    &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
        true,
    ),
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;

        check_priv_or_backup_owner(
            &datastore,
            backup_dir.as_ref(),
            &auth_id,
            PRIV_DATASTORE_READ,
        )?;

        println!(
            "Download {} from {} ({}/{})",
            file_name, store, backup_dir, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

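// `download_file_decoded` dispatches on the file extension: "didx" and "fidx"
// indexes are verified against the manifest checksum and streamed through a
// chunk reader, "blob" files go through `DataBlobReader`, everything else is
// rejected.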
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    None,
    &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
        true,
    ),
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;

        check_priv_or_backup_owner(
            &datastore,
            backup_dir.as_ref(),
            &auth_id,
            PRIV_DATASTORE_READ,
        )?;

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name, store, backup_dir, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

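// Uploading a client log is restricted to the snapshot owner (enforced via
// `check_backup_owner` below) and fails if the snapshot already contains a
// log blob.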
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false),
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.as_ref())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {store}/{backup_dir}/{file_name}");

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

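// The "filepath" parameter is the base64-encoded path inside the catalog; the
// special values "root" and "/" select the catalog root.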
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = datastore.backup_dir(backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_READ,
    )?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}

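// "filepath" is the base64-encoded concatenation of the archive name and the
// path inside that archive, e.g. roughly "root.pxar.didx/etc/hostname"
// (illustrative example). Directories are streamed as a zip archive, or as
// .tar.zst when the "tar" flag is set.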
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
            ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let tar = param["tar"].as_bool().unwrap_or(false);

        let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;

        check_priv_or_backup_owner(
            &datastore,
            backup_dir.as_ref(),
            &auth_id,
            PRIV_DATASTORE_READ,
        )?;

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path)
            .await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                if tar {
                    proxmox_rest_server::spawn_internal_task(create_tar(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
                    Body::wrap_stream(zstdstream.map_err(move |err| {
                        eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
                        err
                    }))
                } else {
                    proxmox_rest_server::spawn_internal_task(create_zip(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                        eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                        err
                    }))
                }
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

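// Statistics come from the "datastore/{store}" RRD series; the io_ticks field
// is skipped for ZFS-backed datastores because it is not available there.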
1555#[api(
1556 input: {
1557 properties: {
1558 store: {
1559 schema: DATASTORE_SCHEMA,
1560 },
1561 timeframe: {
c68fa58a 1562 type: RRDTimeFrame,
1a0d3d11
DM
1563 },
1564 cf: {
1565 type: RRDMode,
1566 },
1567 },
1568 },
1569 access: {
1570 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1571 },
1572)]
1573/// Read datastore stats
bf78f708 1574pub fn get_rrd_stats(
1a0d3d11 1575 store: String,
c68fa58a 1576 timeframe: RRDTimeFrame,
1a0d3d11
DM
1577 cf: RRDMode,
1578 _param: Value,
1579) -> Result<Value, Error> {
e9d2fc93 1580 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1581 let disk_manager = crate::tools::disks::DiskManage::new();
1582
1583 let mut rrd_fields = vec![
dc7a5b34
TL
1584 "total",
1585 "used",
1586 "read_ios",
1587 "read_bytes",
1588 "write_ios",
1589 "write_bytes",
f27b6086
DC
1590 ];
1591
1592 // we do not have io_ticks for zpools, so don't include them
1593 match disk_manager.find_mounted_device(&datastore.base_path()) {
dc7a5b34 1594 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
f27b6086
DC
1595 _ => rrd_fields.push("io_ticks"),
1596 };
1597
dc7a5b34 1598 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1a0d3d11
DM
1599}
1600
5fd823c3
HL
1601#[api(
1602 input: {
1603 properties: {
1604 store: {
1605 schema: DATASTORE_SCHEMA,
1606 },
1607 },
1608 },
1609 access: {
1610 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1611 },
1612)]
1613/// Read datastore stats
dc7a5b34 1614pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
5fd823c3
HL
1615 let active_operations = task_tracking::get_active_operations(&store)?;
1616 Ok(json!({
1617 "read": active_operations.read,
1618 "write": active_operations.write,
1619 }))
1620}
1621
d6688884
SR
1622#[api(
1623 input: {
1624 properties: {
988d575d 1625 store: { schema: DATASTORE_SCHEMA },
8c74349b
WB
1626 backup_group: {
1627 type: pbs_api_types::BackupGroup,
1628 flatten: true,
1629 },
d6688884
SR
1630 },
1631 },
1632 access: {
1633 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1634 },
1635)]
1636/// Get "notes" for a backup group
1637pub fn get_group_notes(
1638 store: String,
8c74349b 1639 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1640 rpcenv: &mut dyn RpcEnvironment,
1641) -> Result<String, Error> {
e9d2fc93 1642 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
d6688884
SR
1643
1644 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
d6688884
SR
1645
1646 check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
1647
1648 let note_path = get_group_note_path(&datastore, &backup_group);
1649 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1650}
1651
1652#[api(
1653 input: {
1654 properties: {
988d575d 1655 store: { schema: DATASTORE_SCHEMA },
8c74349b
WB
1656 backup_group: {
1657 type: pbs_api_types::BackupGroup,
1658 flatten: true,
1659 },
d6688884
SR
1660 notes: {
1661 description: "A multiline text.",
1662 },
1663 },
1664 },
1665 access: {
1666 permission: &Permission::Privilege(&["datastore", "{store}"],
1667 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1668 true),
1669 },
1670)]
1671/// Set "notes" for a backup group
1672pub fn set_group_notes(
1673 store: String,
8c74349b 1674 backup_group: pbs_api_types::BackupGroup,
d6688884
SR
1675 notes: String,
1676 rpcenv: &mut dyn RpcEnvironment,
1677) -> Result<(), Error> {
e9d2fc93 1678 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
d6688884
SR
1679
1680 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
d6688884
SR
1681
1682 check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
1683
1684 let note_path = get_group_note_path(&datastore, &backup_group);
e0a19d33 1685 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
d6688884
SR
1686
1687 Ok(())
1688}
1689
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir(backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_AUDIT,
    )?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"].as_str().unwrap_or("");

    Ok(String::from(notes))
}
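// Updating notes only rewrites the manifest blob via update_manifest(); the snapshot's
// archives and chunks are left untouched.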
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_dir: pbs_api_types::BackupDir,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir(backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore
        .update_manifest(&backup_dir, |manifest| {
            manifest.unprotected["notes"] = notes.into();
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}
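// Protection is a per-snapshot flag: querying it needs Datastore.Audit (or ownership of
// the backup), while toggling it needs Datastore.Modify (or ownership).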
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir(backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_AUDIT,
    )?;

    Ok(backup_dir.is_protected())
}
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
            },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// Enable or disable protection for a specific backup
pub fn set_protection(
    store: String,
    backup_dir: pbs_api_types::BackupDir,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir(backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore.update_protection(&backup_dir, protected)
}
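// Ownership changes are allowed either with Datastore.Modify on the whole datastore,
// or with Datastore.Backup when the caller owns the group and ownership only moves
// between a user and that same user's API tokens (see the match below).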
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_group: pbs_api_types::BackupGroup,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_group = datastore.backup_group(backup_group);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(backup_group.as_ref())?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            }
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
            }
            (false, true) => {
                // API token owner to API token
                owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
            }
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            }
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(
            UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!(
            "{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner
        );
    }

    datastore.set_owner(backup_group.as_ref(), &new_owner, true)?;

    Ok(())
}
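// Sub-routes available below a single datastore; each entry maps a path segment to the
// router serving it. The entries are kept sorted by path segment (note #[sortable]).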
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);
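// A GET on the top-level router lists the configured datastores, while match_all()
// dispatches everything below "{store}" to DATASTORE_INFO_SUBDIRS; for example, the
// notes handlers end up at .../datastore/{store}/notes, assuming this module is
// mounted at the usual admin/datastore path.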