//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{ Authid, BackupContent, Counts, CryptMode,
    DataStoreListItem, GarbageCollectionStatus, GroupListItem,
    SnapshotListItem, SnapshotVerifyState, PruneOptions,
    DataStoreStatus, RRDMode, RRDTimeFrameResolution,
    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
    IGNORE_VERIFIED_BACKUPS_SCHEMA, UPID_SCHEMA,
    VERIFICATION_OUTDATED_AFTER_SCHEMA, PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ, PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY,
};
use pbs_client::pxar::create_zip;
use pbs_datastore::{BackupDir, BackupGroup, StoreProgress, CATALOG_NAME};
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::{FixedIndexReader};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_tools::blocking::WrappedReaderStream;
use pbs_tools::stream::{AsyncReaderStream, AsyncChannelWriter};
use pbs_tools::json::{required_integer_param, required_string_param};
use pbs_config::CachedUserInfo;

use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    check_backup_owner, verify_all_backups, verify_backup_group, verify_backup_dir, verify_filter,
    DataStore, LocalChunkReader,
};

use crate::server::{jobstate::Job, WorkerTask};

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
    let mut note_path = store.base_path();
    note_path.push(group.group_path());
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

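// Access helper shared by the handlers below: if the authenticated user/token
// lacks the required privilege bits on the datastore, fall back to requiring
// ownership of the backup group instead.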
fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);

    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}

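// Load the manifest of a snapshot and turn its file list into `BackupContent`
// entries; the manifest blob itself is appended, with its crypt mode derived
// from whether the manifest carries a signature.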
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

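// The listing endpoints below are filtered by ownership unless the caller has
// Datastore.Audit on the datastore ("list_all").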
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;

    let group_info = backup_groups
        .into_iter()
        .fold(Vec::new(), |mut group_info, group| {
            let owner = match datastore.get_owner(&group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store, group, err);
                    return group_info;
                },
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return group_info;
            }

            let snapshots = match group.list_backups(&datastore.base_path()) {
                Ok(snapshots) => snapshots,
                Err(_) => {
                    return group_info;
                },
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return group_info;
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |last, curr| {
                    if curr.is_finished()
                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
                        curr
                    } else {
                        last
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &group);
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup_type: group.backup_type().to_string(),
                backup_id: group.backup_id().to_string(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            group_info
        });

    Ok(group_info)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    backup_type: String,
    backup_id: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(backup_type, backup_id);
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_group(&group)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

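// Snapshot listing: the optional backup-type/backup-id parameters narrow the
// group selection; each snapshot is mapped to a `SnapshotListItem`, falling
// back to a bare file list when the manifest cannot be read.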
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            let mut groups = Vec::with_capacity(1);
            groups.push(BackupGroup::new(backup_type, backup_id));
            groups
        },
        (Some(backup_type), None) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_type() == backup_type)
                .collect()
        },
        (None, Some(backup_id)) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_id() == backup_id)
                .collect()
        },
        _ => BackupInfo::list_backup_groups(&base_path)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    },
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state : '{}'", err);
                        None
                    }
                };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                }
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                }
            },
        }
    };

    groups
        .iter()
        .try_fold(Vec::new(), |mut snapshots, group| {
            let owner = match datastore.get_owner(group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store, group, err);
                    return Ok(snapshots);
                },
            };

            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(snapshots);
            }

            let group_backups = group.list_backups(&datastore.base_path())?;

            snapshots.extend(
                group_backups
                    .into_iter()
                    .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
            );

            Ok(snapshots)
        })
}

fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let groups = BackupInfo::list_backup_groups(&base_path)?;

    groups.iter()
        .filter(|group| {
            let owner = match store.get_owner(&group) {
                Ok(owner) => owner,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              store.name(), group, err);
                    return false;
                },
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&base_path)?.len() as u64;

            let type_count = match group.backup_type() {
                "ct" => counts.ct.get_or_insert(Default::default()),
                "vm" => counts.vm.get_or_insert(Default::default()),
                "host" => counts.host.get_or_insert(Default::default()),
                _ => counts.other.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;

            Ok(counts)
        })
}

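// Datastore status: disk usage is always returned; snapshot counts and the
// last garbage collection status are only gathered when `verbose` is set,
// restricted to owned backups unless the caller has Datastore.Audit.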
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

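// Verification dispatch: depending on which of backup-type/backup-id/backup-time
// are given, a single snapshot, a whole group, or the entire datastore is
// verified inside a WorkerTask; already verified snapshots can be skipped via
// ignore-verified/outdated-after.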
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )?;
                failed_dirs
            } else {
                let privs = CachedUserInfo::new()?
                    .lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    owner,
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )?
            };
            if !failed_dirs.is_empty() {
                worker.log("Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

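// Prune a single group: with dry-run the keep/remove decision per snapshot is
// returned directly; otherwise a synchronous WorkerTask is used so the result
// also shows up in the task log.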
#[api(
    input: {
        properties: {
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune a group on the datastore
pub fn prune(
    backup_id: String,
    backup_type: String,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(&backup_type, &backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", pbs_datastore::prune::cli_options_string(&prune_options)));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                           store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();

        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(store.clone()),
        auth_id.clone(),
        false,
        move |worker| crate::server::prune_datastore(
            worker.clone(),
            auth_id,
            prune_options,
            &store,
            datastore,
            dry_run
        ),
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
        .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {

    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            list.push(
                DataStoreListItem {
                    store: store.clone(),
                    comment: data["comment"].as_str().map(String::from),
                }
            );
        }
    }

    Ok(list)
}

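// Raw file download: streams a file of a snapshot as-is. Declared as a manual
// ApiMethod with an AsyncHttp handler so it can return a streaming HTTP
// response instead of a JSON value.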
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| bytes.freeze())
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

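// Decoded download: like download_file, but the content is decoded on the fly.
// The file type is derived from the extension (didx/fidx/blob), for the index
// types the checksum is verified against the manifest, and encrypted files are
// refused.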
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

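// Catalog browsing: `filepath` is a base64 encoded path inside the snapshot's
// file catalog, e.g. the encoding of "/etc" (hypothetical example); "root" or
// "/" select the catalog root.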
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}

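// Single-file restore from a pxar archive: the base64 "filepath" starts with
// the archive name, followed by the path inside the archive. Regular files and
// hardlinks are streamed directly, directories are streamed as a zip archive.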
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path).await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            path, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                crate::server::spawn_internal_task(
                    create_zip(channelwriter, decoder, path.clone(), false)
                );
                Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

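// Group notes are stored as a plain "notes" file inside the group directory
// (see get_group_note_path above), while per-snapshot notes live in the
// manifest's "unprotected" section.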
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a backup group
pub fn get_group_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = BackupGroup::new(backup_type, backup_id);

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = BackupGroup::new(backup_type, backup_id);

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new())?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

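// Changing the owner requires either Datastore.Modify on the datastore, or
// Datastore.Backup plus an ownership transfer that stays within one user
// (between a user and that user's own API tokens).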
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(&backup_group)?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            },
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id
                    && new_owner == auth_id
            },
            (false, true) => {
                // API token owner to API token
                owner == auth_id
                    && Authid::from(new_owner.user().clone()) == auth_id
            },
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            },
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!("{} '{}' is inactive or non-existent",
              if new_owner.is_token() {
                  "API token".to_string()
              } else {
                  "user".to_string()
              },
              new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}

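// Per-datastore sub-router: each entry below maps a path segment such as
// "snapshots" or "gc" to its handler; the top-level ROUTER matches the {store}
// path component and forwards to this sub-router.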
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "prune-datastore",
        &Router::new()
            .post(&API_METHOD_PRUNE_DATASTORE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);