// src/api2/admin/datastore.rs
//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    Authid, BackupContent, Counts, CryptMode, DataStoreListItem, GarbageCollectionStatus,
    GroupListItem, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
    IGNORE_VERIFIED_BACKUPS_SCHEMA, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ, PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY,
};
use pbs_client::pxar::create_zip;
use pbs_datastore::{BackupDir, BackupGroup, StoreProgress, CATALOG_NAME};
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::{compute_prune_info, PruneOptions};
use pbs_tools::blocking::WrappedReaderStream;
use pbs_tools::stream::{AsyncReaderStream, AsyncChannelWriter};
use pbs_tools::json::{required_integer_param, required_string_param};
use pbs_config::CachedUserInfo;

use crate::api2::types::{DataStoreStatus, RRDMode, RRDTimeFrameResolution};
use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    check_backup_owner, verify_all_backups, verify_backup_group, verify_backup_dir, verify_filter,
    DataStore, LocalChunkReader,
};

use crate::server::{jobstate::Job, WorkerTask};

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
    let mut note_path = store.base_path();
    note_path.push(group.group_path());
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}
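
// Layout sketch (assuming the usual "<type>/<id>" group path returned by
// BackupGroup::group_path()): the group note ends up at
//   <datastore base>/<backup-type>/<backup-id>/notes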

fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);

    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}
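
// Usage pattern throughout this module (sketch): pass the privilege that permits the
// operation regardless of ownership; callers lacking it must instead own the group, e.g.
//
//     check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_READ)?;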

fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;

    let group_info = backup_groups
        .into_iter()
        .fold(Vec::new(), |mut group_info, group| {
            let owner = match datastore.get_owner(&group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return group_info;
                },
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return group_info;
            }

            let snapshots = match group.list_backups(&datastore.base_path()) {
                Ok(snapshots) => snapshots,
                Err(_) => {
                    return group_info;
                },
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return group_info;
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |last, curr| {
                    if curr.is_finished()
                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
                        curr
                    } else {
                        last
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &group);
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup_type: group.backup_type().to_string(),
                backup_id: group.backup_id().to_string(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            group_info
        });

    Ok(group_info)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    backup_type: String,
    backup_id: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(backup_type, backup_id);
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_group(&group)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            let mut groups = Vec::with_capacity(1);
            groups.push(BackupGroup::new(backup_type, backup_id));
            groups
        },
        (Some(backup_type), None) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_type() == backup_type)
                .collect()
        },
        (None, Some(backup_id)) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_id() == backup_id)
                .collect()
        },
        _ => BackupInfo::list_backup_groups(&base_path)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    },
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                }
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                }
            },
        }
    };

    groups
        .iter()
        .try_fold(Vec::new(), |mut snapshots, group| {
            let owner = match datastore.get_owner(group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return Ok(snapshots);
                },
            };

            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(snapshots);
            }

            let group_backups = group.list_backups(&datastore.base_path())?;

            snapshots.extend(
                group_backups
                    .into_iter()
                    .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
            );

            Ok(snapshots)
        })
}

fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let groups = BackupInfo::list_backup_groups(&base_path)?;

    groups.iter()
        .filter(|group| {
            let owner = match store.get_owner(&group) {
                Ok(owner) => owner,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              store.name(),
                              group,
                              err);
                    return false;
                },
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&base_path)?.len() as u64;

            let type_count = match group.backup_type() {
                "ct" => counts.ct.get_or_insert(Default::default()),
                "vm" => counts.vm.get_or_insert(Default::default()),
                "host" => counts.host.get_or_insert(Default::default()),
                _ => counts.other.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;

            Ok(counts)
        })
}
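
// Note on get_snapshots_count() above: counts are bucketed by each group's backup type
// string ("ct", "vm", "host", with everything else under "other"); each bucket tracks
// group and snapshot totals.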

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )?;
                failed_dirs
            } else {
                let privs = CachedUserInfo::new()?
                    .lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    owner,
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )?
            };
            if !failed_dirs.is_empty() {
                worker.log("Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
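
// Illustrative worker ids produced by verify() above (values are examples only):
//   "<store>:vm/100/61267980"  - single snapshot ({:08X}-formatted backup time)
//   "<store>:vm/100"           - whole group
//   "<store>"                  - whole datastore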

#[api(
    input: {
        properties: {
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune a group on the datastore
pub fn prune(
    backup_id: String,
    backup_type: String,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(&backup_type, &backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
            store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();

        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
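
// prune() returns one JSON entry per considered snapshot, e.g. (illustrative values):
//   [{ "backup-type": "vm", "backup-id": "100", "backup-time": 1630000000, "keep": true }, ...]
// A dry run returns the same shape without removing anything.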

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(store.clone()),
        auth_id.clone(),
        false,
        move |worker| crate::server::prune_datastore(
            worker.clone(),
            auth_id,
            prune_options,
            &store,
            datastore,
            dry_run
        ),
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
        .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {

    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            list.push(
                DataStoreListItem {
                    store: store.clone(),
                    comment: data["comment"].as_str().map(String::from),
                }
            );
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| bytes.freeze())
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
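
// Note on download_file_decoded() above: dispatch is on the file name's extension -
// ".didx" and ".fidx" archives are re-assembled from their (unencrypted) chunks after the
// manifest checksum is verified, while ".blob" files are streamed as stored.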

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}
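
// Note on catalog() above: "filepath" is either the literal "root" / "/" or a
// base64-encoded absolute path inside the catalog, e.g. base64("/etc") (illustrative).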

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path).await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            path, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                crate::server::spawn_internal_task(
                    create_zip(channelwriter, decoder, path.clone(), false)
                );
                Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a backup group
pub fn get_group_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = BackupGroup::new(backup_type, backup_id);

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = BackupGroup::new(backup_type, backup_id);

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new())?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(&backup_group)?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            },
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id
                    && new_owner == auth_id
            },
            (false, true) => {
                // API token owner to API token
                owner == auth_id
                    && Authid::from(new_owner.user().clone()) == auth_id
            },
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            },
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!("{} '{}' is inactive or non-existent",
              if new_owner.is_token() {
                  "API token".to_string()
              } else {
                  "user".to_string()
              },
              new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "prune-datastore",
        &Router::new()
            .post(&API_METHOD_PRUNE_DATASTORE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);