//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::pin::Pin;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::{ReturnType, SubdirMap};
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
use pxar::EntryKind;

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::api2::helpers;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;

use crate::server::{jobstate::Job, WorkerTask};
use crate::tools::{
    self,
    zip::{ZipEncoder, ZipEntry},
    AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
};

use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_VERIFY,
};

fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);

    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}

fn check_backup_owner(
    owner: &Authid,
    auth_id: &Authid,
) -> Result<(), Error> {
    let correct_owner = owner == auth_id
        || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
    if !correct_owner {
        bail!("backup owner check failed ({} != {})", auth_id, owner);
    }
    Ok(())
}
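
// A minimal illustrative sketch (not part of the original file) of the ownership
// rule encoded in check_backup_owner() above: an API token passes the check when
// the requesting auth id is the token's owning user, while two distinct users
// never match. The auth ids are made-up example values.
#[cfg(test)]
mod backup_owner_example {
    use super::*;

    #[test]
    fn token_is_owned_by_its_user() {
        let token: Authid = "alice@pbs!mytoken".parse().unwrap();
        let alice: Authid = "alice@pbs".parse().unwrap();
        let bob: Authid = "bob@pbs".parse().unwrap();

        // a token counts as owned by its user ...
        assert!(check_backup_owner(&token, &alice).is_ok());
        // ... while a different user fails the owner check
        assert!(check_backup_owner(&token, &bob).is_err());
    }
}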

fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;

    let group_info = backup_groups
        .into_iter()
        .fold(Vec::new(), |mut group_info, group| {
            let owner = match datastore.get_owner(&group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return group_info;
                },
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return group_info;
            }

            let snapshots = match group.list_backups(&datastore.base_path()) {
                Ok(snapshots) => snapshots,
                Err(_) => {
                    return group_info;
                },
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return group_info;
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |last, curr| {
                    if curr.is_finished()
                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
                        curr
                    } else {
                        last
                    }
                })
                .to_owned();

            group_info.push(GroupListItem {
                backup_type: group.backup_type().to_string(),
                backup_id: group.backup_id().to_string(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
            });

            group_info
        });

    Ok(group_info)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots (
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            let mut groups = Vec::with_capacity(1);
            groups.push(BackupGroup::new(backup_type, backup_id));
            groups
        },
        (Some(backup_type), None) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_type() == backup_type)
                .collect()
        },
        (None, Some(backup_id)) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_id() == backup_id)
                .collect()
        },
        _ => BackupInfo::list_backup_groups(&base_path)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    },
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                }
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                }
            },
        }
    };

    groups
        .iter()
        .try_fold(Vec::new(), |mut snapshots, group| {
            let owner = match datastore.get_owner(group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return Ok(snapshots);
                },
            };

            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(snapshots);
            }

            let group_backups = group.list_backups(&datastore.base_path())?;

            snapshots.extend(
                group_backups
                    .into_iter()
                    .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
            );

            Ok(snapshots)
        })
}

fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let groups = BackupInfo::list_backup_groups(&base_path)?;

    groups.iter()
        .filter(|group| {
            let owner = match store.get_owner(&group) {
                Ok(owner) => owner,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              store.name(),
                              group,
                              err);
                    return false;
                },
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&base_path)?.len() as u64;

            let type_count = match group.backup_type() {
                "ct" => counts.ct.get_or_insert(Default::default()),
                "vm" => counts.vm.get_or_insert(Default::default()),
                "host" => counts.host.get_or_insert(Default::default()),
                _ => counts.other.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;

            Ok(counts)
        })
}
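
// Illustrative aggregation for get_snapshots_count() above (numbers made up): a
// datastore with two "vm" groups holding 10 snapshots in total and one "host"
// group with 3 snapshots yields counts.vm = Some(groups: 2, snapshots: 10) and
// counts.host = Some(groups: 1, snapshots: 3), while counts.ct and counts.other
// stay None because no such groups exist.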

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    None,
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    None,
                )?;
                failed_dirs
            } else {
                let privs = CachedUserInfo::new()?
                    .lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(&verify_worker, worker.upid(), owner, None)?
            };
            if !failed_dirs.is_empty() {
                worker.log("Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
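
// Illustrative worker ids produced by verify() above (store, group and time are
// made-up example values; the snapshot time is printed as 8 uppercase hex digits):
//   verify_snapshot -> "store1:vm/100/5FCA9C70"
//   verify_group    -> "store1:vm/100"
//   verify          -> "store1"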

#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
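
// Illustrative expansion (not part of the original file) of the macro above; the
// parameter tuples shown are just the ones used by API_METHOD_PRUNE below:
//
//     add_common_prune_prameters!([
//         ("backup-id", false, &BACKUP_ID_SCHEMA),
//     ],[
//         ("store", false, &DATASTORE_SCHEMA),
//     ])
//
// expands to a flat array that starts with "backup-id", contains the six common
// "keep-*" options, and ends with "store" - ready to be passed to ObjectSchema::new.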

pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if they are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();

pub const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(ReturnType::new(false, &API_RETURN_SCHEMA_PRUNE))
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);

pub fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                           store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();


        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
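
// Illustrative shape of one element of the list returned by prune() (values are
// made up; "backup-time" is the epoch timestamp of the snapshot):
//   { "backup-type": "vm", "backup-id": "100", "backup-time": 1609455600, "keep": true }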

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
        .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {

    let (config, _digest) = datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            list.push(
                DataStoreListItem {
                    store: store.clone(),
                    comment: data["comment"].as_str().map(String::from),
                }
            );
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| bytes.freeze())
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    helpers::list_dir_content(&mut catalog_reader, &path)
}

fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or_else(|| format_err!("filepath looks strange '{}'", filepath))?;
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or_else(|| format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard the last component
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creating of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error during finishing of zip: {}", err))
                });

                Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
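
// Illustrative "filepath" parameter for pxar_file_download() above (values made up):
// base64("root.pxar.didx/etc/hostname") would select /etc/hostname inside the
// snapshot's root.pxar.didx archive; requesting a directory entry streams it back
// as a zip archive built by recurse_files() instead of a single raw file.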

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(&backup_group)?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            },
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id
                    && new_owner == auth_id
            },
            (false, true) => {
                // API token owner to API token
                owner == auth_id
                    && Authid::from(new_owner.user().clone()) == auth_id
            },
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            },
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!("{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);