//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::{ReturnType, SubdirMap};
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::api2::helpers;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;
use crate::pxar::create_zip;

use crate::server::{jobstate::Job, WorkerTask};
use crate::tools::{
    self,
    AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
};

use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
    PRIV_DATASTORE_VERIFY,
};

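/// Check that the requesting user either has the required privileges on the
/// datastore or is the owner of the backup group in question.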
fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);

    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}

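/// Check that `auth_id` is the backup owner, also accepting an API token whose
/// owning user matches the owner.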
fn check_backup_owner(
    owner: &Authid,
    auth_id: &Authid,
) -> Result<(), Error> {
    let correct_owner = owner == auth_id
        || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
    if !correct_owner {
        bail!("backup owner check failed ({} != {})", auth_id, owner);
    }
    Ok(())
}

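/// Load the manifest of a snapshot and return it together with the list of
/// files it references (the manifest blob itself is appended to that list).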
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

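/// Like `read_backup_index`, but additionally lists files found in the
/// snapshot directory that are not referenced by the manifest.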
fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;

    let group_info = backup_groups
        .into_iter()
        .fold(Vec::new(), |mut group_info, group| {
            let owner = match datastore.get_owner(&group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return group_info;
                },
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return group_info;
            }

            let snapshots = match group.list_backups(&datastore.base_path()) {
                Ok(snapshots) => snapshots,
                Err(_) => {
                    return group_info;
                },
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return group_info;
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |last, curr| {
                    if curr.is_finished()
                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
                        curr
                    } else {
                        last
                    }
                })
                .to_owned();

            group_info.push(GroupListItem {
                backup_type: group.backup_type().to_string(),
                backup_id: group.backup_id().to_string(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
            });

            group_info
        });

    Ok(group_info)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    backup_type: String,
    backup_id: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(backup_type, backup_id);
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_group(&group)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            let mut groups = Vec::with_capacity(1);
            groups.push(BackupGroup::new(backup_type, backup_id));
            groups
        },
        (Some(backup_type), None) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_type() == backup_type)
                .collect()
        },
        (None, Some(backup_id)) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_id() == backup_id)
                .collect()
        },
        _ => BackupInfo::list_backup_groups(&base_path)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    },
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                }
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                }
            },
        }
    };

    groups
        .iter()
        .try_fold(Vec::new(), |mut snapshots, group| {
            let owner = match datastore.get_owner(group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return Ok(snapshots);
                },
            };

            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(snapshots);
            }

            let group_backups = group.list_backups(&datastore.base_path())?;

            snapshots.extend(
                group_backups
                    .into_iter()
                    .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
            );

            Ok(snapshots)
        })
}

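/// Count backup groups and snapshots per backup type ("ct", "vm", "host", other),
/// optionally restricted to groups owned by `filter_owner`.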
fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let groups = BackupInfo::list_backup_groups(&base_path)?;

    groups.iter()
        .filter(|group| {
            let owner = match store.get_owner(&group) {
                Ok(owner) => owner,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              store.name(),
                              group,
                              err);
                    return false;
                },
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&base_path)?.len() as u64;

            let type_count = match group.backup_type() {
                "ct" => counts.ct.get_or_insert(Default::default()),
                "vm" => counts.vm.get_or_insert(Default::default()),
                "host" => counts.host.get_or_insert(Default::default()),
                _ => counts.other.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;

            Ok(counts)
        })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    None,
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    None,
                )?;
                failed_dirs
            } else {
                let privs = CachedUserInfo::new()?
                    .lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(&verify_worker, worker.upid(), owner, None)?
            };
            if !failed_dirs.is_empty() {
                worker.log("Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

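/// Expand to the array of common `keep-*` prune parameter definitions so that
/// API schemas can share them.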
#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}

pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if they are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();

pub const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(ReturnType::new(false, &API_RETURN_SCHEMA_PRUNE))
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);

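/// Prune a backup group: apply the configured `keep-*` retention options and
/// remove the snapshots that are not kept (unless `dry-run` is set).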
pub fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !prune_options.keeps_something();

    if dry_run {
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                           store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();

        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
        .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {

    let (config, _digest) = datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            list.push(
                DataStoreListItem {
                    store: store.clone(),
                    comment: data["comment"].as_str().map(String::from),
                }
            );
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

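/// Stream a single raw file of a backup snapshot exactly as it is stored on disk.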
pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| bytes.freeze())
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

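/// Stream a single file of a backup snapshot in decoded form. Encrypted
/// archives are refused; index files are verified against the manifest first.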
pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

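/// Store the uploaded client log as 'client.log.blob' next to the snapshot.
/// Only the backup owner may do this, and only if no log exists yet.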
pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(&file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    helpers::list_dir_content(&mut catalog_reader, &path)
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

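/// Extract a single entry from a pxar archive of a snapshot: regular files and
/// hardlinks are streamed directly, directories are streamed as a ZIP archive.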
pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path).await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            path, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                crate::server::spawn_internal_task(
                    create_zip(channelwriter, decoder, path.clone(), false)
                );
                Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(&backup_group)?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            },
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id
                    && new_owner == auth_id
            },
            (false, true) => {
                // API token owner to API token
                owner == auth_id
                    && Authid::from(new_owner.user().clone()) == auth_id
            },
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            },
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!("{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}

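// Sub-directory API routes available for each datastore (matched below via the
// "store" parameter).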
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);