//! Datastore admin API endpoints (src/api2/admin/datastore.rs).
use std::collections::{HashMap, HashSet};
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::api::{
    api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, Permission
};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
use pxar::EntryKind;

use crate::api2::node::rrd::create_value_from_rrd;
use crate::api2::types::*;
use crate::backup::*;
use crate::config::acl::{
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY,
    PRIV_DATASTORE_READ,
    PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP,
};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::datastore;
use crate::server::WorkerTask;
use crate::tools::{
    self,
    zip::{ZipEncoder, ZipEntry},
    AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
};
e7cb4dc5
WB
47fn check_backup_owner(
48 store: &DataStore,
49 group: &BackupGroup,
50 userid: &Userid,
51) -> Result<(), Error> {
54552dda
DM
52 let owner = store.get_owner(group)?;
53 if &owner != userid {
54 bail!("backup owner check failed ({} != {})", userid, owner);
55 }
56 Ok(())
57}
58
e7cb4dc5
WB
59fn read_backup_index(
60 store: &DataStore,
61 backup_dir: &BackupDir,
62) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
8c70e3eb 63
ff86ef00 64 let (manifest, index_size) = store.load_manifest(backup_dir)?;
8c70e3eb 65
09b1f7b2
DM
66 let mut result = Vec::new();
67 for item in manifest.files() {
68 result.push(BackupContent {
69 filename: item.filename.clone(),
f28d9088 70 crypt_mode: Some(item.crypt_mode),
09b1f7b2
DM
71 size: Some(item.size),
72 });
8c70e3eb
DM
73 }
74
09b1f7b2 75 result.push(BackupContent {
96d65fbc 76 filename: MANIFEST_BLOB_NAME.to_string(),
882c0823
FG
77 crypt_mode: match manifest.signature {
78 Some(_) => Some(CryptMode::SignOnly),
79 None => Some(CryptMode::None),
80 },
09b1f7b2
DM
81 size: Some(index_size),
82 });
4f1e40a2 83
70030b43 84 Ok((manifest, result))
8c70e3eb
DM
85}
86
1c090810
DC
87fn get_all_snapshot_files(
88 store: &DataStore,
89 info: &BackupInfo,
70030b43
DM
90) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
91
92 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
1c090810
DC
93
94 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
95 acc.insert(item.filename.clone());
96 acc
97 });
98
99 for file in &info.files {
100 if file_set.contains(file) { continue; }
f28d9088
WB
101 files.push(BackupContent {
102 filename: file.to_string(),
103 size: None,
104 crypt_mode: None,
105 });
1c090810
DC
106 }
107
70030b43 108 Ok((manifest, files))
1c090810
DC
109}
110
8f579717
DM
111fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
112
113 let mut group_hash = HashMap::new();
114
115 for info in backup_list {
9b492eb2 116 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
8f579717
DM
117 let time_list = group_hash.entry(group_id).or_insert(vec![]);
118 time_list.push(info);
119 }
120
121 group_hash
122}
123
b31c8019
DM
124#[api(
125 input: {
126 properties: {
127 store: {
128 schema: DATASTORE_SCHEMA,
129 },
130 },
131 },
132 returns: {
133 type: Array,
134 description: "Returns the list of backup groups.",
135 items: {
136 type: GroupListItem,
137 }
138 },
bb34b589 139 access: {
54552dda
DM
140 permission: &Permission::Privilege(
141 &["datastore", "{store}"],
142 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
143 true),
bb34b589 144 },
b31c8019
DM
145)]
146/// List backup groups.
ad20d198 147fn list_groups(
b31c8019 148 store: String,
54552dda 149 rpcenv: &mut dyn RpcEnvironment,
b31c8019 150) -> Result<Vec<GroupListItem>, Error> {
812c6f87 151
e7cb4dc5 152 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
54552dda 153 let user_info = CachedUserInfo::new()?;
e7cb4dc5 154 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
54552dda 155
b31c8019 156 let datastore = DataStore::lookup_datastore(&store)?;
812c6f87 157
c0977501 158 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
812c6f87
DM
159
160 let group_hash = group_backups(backup_list);
161
b31c8019 162 let mut groups = Vec::new();
812c6f87
DM
163
164 for (_group_id, mut list) in group_hash {
165
2b01a225 166 BackupInfo::sort_list(&mut list, false);
812c6f87
DM
167
168 let info = &list[0];
54552dda 169
9b492eb2 170 let group = info.backup_dir.group();
812c6f87 171
54552dda 172 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
04b0ca8b 173 let owner = datastore.get_owner(group)?;
20813274
WB
174 if !list_all && owner != userid {
175 continue;
54552dda
DM
176 }
177
b31c8019
DM
178 let result_item = GroupListItem {
179 backup_type: group.backup_type().to_string(),
180 backup_id: group.backup_id().to_string(),
6a7be83e 181 last_backup: info.backup_dir.backup_time(),
b31c8019
DM
182 backup_count: list.len() as u64,
183 files: info.files.clone(),
04b0ca8b 184 owner: Some(owner),
b31c8019
DM
185 };
186 groups.push(result_item);
812c6f87
DM
187 }
188
b31c8019 189 Ok(groups)
812c6f87 190}
8f579717 191
09b1f7b2
DM
192#[api(
193 input: {
194 properties: {
195 store: {
196 schema: DATASTORE_SCHEMA,
197 },
198 "backup-type": {
199 schema: BACKUP_TYPE_SCHEMA,
200 },
201 "backup-id": {
202 schema: BACKUP_ID_SCHEMA,
203 },
204 "backup-time": {
205 schema: BACKUP_TIME_SCHEMA,
206 },
207 },
208 },
209 returns: {
210 type: Array,
211 description: "Returns the list of archive files inside a backup snapshots.",
212 items: {
213 type: BackupContent,
214 }
215 },
bb34b589 216 access: {
54552dda
DM
217 permission: &Permission::Privilege(
218 &["datastore", "{store}"],
219 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
220 true),
bb34b589 221 },
09b1f7b2
DM
222)]
223/// List snapshot files.
ea5f547f 224pub fn list_snapshot_files(
09b1f7b2
DM
225 store: String,
226 backup_type: String,
227 backup_id: String,
228 backup_time: i64,
01a13423 229 _info: &ApiMethod,
54552dda 230 rpcenv: &mut dyn RpcEnvironment,
09b1f7b2 231) -> Result<Vec<BackupContent>, Error> {
01a13423 232
e7cb4dc5 233 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
54552dda 234 let user_info = CachedUserInfo::new()?;
e7cb4dc5 235 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
54552dda 236
09b1f7b2 237 let datastore = DataStore::lookup_datastore(&store)?;
54552dda 238
e0e5b442 239 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
01a13423 240
54552dda 241 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
e7cb4dc5 242 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
54552dda 243
d7c24397 244 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
01a13423 245
70030b43
DM
246 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
247
248 Ok(files)
01a13423
DM
249}
250
68a6a0ee
DM
251#[api(
252 input: {
253 properties: {
254 store: {
255 schema: DATASTORE_SCHEMA,
256 },
257 "backup-type": {
258 schema: BACKUP_TYPE_SCHEMA,
259 },
260 "backup-id": {
261 schema: BACKUP_ID_SCHEMA,
262 },
263 "backup-time": {
264 schema: BACKUP_TIME_SCHEMA,
265 },
266 },
267 },
bb34b589 268 access: {
54552dda
DM
269 permission: &Permission::Privilege(
270 &["datastore", "{store}"],
271 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
272 true),
bb34b589 273 },
68a6a0ee
DM
274)]
275/// Delete backup snapshot.
276fn delete_snapshot(
277 store: String,
278 backup_type: String,
279 backup_id: String,
280 backup_time: i64,
6f62c924 281 _info: &ApiMethod,
54552dda 282 rpcenv: &mut dyn RpcEnvironment,
6f62c924
DM
283) -> Result<Value, Error> {
284
e7cb4dc5 285 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
54552dda 286 let user_info = CachedUserInfo::new()?;
e7cb4dc5 287 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
54552dda 288
e0e5b442 289 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
6f62c924 290
68a6a0ee 291 let datastore = DataStore::lookup_datastore(&store)?;
6f62c924 292
54552dda 293 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
e7cb4dc5 294 if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
54552dda 295
c9756b40 296 datastore.remove_backup_dir(&snapshot, false)?;
6f62c924
DM
297
298 Ok(Value::Null)
299}
300
fc189b19
DM
301#[api(
302 input: {
303 properties: {
304 store: {
305 schema: DATASTORE_SCHEMA,
306 },
307 "backup-type": {
308 optional: true,
309 schema: BACKUP_TYPE_SCHEMA,
310 },
311 "backup-id": {
312 optional: true,
313 schema: BACKUP_ID_SCHEMA,
314 },
315 },
316 },
317 returns: {
318 type: Array,
319 description: "Returns the list of snapshots.",
320 items: {
321 type: SnapshotListItem,
322 }
323 },
bb34b589 324 access: {
54552dda
DM
325 permission: &Permission::Privilege(
326 &["datastore", "{store}"],
327 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
328 true),
bb34b589 329 },
fc189b19
DM
330)]
331/// List backup snapshots.
f24fc116 332pub fn list_snapshots (
54552dda
DM
333 store: String,
334 backup_type: Option<String>,
335 backup_id: Option<String>,
336 _param: Value,
184f17af 337 _info: &ApiMethod,
54552dda 338 rpcenv: &mut dyn RpcEnvironment,
fc189b19 339) -> Result<Vec<SnapshotListItem>, Error> {
184f17af 340
e7cb4dc5 341 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
54552dda 342 let user_info = CachedUserInfo::new()?;
e7cb4dc5 343 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
184f17af 344
54552dda 345 let datastore = DataStore::lookup_datastore(&store)?;
184f17af 346
c0977501 347 let base_path = datastore.base_path();
184f17af 348
15c847f1 349 let backup_list = BackupInfo::list_backups(&base_path)?;
184f17af
DM
350
351 let mut snapshots = vec![];
352
c0977501 353 for info in backup_list {
15c847f1 354 let group = info.backup_dir.group();
54552dda 355 if let Some(ref backup_type) = backup_type {
15c847f1
DM
356 if backup_type != group.backup_type() { continue; }
357 }
54552dda 358 if let Some(ref backup_id) = backup_id {
15c847f1
DM
359 if backup_id != group.backup_id() { continue; }
360 }
a17a0e7a 361
54552dda 362 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
04b0ca8b
DC
363 let owner = datastore.get_owner(group)?;
364
20813274
WB
365 if !list_all && owner != userid {
366 continue;
54552dda
DM
367 }
368
1c090810
DC
369 let mut size = None;
370
3b2046d2 371 let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
70030b43 372 Ok((manifest, files)) => {
1c090810 373 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
70030b43
DM
374 // extract the first line from notes
375 let comment: Option<String> = manifest.unprotected["notes"]
376 .as_str()
377 .and_then(|notes| notes.lines().next())
378 .map(String::from);
379
3b2046d2
TL
380 let verify = manifest.unprotected["verify_state"].clone();
381 let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
382 Ok(verify) => verify,
383 Err(err) => {
384 eprintln!("error parsing verification state : '{}'", err);
385 None
386 }
387 };
388
389 (comment, verify, files)
1c090810
DC
390 },
391 Err(err) => {
392 eprintln!("error during snapshot file listing: '{}'", err);
70030b43 393 (
3b2046d2 394 None,
70030b43
DM
395 None,
396 info
397 .files
398 .iter()
399 .map(|x| BackupContent {
400 filename: x.to_string(),
401 size: None,
402 crypt_mode: None,
403 })
404 .collect()
405 )
1c090810
DC
406 },
407 };
408
409 let result_item = SnapshotListItem {
fc189b19
DM
410 backup_type: group.backup_type().to_string(),
411 backup_id: group.backup_id().to_string(),
6a7be83e 412 backup_time: info.backup_dir.backup_time(),
70030b43 413 comment,
3b2046d2 414 verification,
1c090810
DC
415 files,
416 size,
04b0ca8b 417 owner: Some(owner),
fc189b19 418 };
a17a0e7a 419
a17a0e7a 420 snapshots.push(result_item);
184f17af
DM
421 }
422
fc189b19 423 Ok(snapshots)
184f17af
DM
424}
425
1dc117bb
DM
426#[api(
427 input: {
428 properties: {
429 store: {
430 schema: DATASTORE_SCHEMA,
431 },
432 },
433 },
434 returns: {
435 type: StorageStatus,
436 },
bb34b589 437 access: {
54552dda 438 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
bb34b589 439 },
1dc117bb
DM
440)]
441/// Get datastore status.
ea5f547f 442pub fn status(
1dc117bb 443 store: String,
0eecf38f
DM
444 _info: &ApiMethod,
445 _rpcenv: &mut dyn RpcEnvironment,
1dc117bb 446) -> Result<StorageStatus, Error> {
1dc117bb 447 let datastore = DataStore::lookup_datastore(&store)?;
33070956 448 crate::tools::disks::disk_usage(&datastore.base_path())
0eecf38f
DM
449}
450
c2009e53
DM
451#[api(
452 input: {
453 properties: {
454 store: {
455 schema: DATASTORE_SCHEMA,
456 },
457 "backup-type": {
458 schema: BACKUP_TYPE_SCHEMA,
459 optional: true,
460 },
461 "backup-id": {
462 schema: BACKUP_ID_SCHEMA,
463 optional: true,
464 },
465 "backup-time": {
466 schema: BACKUP_TIME_SCHEMA,
467 optional: true,
468 },
469 },
470 },
471 returns: {
472 schema: UPID_SCHEMA,
473 },
474 access: {
475 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
476 },
477)]
478/// Verify backups.
479///
480/// This function can verify a single backup snapshot, all backup from a backup group,
481/// or all backups in the datastore.
482pub fn verify(
483 store: String,
484 backup_type: Option<String>,
485 backup_id: Option<String>,
486 backup_time: Option<i64>,
487 rpcenv: &mut dyn RpcEnvironment,
488) -> Result<Value, Error> {
489 let datastore = DataStore::lookup_datastore(&store)?;
490
8ea00f6e 491 let worker_id;
c2009e53
DM
492
493 let mut backup_dir = None;
494 let mut backup_group = None;
133042b5 495 let mut worker_type = "verify";
c2009e53
DM
496
497 match (backup_type, backup_id, backup_time) {
498 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
2162e2c1 499 worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
e0e5b442 500 let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
c2009e53 501 backup_dir = Some(dir);
133042b5 502 worker_type = "verify_snapshot";
c2009e53
DM
503 }
504 (Some(backup_type), Some(backup_id), None) => {
2162e2c1 505 worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
c2009e53 506 let group = BackupGroup::new(backup_type, backup_id);
c2009e53 507 backup_group = Some(group);
133042b5 508 worker_type = "verify_group";
c2009e53
DM
509 }
510 (None, None, None) => {
8ea00f6e 511 worker_id = store.clone();
c2009e53 512 }
5a718dce 513 _ => bail!("parameters do not specify a backup group or snapshot"),
c2009e53
DM
514 }
515
e7cb4dc5 516 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
c2009e53
DM
517 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
518
519 let upid_str = WorkerTask::new_thread(
133042b5 520 worker_type,
e7cb4dc5
WB
521 Some(worker_id.clone()),
522 userid,
523 to_stdout,
524 move |worker| {
4f09d310
DM
525 let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
526 let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
527
adfdc369 528 let failed_dirs = if let Some(backup_dir) = backup_dir {
adfdc369 529 let mut res = Vec::new();
f6b1d1cc
WB
530 if !verify_backup_dir(
531 datastore,
532 &backup_dir,
533 verified_chunks,
534 corrupt_chunks,
535 worker.clone(),
536 worker.upid().clone(),
537 )? {
adfdc369
DC
538 res.push(backup_dir.to_string());
539 }
540 res
c2009e53 541 } else if let Some(backup_group) = backup_group {
63d9aca9
DM
542 let (_count, failed_dirs) = verify_backup_group(
543 datastore,
544 &backup_group,
545 verified_chunks,
546 corrupt_chunks,
547 None,
548 worker.clone(),
f6b1d1cc 549 worker.upid(),
63d9aca9
DM
550 )?;
551 failed_dirs
c2009e53 552 } else {
f6b1d1cc 553 verify_all_backups(datastore, worker.clone(), worker.upid())?
c2009e53 554 };
adfdc369
DC
555 if failed_dirs.len() > 0 {
556 worker.log("Failed to verify following snapshots:");
557 for dir in failed_dirs {
558 worker.log(format!("\t{}", dir));
559 }
1ffe0301 560 bail!("verification failed - please check the log for details");
c2009e53
DM
561 }
562 Ok(())
e7cb4dc5
WB
563 },
564 )?;
c2009e53
DM
565
566 Ok(json!(upid_str))
567}
/// Expand a schema parameter list with the shared "keep-*" prune options.
///
/// Accepts one or two bracketed token lists; the first is spliced before the
/// keep-* entries, the second after. (Note: the name's "prameters" typo is
/// kept because the macro is exported and referenced elsewhere.)
#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
611
db1e061d
DM
612pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
613 "Returns the list of snapshots and a flag indicating if there are kept or removed.",
660a3489 614 &PruneListItem::API_SCHEMA
db1e061d
DM
615).schema();
616
0ab08ac9
DM
617const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
618 &ApiHandler::Sync(&prune),
255f378a 619 &ObjectSchema::new(
0ab08ac9
DM
620 "Prune the datastore.",
621 &add_common_prune_prameters!([
622 ("backup-id", false, &BACKUP_ID_SCHEMA),
623 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
3b03abfe
DM
624 ("dry-run", true, &BooleanSchema::new(
625 "Just show what prune would do, but do not delete anything.")
626 .schema()
627 ),
0ab08ac9 628 ],[
66c49c21 629 ("store", false, &DATASTORE_SCHEMA),
0ab08ac9 630 ])
db1e061d
DM
631 ))
632 .returns(&API_RETURN_SCHEMA_PRUNE)
633 .access(None, &Permission::Privilege(
54552dda
DM
634 &["datastore", "{store}"],
635 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
636 true)
637);
255f378a 638
83b7db02
DM
639fn prune(
640 param: Value,
641 _info: &ApiMethod,
54552dda 642 rpcenv: &mut dyn RpcEnvironment,
83b7db02
DM
643) -> Result<Value, Error> {
644
54552dda 645 let store = tools::required_string_param(&param, "store")?;
9fdc3ef4
DM
646 let backup_type = tools::required_string_param(&param, "backup-type")?;
647 let backup_id = tools::required_string_param(&param, "backup-id")?;
648
e7cb4dc5 649 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
54552dda 650 let user_info = CachedUserInfo::new()?;
e7cb4dc5 651 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
54552dda 652
3b03abfe
DM
653 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
654
9fdc3ef4
DM
655 let group = BackupGroup::new(backup_type, backup_id);
656
54552dda
DM
657 let datastore = DataStore::lookup_datastore(&store)?;
658
659 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
e7cb4dc5 660 if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
83b7db02 661
9e3f0088
DM
662 let prune_options = PruneOptions {
663 keep_last: param["keep-last"].as_u64(),
102d8d41 664 keep_hourly: param["keep-hourly"].as_u64(),
9e3f0088
DM
665 keep_daily: param["keep-daily"].as_u64(),
666 keep_weekly: param["keep-weekly"].as_u64(),
667 keep_monthly: param["keep-monthly"].as_u64(),
668 keep_yearly: param["keep-yearly"].as_u64(),
669 };
8f579717 670
503995c7
DM
671 let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
672
dda70154
DM
673 let mut prune_result = Vec::new();
674
675 let list = group.list_backups(&datastore.base_path())?;
676
677 let mut prune_info = compute_prune_info(list, &prune_options)?;
678
679 prune_info.reverse(); // delete older snapshots first
680
681 let keep_all = !prune_options.keeps_something();
682
683 if dry_run {
684 for (info, mut keep) in prune_info {
685 if keep_all { keep = true; }
686
687 let backup_time = info.backup_dir.backup_time();
688 let group = info.backup_dir.group();
689
690 prune_result.push(json!({
691 "backup-type": group.backup_type(),
692 "backup-id": group.backup_id(),
6a7be83e 693 "backup-time": backup_time,
dda70154
DM
694 "keep": keep,
695 }));
696 }
697 return Ok(json!(prune_result));
698 }
699
700
163e9bbe 701 // We use a WorkerTask just to have a task log, but run synchrounously
e7cb4dc5 702 let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
dda70154 703
f1539300
SR
704 if keep_all {
705 worker.log("No prune selection - keeping all files.");
706 } else {
707 worker.log(format!("retention options: {}", prune_options.cli_options_string()));
708 worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
709 store, backup_type, backup_id));
710 }
3b03abfe 711
f1539300
SR
712 for (info, mut keep) in prune_info {
713 if keep_all { keep = true; }
dda70154 714
f1539300
SR
715 let backup_time = info.backup_dir.backup_time();
716 let timestamp = info.backup_dir.backup_time_string();
717 let group = info.backup_dir.group();
3b03abfe 718
3b03abfe 719
f1539300
SR
720 let msg = format!(
721 "{}/{}/{} {}",
722 group.backup_type(),
723 group.backup_id(),
724 timestamp,
725 if keep { "keep" } else { "remove" },
726 );
727
728 worker.log(msg);
729
730 prune_result.push(json!({
731 "backup-type": group.backup_type(),
732 "backup-id": group.backup_id(),
733 "backup-time": backup_time,
734 "keep": keep,
735 }));
736
737 if !(dry_run || keep) {
738 if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
739 worker.warn(
740 format!(
741 "failed to remove dir {:?}: {}",
742 info.backup_dir.relative_path(), err
743 )
744 );
8f0b4c1f 745 }
8f579717 746 }
f1539300 747 }
dd8e744f 748
f1539300 749 worker.log_result(&Ok(()));
83b7db02 750
dda70154 751 Ok(json!(prune_result))
83b7db02
DM
752}
753
dfc58d47
DM
754#[api(
755 input: {
756 properties: {
757 store: {
758 schema: DATASTORE_SCHEMA,
759 },
760 },
761 },
762 returns: {
763 schema: UPID_SCHEMA,
764 },
bb34b589 765 access: {
54552dda 766 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
bb34b589 767 },
dfc58d47
DM
768)]
769/// Start garbage collection.
6049b71f 770fn start_garbage_collection(
dfc58d47 771 store: String,
6049b71f 772 _info: &ApiMethod,
dd5495d6 773 rpcenv: &mut dyn RpcEnvironment,
6049b71f 774) -> Result<Value, Error> {
15e9b4ed 775
3e6a7dee 776 let datastore = DataStore::lookup_datastore(&store)?;
15e9b4ed 777
5a778d92 778 println!("Starting garbage collection on store {}", store);
15e9b4ed 779
0f778e06 780 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
15e9b4ed 781
0f778e06 782 let upid_str = WorkerTask::new_thread(
e7cb4dc5
WB
783 "garbage_collection",
784 Some(store.clone()),
785 Userid::root_userid().clone(),
786 to_stdout,
787 move |worker| {
0f778e06 788 worker.log(format!("starting garbage collection on store {}", store));
f6b1d1cc 789 datastore.garbage_collection(&*worker, worker.upid())
e7cb4dc5
WB
790 },
791 )?;
0f778e06
DM
792
793 Ok(json!(upid_str))
15e9b4ed
DM
794}
795
a92830dc
DM
796#[api(
797 input: {
798 properties: {
799 store: {
800 schema: DATASTORE_SCHEMA,
801 },
802 },
803 },
804 returns: {
805 type: GarbageCollectionStatus,
bb34b589
DM
806 },
807 access: {
808 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
809 },
a92830dc
DM
810)]
811/// Garbage collection status.
5eeea607 812pub fn garbage_collection_status(
a92830dc 813 store: String,
6049b71f 814 _info: &ApiMethod,
dd5495d6 815 _rpcenv: &mut dyn RpcEnvironment,
a92830dc 816) -> Result<GarbageCollectionStatus, Error> {
691c89a0 817
f2b99c34
DM
818 let datastore = DataStore::lookup_datastore(&store)?;
819
f2b99c34 820 let status = datastore.last_gc_status();
691c89a0 821
a92830dc 822 Ok(status)
691c89a0
DM
823}
824
bb34b589 825#[api(
30fb6025
DM
826 returns: {
827 description: "List the accessible datastores.",
828 type: Array,
829 items: {
830 description: "Datastore name and description.",
831 properties: {
832 store: {
833 schema: DATASTORE_SCHEMA,
834 },
835 comment: {
836 optional: true,
837 schema: SINGLE_LINE_COMMENT_SCHEMA,
838 },
839 },
840 },
841 },
bb34b589 842 access: {
54552dda 843 permission: &Permission::Anybody,
bb34b589
DM
844 },
845)]
846/// Datastore list
6049b71f
DM
847fn get_datastore_list(
848 _param: Value,
849 _info: &ApiMethod,
54552dda 850 rpcenv: &mut dyn RpcEnvironment,
6049b71f 851) -> Result<Value, Error> {
15e9b4ed 852
d0187a51 853 let (config, _digest) = datastore::config()?;
15e9b4ed 854
e7cb4dc5 855 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
54552dda
DM
856 let user_info = CachedUserInfo::new()?;
857
30fb6025 858 let mut list = Vec::new();
54552dda 859
30fb6025 860 for (store, (_, data)) in &config.sections {
e7cb4dc5 861 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
54552dda 862 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
30fb6025
DM
863 if allowed {
864 let mut entry = json!({ "store": store });
865 if let Some(comment) = data["comment"].as_str() {
866 entry["comment"] = comment.into();
867 }
868 list.push(entry);
869 }
54552dda
DM
870 }
871
30fb6025 872 Ok(list.into())
15e9b4ed
DM
873}
874
0ab08ac9
DM
875#[sortable]
876pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
877 &ApiHandler::AsyncHttp(&download_file),
878 &ObjectSchema::new(
879 "Download single raw file from backup snapshot.",
880 &sorted!([
66c49c21 881 ("store", false, &DATASTORE_SCHEMA),
0ab08ac9
DM
882 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
883 ("backup-id", false, &BACKUP_ID_SCHEMA),
884 ("backup-time", false, &BACKUP_TIME_SCHEMA),
4191018c 885 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
0ab08ac9
DM
886 ]),
887 )
54552dda
DM
888).access(None, &Permission::Privilege(
889 &["datastore", "{store}"],
890 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
891 true)
892);
691c89a0 893
9e47c0a5
DM
894fn download_file(
895 _parts: Parts,
896 _req_body: Body,
897 param: Value,
255f378a 898 _info: &ApiMethod,
54552dda 899 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 900) -> ApiResponseFuture {
9e47c0a5 901
ad51d02a
DM
902 async move {
903 let store = tools::required_string_param(&param, "store")?;
ad51d02a 904 let datastore = DataStore::lookup_datastore(store)?;
f14a8c9a 905
e7cb4dc5 906 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
54552dda 907 let user_info = CachedUserInfo::new()?;
e7cb4dc5 908 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
54552dda 909
ad51d02a 910 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
9e47c0a5 911
ad51d02a
DM
912 let backup_type = tools::required_string_param(&param, "backup-type")?;
913 let backup_id = tools::required_string_param(&param, "backup-id")?;
914 let backup_time = tools::required_integer_param(&param, "backup-time")?;
9e47c0a5 915
e0e5b442 916 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
54552dda
DM
917
918 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
e7cb4dc5 919 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
54552dda 920
abdb9763 921 println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
9e47c0a5 922
ad51d02a
DM
923 let mut path = datastore.base_path();
924 path.push(backup_dir.relative_path());
925 path.push(&file_name);
926
ba694720 927 let file = tokio::fs::File::open(&path)
8aa67ee7
WB
928 .await
929 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
ad51d02a 930
db0cb9ce 931 let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
ba694720
DC
932 .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
933 .map_err(move |err| {
934 eprintln!("error during streaming of '{:?}' - {}", &path, err);
935 err
936 });
ad51d02a 937 let body = Body::wrap_stream(payload);
9e47c0a5 938
ad51d02a
DM
939 // fixme: set other headers ?
940 Ok(Response::builder()
941 .status(StatusCode::OK)
942 .header(header::CONTENT_TYPE, "application/octet-stream")
943 .body(body)
944 .unwrap())
945 }.boxed()
9e47c0a5
DM
946}
947
6ef9bb59
DC
948#[sortable]
949pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
950 &ApiHandler::AsyncHttp(&download_file_decoded),
951 &ObjectSchema::new(
952 "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
953 &sorted!([
954 ("store", false, &DATASTORE_SCHEMA),
955 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
956 ("backup-id", false, &BACKUP_ID_SCHEMA),
957 ("backup-time", false, &BACKUP_TIME_SCHEMA),
958 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
959 ]),
960 )
961).access(None, &Permission::Privilege(
962 &["datastore", "{store}"],
963 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
964 true)
965);
966
/// Download a file of a backup snapshot with its index layer decoded,
/// streaming the raw (reassembled) content to the client.
///
/// Supports `.didx` (dynamic index), `.fidx` (fixed index) and `.blob`
/// files; refuses files whose manifest entry says `CryptMode::Encrypt`,
/// because the server cannot decode those without the client key.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Users without the datastore-wide read privilege may only access
        // snapshots they own.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        // Reject encrypted files early — they cannot be decoded server-side.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Choose the decoder from the file-name extension (last '.'-separated part).
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // Verify the index checksum against the manifest before streaming.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                // Verify the index checksum against the manifest before streaming.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // Larger stream buffer (4 MiB) for fixed indexes.
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1066
// API method descriptor for `upload_backup_log`; registered below under
// the "upload-backup-log" subdir. Requires the backup privilege, and the
// handler additionally enforces snapshot ownership.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
9e47c0a5 1083
07ee2235
DM
/// Handler for `API_METHOD_UPLOAD_BACKUP_LOG`: store the uploaded request
/// body as 'client.log.blob' inside an existing backup snapshot.
///
/// Only the snapshot owner may upload (enforced via `check_backup_owner`),
/// and a log can be uploaded at most once per snapshot.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // NOTE(review): exists() + replace_file is not atomic — a concurrent
        // upload could race past this check; presumably acceptable here.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        // Collect the entire request body into memory.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1135
5b1cfa01
DC
1136#[api(
1137 input: {
1138 properties: {
1139 store: {
1140 schema: DATASTORE_SCHEMA,
1141 },
1142 "backup-type": {
1143 schema: BACKUP_TYPE_SCHEMA,
1144 },
1145 "backup-id": {
1146 schema: BACKUP_ID_SCHEMA,
1147 },
1148 "backup-time": {
1149 schema: BACKUP_TIME_SCHEMA,
1150 },
1151 "filepath": {
1152 description: "Base64 encoded path.",
1153 type: String,
1154 }
1155 },
1156 },
1157 access: {
1158 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1159 },
1160)]
1161/// Get the entries of the given path of the catalog
1162fn catalog(
1163 store: String,
1164 backup_type: String,
1165 backup_id: String,
1166 backup_time: i64,
1167 filepath: String,
1168 _param: Value,
1169 _info: &ApiMethod,
1170 rpcenv: &mut dyn RpcEnvironment,
1171) -> Result<Value, Error> {
1172 let datastore = DataStore::lookup_datastore(&store)?;
1173
e7cb4dc5 1174 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
5b1cfa01 1175 let user_info = CachedUserInfo::new()?;
e7cb4dc5 1176 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
5b1cfa01 1177
e0e5b442 1178 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
5b1cfa01
DC
1179
1180 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
e7cb4dc5 1181 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
5b1cfa01 1182
9238cdf5
FG
1183 let file_name = CATALOG_NAME;
1184
2d55beec 1185 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1186 for file in files {
1187 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1188 bail!("cannot decode '{}' - is encrypted", file_name);
1189 }
1190 }
1191
5b1cfa01
DC
1192 let mut path = datastore.base_path();
1193 path.push(backup_dir.relative_path());
9238cdf5 1194 path.push(file_name);
5b1cfa01
DC
1195
1196 let index = DynamicIndexReader::open(&path)
1197 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1198
2d55beec
FG
1199 let (csum, size) = index.compute_csum();
1200 manifest.verify_file(&file_name, &csum, size)?;
1201
14f6c9cb 1202 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
5b1cfa01
DC
1203 let reader = BufferedDynamicReader::new(index, chunk_reader);
1204
1205 let mut catalog_reader = CatalogReader::new(reader);
1206 let mut current = catalog_reader.root()?;
1207 let mut components = vec![];
1208
1209
1210 if filepath != "root" {
1211 components = base64::decode(filepath)?;
1212 if components.len() > 0 && components[0] == '/' as u8 {
1213 components.remove(0);
1214 }
1215 for component in components.split(|c| *c == '/' as u8) {
1216 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1217 current = entry;
1218 } else {
1219 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1220 }
1221 }
1222 }
1223
1224 let mut res = Vec::new();
1225
1226 for direntry in catalog_reader.read_dir(&current)? {
1227 let mut components = components.clone();
1228 components.push('/' as u8);
1229 components.extend(&direntry.name);
1230 let path = base64::encode(components);
1231 let text = String::from_utf8_lossy(&direntry.name);
1232 let mut entry = json!({
1233 "filepath": path,
1234 "text": text,
1235 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1236 "leaf": true,
1237 });
1238 match direntry.attr {
1239 DirEntryAttribute::Directory { start: _ } => {
1240 entry["leaf"] = false.into();
1241 },
1242 DirEntryAttribute::File { size, mtime } => {
1243 entry["size"] = size.into();
1244 entry["mtime"] = mtime.into();
1245 },
1246 _ => {},
1247 }
1248 res.push(entry);
1249 }
1250
1251 Ok(res.into())
1252}
1253
53a561a2
WB
/// Recursively add a pxar `FileEntry` (and, for directories, everything
/// below it) to the given ZIP encoder.
///
/// Entry paths inside the archive are made relative by stripping `prefix`.
/// Regular files and hardlink targets are streamed into the ZIP; directory
/// entries are emitted without content and then recursed into. All other
/// entry kinds (symlinks, devices, ...) are silently skipped.
///
/// Returns a boxed future because async recursion needs an indirection.
fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        // Archive-internal path, relative to the download root.
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                // Resolve the hardlink and store the target's content as a
                // regular file entry (ZIP has no hardlink concept).
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                // Directory entry itself carries no content.
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}
1313
d33d8f4e
DC
// API method descriptor for `pxar_file_download`; registered below under
// the "pxar-file-download" subdir.
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1332
/// Download a single entry out of a pxar archive inside a backup snapshot.
///
/// `filepath` is a base64 encoded path whose first component names the
/// `.pxar.didx` archive and whose remainder is the path inside it. Regular
/// files and hardlinks are streamed directly; directories are streamed as
/// an on-the-fly generated ZIP archive.
fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Users without the datastore-wide read privilege may only access
        // snapshots they own.
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

        // Split "<pxar-archive-name>/<path-inside-archive>".
        let mut components = base64::decode(&filepath)?;
        if components.len() > 0 && components[0] == '/' as u8 {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == '/' as u8);
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
        // An encrypted archive cannot be decoded server-side.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        // Verify the archive checksum against the manifest before reading it.
        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                // Stream the directory as a ZIP built in a background task,
                // connected to the response body via a bounded channel.
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard last
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creating of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error during finishing of zip: {}", err))
                });

                Body::wrap_stream(receiver.map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1453
1a0d3d11
DM
1454#[api(
1455 input: {
1456 properties: {
1457 store: {
1458 schema: DATASTORE_SCHEMA,
1459 },
1460 timeframe: {
1461 type: RRDTimeFrameResolution,
1462 },
1463 cf: {
1464 type: RRDMode,
1465 },
1466 },
1467 },
1468 access: {
1469 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1470 },
1471)]
1472/// Read datastore stats
1473fn get_rrd_stats(
1474 store: String,
1475 timeframe: RRDTimeFrameResolution,
1476 cf: RRDMode,
1477 _param: Value,
1478) -> Result<Value, Error> {
1479
431cc7b1
DC
1480 create_value_from_rrd(
1481 &format!("datastore/{}", store),
1a0d3d11
DM
1482 &[
1483 "total", "used",
c94e1f65
DM
1484 "read_ios", "read_bytes",
1485 "write_ios", "write_bytes",
1486 "io_ticks",
1a0d3d11
DM
1487 ],
1488 timeframe,
1489 cf,
1490 )
1491}
1492
912b3f5b
DM
1493#[api(
1494 input: {
1495 properties: {
1496 store: {
1497 schema: DATASTORE_SCHEMA,
1498 },
1499 "backup-type": {
1500 schema: BACKUP_TYPE_SCHEMA,
1501 },
1502 "backup-id": {
1503 schema: BACKUP_ID_SCHEMA,
1504 },
1505 "backup-time": {
1506 schema: BACKUP_TIME_SCHEMA,
1507 },
1508 },
1509 },
1510 access: {
1511 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1512 },
1513)]
1514/// Get "notes" for a specific backup
1515fn get_notes(
1516 store: String,
1517 backup_type: String,
1518 backup_id: String,
1519 backup_time: i64,
1520 rpcenv: &mut dyn RpcEnvironment,
1521) -> Result<String, Error> {
1522 let datastore = DataStore::lookup_datastore(&store)?;
1523
e7cb4dc5 1524 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
912b3f5b 1525 let user_info = CachedUserInfo::new()?;
e7cb4dc5 1526 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
912b3f5b 1527
e0e5b442 1528 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b
DM
1529
1530 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
e7cb4dc5 1531 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
912b3f5b 1532
883aa6d5 1533 let (manifest, _) = datastore.load_manifest(&backup_dir)?;
912b3f5b 1534
883aa6d5 1535 let notes = manifest.unprotected["notes"]
912b3f5b
DM
1536 .as_str()
1537 .unwrap_or("");
1538
1539 Ok(String::from(notes))
1540}
1541
1542#[api(
1543 input: {
1544 properties: {
1545 store: {
1546 schema: DATASTORE_SCHEMA,
1547 },
1548 "backup-type": {
1549 schema: BACKUP_TYPE_SCHEMA,
1550 },
1551 "backup-id": {
1552 schema: BACKUP_ID_SCHEMA,
1553 },
1554 "backup-time": {
1555 schema: BACKUP_TIME_SCHEMA,
1556 },
1557 notes: {
1558 description: "A multiline text.",
1559 },
1560 },
1561 },
1562 access: {
1563 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1564 },
1565)]
1566/// Set "notes" for a specific backup
1567fn set_notes(
1568 store: String,
1569 backup_type: String,
1570 backup_id: String,
1571 backup_time: i64,
1572 notes: String,
1573 rpcenv: &mut dyn RpcEnvironment,
1574) -> Result<(), Error> {
1575 let datastore = DataStore::lookup_datastore(&store)?;
1576
e7cb4dc5 1577 let userid: Userid = rpcenv.get_user().unwrap().parse()?;
912b3f5b 1578 let user_info = CachedUserInfo::new()?;
e7cb4dc5 1579 let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
912b3f5b 1580
e0e5b442 1581 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b
DM
1582
1583 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
e7cb4dc5 1584 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
912b3f5b 1585
1a374fcf
SR
1586 datastore.update_manifest(&backup_dir,|manifest| {
1587 manifest.unprotected["notes"] = notes.into();
1588 }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
1589
1590 Ok(())
1591}
1592
72be0eb1 1593#[api(
4940012d 1594 input: {
72be0eb1
DW
1595 properties: {
1596 store: {
1597 schema: DATASTORE_SCHEMA,
1598 },
1599 "backup-type": {
1600 schema: BACKUP_TYPE_SCHEMA,
1601 },
1602 "backup-id": {
1603 schema: BACKUP_ID_SCHEMA,
1604 },
1605 "new-owner": {
1606 type: Userid,
1607 },
1608 },
4940012d
FG
1609 },
1610 access: {
1611 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1612 },
72be0eb1
DW
1613)]
1614/// Change owner of a backup group
1615fn set_backup_owner(
1616 store: String,
1617 backup_type: String,
1618 backup_id: String,
1619 new_owner: Userid,
752dfc4b 1620 _rpcenv: &mut dyn RpcEnvironment,
72be0eb1
DW
1621) -> Result<(), Error> {
1622
1623 let datastore = DataStore::lookup_datastore(&store)?;
1624
1625 let backup_group = BackupGroup::new(backup_type, backup_id);
1626
1627 let user_info = CachedUserInfo::new()?;
1628
1629 if !user_info.is_active_user(&new_owner) {
1630 bail!("user '{}' is inactive or non-existent", new_owner);
1631 }
1632
1633 datastore.set_owner(&backup_group, &new_owner, true)?;
1634
1635 Ok(())
1636}
1637
// Sub-routes available below `/admin/datastore/{store}`.
// NOTE(review): entries appear in alphabetical order; presumably
// `list_subdirs_api_method!` relies on that — keep it sorted when adding.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1719
// Router for a single datastore: GET lists the subdirs above, everything
// else dispatches into DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


// Top-level router: GET lists all datastores; `{store}` is captured and
// routed to the per-datastore router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);