]> git.proxmox.com Git - proxmox-backup.git/blame - src/api2/admin/datastore.rs
replace Userid with Authid
[proxmox-backup.git] / src / api2 / admin / datastore.rs
CommitLineData
cad540e9 1use std::collections::{HashSet, HashMap};
d33d8f4e
DC
2use std::ffi::OsStr;
3use std::os::unix::ffi::OsStrExt;
6b809ff5 4use std::sync::{Arc, Mutex};
53a561a2 5use std::path::{Path, PathBuf};
804f6143 6use std::pin::Pin;
cad540e9 7
6ef9bb59 8use anyhow::{bail, format_err, Error};
9e47c0a5 9use futures::*;
cad540e9
WB
10use hyper::http::request::Parts;
11use hyper::{header, Body, Response, StatusCode};
15e9b4ed
DM
12use serde_json::{json, Value};
13
bb34b589
DM
14use proxmox::api::{
15 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
e7cb4dc5
WB
16 RpcEnvironment, RpcEnvironmentType, Permission
17};
cad540e9
WB
18use proxmox::api::router::SubdirMap;
19use proxmox::api::schema::*;
60f9a6ea 20use proxmox::tools::fs::{replace_file, CreateOptions};
9ea4bce4 21use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
e18a6c9e 22
804f6143 23use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
d33d8f4e
DC
24use pxar::EntryKind;
25
cad540e9 26use crate::api2::types::*;
431cc7b1 27use crate::api2::node::rrd::create_value_from_rrd;
e5064ba6 28use crate::backup::*;
cad540e9 29use crate::config::datastore;
54552dda
DM
30use crate::config::cached_user_info::CachedUserInfo;
31
0f778e06 32use crate::server::WorkerTask;
804f6143
DC
33use crate::tools::{
34 self,
35 zip::{ZipEncoder, ZipEntry},
36 AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
37};
38
d00e1a21
DM
39use crate::config::acl::{
40 PRIV_DATASTORE_AUDIT,
54552dda 41 PRIV_DATASTORE_MODIFY,
d00e1a21
DM
42 PRIV_DATASTORE_READ,
43 PRIV_DATASTORE_PRUNE,
54552dda 44 PRIV_DATASTORE_BACKUP,
d00e1a21 45};
1629d2ad 46
e7cb4dc5
WB
47fn check_backup_owner(
48 store: &DataStore,
49 group: &BackupGroup,
e6dc35ac 50 auth_id: &Authid,
e7cb4dc5 51) -> Result<(), Error> {
54552dda 52 let owner = store.get_owner(group)?;
e6dc35ac
FG
53 if &owner != auth_id {
54 bail!("backup owner check failed ({} != {})", auth_id, owner);
54552dda
DM
55 }
56 Ok(())
57}
58
e7cb4dc5
WB
/// Load the manifest of a snapshot and return it together with the list of
/// files it records (as `BackupContent` entries). The manifest blob itself
/// is appended as the last entry.
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    // The manifest does not list itself, so add it explicitly; its crypt
    // mode depends on whether the manifest carries a signature.
    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}
86
1c090810
DC
87fn get_all_snapshot_files(
88 store: &DataStore,
89 info: &BackupInfo,
70030b43
DM
90) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
91
92 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
1c090810
DC
93
94 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
95 acc.insert(item.filename.clone());
96 acc
97 });
98
99 for file in &info.files {
100 if file_set.contains(file) { continue; }
f28d9088
WB
101 files.push(BackupContent {
102 filename: file.to_string(),
103 size: None,
104 crypt_mode: None,
105 });
1c090810
DC
106 }
107
70030b43 108 Ok((manifest, files))
1c090810
DC
109}
110
8f579717
DM
111fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
112
113 let mut group_hash = HashMap::new();
114
115 for info in backup_list {
9b492eb2 116 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
8f579717
DM
117 let time_list = group_hash.entry(group_id).or_insert(vec![]);
118 time_list.push(info);
119 }
120
121 group_hash
122}
123
b31c8019
DM
124#[api(
125 input: {
126 properties: {
127 store: {
128 schema: DATASTORE_SCHEMA,
129 },
130 },
131 },
132 returns: {
133 type: Array,
134 description: "Returns the list of backup groups.",
135 items: {
136 type: GroupListItem,
137 }
138 },
bb34b589 139 access: {
54552dda
DM
140 permission: &Permission::Privilege(
141 &["datastore", "{store}"],
142 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
143 true),
bb34b589 144 },
b31c8019
DM
145)]
146/// List backup groups.
ad20d198 147fn list_groups(
b31c8019 148 store: String,
54552dda 149 rpcenv: &mut dyn RpcEnvironment,
b31c8019 150) -> Result<Vec<GroupListItem>, Error> {
812c6f87 151
e6dc35ac 152 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 153 let user_info = CachedUserInfo::new()?;
e6dc35ac 154 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
54552dda 155
b31c8019 156 let datastore = DataStore::lookup_datastore(&store)?;
812c6f87 157
c0977501 158 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
812c6f87
DM
159
160 let group_hash = group_backups(backup_list);
161
b31c8019 162 let mut groups = Vec::new();
812c6f87
DM
163
164 for (_group_id, mut list) in group_hash {
165
2b01a225 166 BackupInfo::sort_list(&mut list, false);
812c6f87
DM
167
168 let info = &list[0];
54552dda 169
9b492eb2 170 let group = info.backup_dir.group();
812c6f87 171
54552dda 172 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
04b0ca8b 173 let owner = datastore.get_owner(group)?;
e6dc35ac 174 if !list_all && owner != auth_id {
20813274 175 continue;
54552dda
DM
176 }
177
b31c8019
DM
178 let result_item = GroupListItem {
179 backup_type: group.backup_type().to_string(),
180 backup_id: group.backup_id().to_string(),
6a7be83e 181 last_backup: info.backup_dir.backup_time(),
b31c8019
DM
182 backup_count: list.len() as u64,
183 files: info.files.clone(),
04b0ca8b 184 owner: Some(owner),
b31c8019
DM
185 };
186 groups.push(result_item);
812c6f87
DM
187 }
188
b31c8019 189 Ok(groups)
812c6f87 190}
8f579717 191
09b1f7b2
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshots.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    // AUDIT or READ grants access to any snapshot; a caller with only
    // BACKUP must own the backup group.
    let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
    if !allowed { check_backup_owner(&datastore, snapshot.group(), &auth_id)?; }

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}
250
68a6a0ee
DM
251#[api(
252 input: {
253 properties: {
254 store: {
255 schema: DATASTORE_SCHEMA,
256 },
257 "backup-type": {
258 schema: BACKUP_TYPE_SCHEMA,
259 },
260 "backup-id": {
261 schema: BACKUP_ID_SCHEMA,
262 },
263 "backup-time": {
264 schema: BACKUP_TIME_SCHEMA,
265 },
266 },
267 },
bb34b589 268 access: {
54552dda
DM
269 permission: &Permission::Privilege(
270 &["datastore", "{store}"],
271 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
272 true),
bb34b589 273 },
68a6a0ee
DM
274)]
275/// Delete backup snapshot.
276fn delete_snapshot(
277 store: String,
278 backup_type: String,
279 backup_id: String,
280 backup_time: i64,
6f62c924 281 _info: &ApiMethod,
54552dda 282 rpcenv: &mut dyn RpcEnvironment,
6f62c924
DM
283) -> Result<Value, Error> {
284
e6dc35ac 285 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 286 let user_info = CachedUserInfo::new()?;
e6dc35ac 287 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
54552dda 288
e0e5b442 289 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
6f62c924 290
68a6a0ee 291 let datastore = DataStore::lookup_datastore(&store)?;
6f62c924 292
54552dda 293 let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
e6dc35ac 294 if !allowed { check_backup_owner(&datastore, snapshot.group(), &auth_id)?; }
54552dda 295
c9756b40 296 datastore.remove_backup_dir(&snapshot, false)?;
6f62c924
DM
297
298 Ok(Value::Null)
299}
300
fc189b19
DM
301#[api(
302 input: {
303 properties: {
304 store: {
305 schema: DATASTORE_SCHEMA,
306 },
307 "backup-type": {
308 optional: true,
309 schema: BACKUP_TYPE_SCHEMA,
310 },
311 "backup-id": {
312 optional: true,
313 schema: BACKUP_ID_SCHEMA,
314 },
315 },
316 },
317 returns: {
318 type: Array,
319 description: "Returns the list of snapshots.",
320 items: {
321 type: SnapshotListItem,
322 }
323 },
bb34b589 324 access: {
54552dda
DM
325 permission: &Permission::Privilege(
326 &["datastore", "{store}"],
327 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
328 true),
bb34b589 329 },
fc189b19
DM
330)]
331/// List backup snapshots.
f24fc116 332pub fn list_snapshots (
54552dda
DM
333 store: String,
334 backup_type: Option<String>,
335 backup_id: Option<String>,
336 _param: Value,
184f17af 337 _info: &ApiMethod,
54552dda 338 rpcenv: &mut dyn RpcEnvironment,
fc189b19 339) -> Result<Vec<SnapshotListItem>, Error> {
184f17af 340
e6dc35ac 341 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 342 let user_info = CachedUserInfo::new()?;
e6dc35ac 343 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
184f17af 344
54552dda 345 let datastore = DataStore::lookup_datastore(&store)?;
184f17af 346
c0977501 347 let base_path = datastore.base_path();
184f17af 348
15c847f1 349 let backup_list = BackupInfo::list_backups(&base_path)?;
184f17af
DM
350
351 let mut snapshots = vec![];
352
c0977501 353 for info in backup_list {
15c847f1 354 let group = info.backup_dir.group();
54552dda 355 if let Some(ref backup_type) = backup_type {
15c847f1
DM
356 if backup_type != group.backup_type() { continue; }
357 }
54552dda 358 if let Some(ref backup_id) = backup_id {
15c847f1
DM
359 if backup_id != group.backup_id() { continue; }
360 }
a17a0e7a 361
54552dda 362 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
04b0ca8b
DC
363 let owner = datastore.get_owner(group)?;
364
e6dc35ac 365 if !list_all && owner != auth_id {
20813274 366 continue;
54552dda
DM
367 }
368
1c090810
DC
369 let mut size = None;
370
3b2046d2 371 let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
70030b43 372 Ok((manifest, files)) => {
1c090810 373 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
70030b43
DM
374 // extract the first line from notes
375 let comment: Option<String> = manifest.unprotected["notes"]
376 .as_str()
377 .and_then(|notes| notes.lines().next())
378 .map(String::from);
379
3b2046d2
TL
380 let verify = manifest.unprotected["verify_state"].clone();
381 let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
382 Ok(verify) => verify,
383 Err(err) => {
384 eprintln!("error parsing verification state : '{}'", err);
385 None
386 }
387 };
388
389 (comment, verify, files)
1c090810
DC
390 },
391 Err(err) => {
392 eprintln!("error during snapshot file listing: '{}'", err);
70030b43 393 (
3b2046d2 394 None,
70030b43
DM
395 None,
396 info
397 .files
398 .iter()
399 .map(|x| BackupContent {
400 filename: x.to_string(),
401 size: None,
402 crypt_mode: None,
403 })
404 .collect()
405 )
1c090810
DC
406 },
407 };
408
409 let result_item = SnapshotListItem {
fc189b19
DM
410 backup_type: group.backup_type().to_string(),
411 backup_id: group.backup_id().to_string(),
6a7be83e 412 backup_time: info.backup_dir.backup_time(),
70030b43 413 comment,
3b2046d2 414 verification,
1c090810
DC
415 files,
416 size,
04b0ca8b 417 owner: Some(owner),
fc189b19 418 };
a17a0e7a 419
a17a0e7a 420 snapshots.push(result_item);
184f17af
DM
421 }
422
fc189b19 423 Ok(snapshots)
184f17af
DM
424}
425
14e08625 426fn get_snapshots_count(store: &DataStore) -> Result<Counts, Error> {
16f9f244
DC
427 let base_path = store.base_path();
428 let backup_list = BackupInfo::list_backups(&base_path)?;
429 let mut groups = HashSet::new();
14e08625
DC
430
431 let mut result = Counts {
432 ct: None,
433 host: None,
434 vm: None,
435 other: None,
436 };
437
16f9f244
DC
438 for info in backup_list {
439 let group = info.backup_dir.group();
440
441 let id = group.backup_id();
442 let backup_type = group.backup_type();
443
444 let mut new_id = false;
445
446 if groups.insert(format!("{}-{}", &backup_type, &id)) {
447 new_id = true;
448 }
449
14e08625
DC
450 let mut counts = match backup_type {
451 "ct" => result.ct.take().unwrap_or(Default::default()),
452 "host" => result.host.take().unwrap_or(Default::default()),
453 "vm" => result.vm.take().unwrap_or(Default::default()),
454 _ => result.other.take().unwrap_or(Default::default()),
455 };
456
457 counts.snapshots += 1;
458 if new_id {
459 counts.groups +=1;
460 }
461
462 match backup_type {
463 "ct" => result.ct = Some(counts),
464 "host" => result.host = Some(counts),
465 "vm" => result.vm = Some(counts),
466 _ => result.other = Some(counts),
16f9f244
DC
467 }
468 }
469
470 Ok(result)
471}
472
1dc117bb
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    // filesystem usage (total/used/available) of the datastore base path
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    // per-type snapshot/group counts — scans all backups, so this is the
    // expensive part of the call
    let counts = get_snapshots_count(&datastore)?;
    // status recorded by the most recent garbage-collection run
    let gc_status = datastore.last_gc_status();

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}
507
c2009e53
DM
508#[api(
509 input: {
510 properties: {
511 store: {
512 schema: DATASTORE_SCHEMA,
513 },
514 "backup-type": {
515 schema: BACKUP_TYPE_SCHEMA,
516 optional: true,
517 },
518 "backup-id": {
519 schema: BACKUP_ID_SCHEMA,
520 optional: true,
521 },
522 "backup-time": {
523 schema: BACKUP_TIME_SCHEMA,
524 optional: true,
525 },
526 },
527 },
528 returns: {
529 schema: UPID_SCHEMA,
530 },
531 access: {
532 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
533 },
534)]
535/// Verify backups.
536///
537/// This function can verify a single backup snapshot, all backup from a backup group,
538/// or all backups in the datastore.
539pub fn verify(
540 store: String,
541 backup_type: Option<String>,
542 backup_id: Option<String>,
543 backup_time: Option<i64>,
544 rpcenv: &mut dyn RpcEnvironment,
545) -> Result<Value, Error> {
546 let datastore = DataStore::lookup_datastore(&store)?;
547
8ea00f6e 548 let worker_id;
c2009e53
DM
549
550 let mut backup_dir = None;
551 let mut backup_group = None;
133042b5 552 let mut worker_type = "verify";
c2009e53
DM
553
554 match (backup_type, backup_id, backup_time) {
555 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
4ebda996 556 worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
e0e5b442 557 let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
c2009e53 558 backup_dir = Some(dir);
133042b5 559 worker_type = "verify_snapshot";
c2009e53
DM
560 }
561 (Some(backup_type), Some(backup_id), None) => {
4ebda996 562 worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
c2009e53 563 let group = BackupGroup::new(backup_type, backup_id);
c2009e53 564 backup_group = Some(group);
133042b5 565 worker_type = "verify_group";
c2009e53
DM
566 }
567 (None, None, None) => {
8ea00f6e 568 worker_id = store.clone();
c2009e53 569 }
5a718dce 570 _ => bail!("parameters do not specify a backup group or snapshot"),
c2009e53
DM
571 }
572
e6dc35ac 573 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
c2009e53
DM
574 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
575
576 let upid_str = WorkerTask::new_thread(
133042b5 577 worker_type,
e7cb4dc5 578 Some(worker_id.clone()),
e6dc35ac 579 auth_id,
e7cb4dc5
WB
580 to_stdout,
581 move |worker| {
4f09d310
DM
582 let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
583 let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
584
adfdc369 585 let failed_dirs = if let Some(backup_dir) = backup_dir {
adfdc369 586 let mut res = Vec::new();
f6b1d1cc
WB
587 if !verify_backup_dir(
588 datastore,
589 &backup_dir,
590 verified_chunks,
591 corrupt_chunks,
592 worker.clone(),
593 worker.upid().clone(),
d771a608 594 None,
f6b1d1cc 595 )? {
adfdc369
DC
596 res.push(backup_dir.to_string());
597 }
598 res
c2009e53 599 } else if let Some(backup_group) = backup_group {
63d9aca9
DM
600 let (_count, failed_dirs) = verify_backup_group(
601 datastore,
602 &backup_group,
603 verified_chunks,
604 corrupt_chunks,
605 None,
606 worker.clone(),
f6b1d1cc 607 worker.upid(),
d771a608 608 None,
63d9aca9
DM
609 )?;
610 failed_dirs
c2009e53 611 } else {
d771a608 612 verify_all_backups(datastore, worker.clone(), worker.upid(), None)?
c2009e53 613 };
adfdc369
DC
614 if failed_dirs.len() > 0 {
615 worker.log("Failed to verify following snapshots:");
616 for dir in failed_dirs {
617 worker.log(format!("\t{}", dir));
618 }
1ffe0301 619 bail!("verification failed - please check the log for details");
c2009e53
DM
620 }
621 Ok(())
e7cb4dc5
WB
622 },
623 )?;
c2009e53
DM
624
625 Ok(json!(upid_str))
626}
627
255f378a
DM
/// Splice the shared `keep-*` prune parameters (daily, hourly, last,
/// monthly, weekly, yearly) into an API parameter list. Takes one or two
/// bracketed token lists: entries of the first are placed before the
/// keep options, entries of the second after them.
///
/// NOTE(review): the macro name contains a typo ("prameters"); it is
/// exported and used elsewhere, so it is kept for compatibility.
#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        // single-list form delegates to the two-list form with an empty tail
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
670
db1e061d
DM
/// Return schema of the `prune` call: an array with one `PruneListItem`
/// per snapshot, each flagged as kept or removed.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
675
0ab08ac9
DM
// Hand-written ApiMethod (instead of #[api]) because the keep-* options
// are shared with other endpoints through the add_common_prune_prameters!
// macro, which the attribute macro cannot expand into its schema.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);
255f378a 697
83b7db02
DM
/// Prune a backup group: apply the keep-* retention options to its
/// snapshots and delete the ones not selected for keeping. With
/// `dry-run` set, only reports what would happen. Returns one JSON entry
/// per snapshot with its `keep` decision.
fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    // without MODIFY, the caller must own the group they prune
    let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
    if !allowed { check_backup_owner(&datastore, &group, &auth_id)?; }

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    // no keep-* option set at all => keep everything
    let keep_all = !prune_options.keeps_something();

    if dry_run {
        // report-only path: no worker task, nothing deleted
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();


        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            // removal failures are logged but do not abort the prune run
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
812
dfc58d47
DM
813#[api(
814 input: {
815 properties: {
816 store: {
817 schema: DATASTORE_SCHEMA,
818 },
819 },
820 },
821 returns: {
822 schema: UPID_SCHEMA,
823 },
bb34b589 824 access: {
54552dda 825 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
bb34b589 826 },
dfc58d47
DM
827)]
828/// Start garbage collection.
6049b71f 829fn start_garbage_collection(
dfc58d47 830 store: String,
6049b71f 831 _info: &ApiMethod,
dd5495d6 832 rpcenv: &mut dyn RpcEnvironment,
6049b71f 833) -> Result<Value, Error> {
15e9b4ed 834
3e6a7dee 835 let datastore = DataStore::lookup_datastore(&store)?;
e6dc35ac 836 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
15e9b4ed 837
5a778d92 838 println!("Starting garbage collection on store {}", store);
15e9b4ed 839
0f778e06 840 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
15e9b4ed 841
0f778e06 842 let upid_str = WorkerTask::new_thread(
e7cb4dc5
WB
843 "garbage_collection",
844 Some(store.clone()),
e6dc35ac 845 auth_id.clone(),
e7cb4dc5
WB
846 to_stdout,
847 move |worker| {
0f778e06 848 worker.log(format!("starting garbage collection on store {}", store));
f6b1d1cc 849 datastore.garbage_collection(&*worker, worker.upid())
e7cb4dc5
WB
850 },
851 )?;
0f778e06
DM
852
853 Ok(json!(upid_str))
15e9b4ed
DM
854}
855
a92830dc
DM
856#[api(
857 input: {
858 properties: {
859 store: {
860 schema: DATASTORE_SCHEMA,
861 },
862 },
863 },
864 returns: {
865 type: GarbageCollectionStatus,
bb34b589
DM
866 },
867 access: {
868 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
869 },
a92830dc
DM
870)]
871/// Garbage collection status.
5eeea607 872pub fn garbage_collection_status(
a92830dc 873 store: String,
6049b71f 874 _info: &ApiMethod,
dd5495d6 875 _rpcenv: &mut dyn RpcEnvironment,
a92830dc 876) -> Result<GarbageCollectionStatus, Error> {
691c89a0 877
f2b99c34
DM
878 let datastore = DataStore::lookup_datastore(&store)?;
879
f2b99c34 880 let status = datastore.last_gc_status();
691c89a0 881
a92830dc 882 Ok(status)
691c89a0
DM
883}
884
bb34b589 885#[api(
30fb6025
DM
886 returns: {
887 description: "List the accessible datastores.",
888 type: Array,
889 items: {
890 description: "Datastore name and description.",
891 properties: {
892 store: {
893 schema: DATASTORE_SCHEMA,
894 },
895 comment: {
896 optional: true,
897 schema: SINGLE_LINE_COMMENT_SCHEMA,
898 },
899 },
900 },
901 },
bb34b589 902 access: {
54552dda 903 permission: &Permission::Anybody,
bb34b589
DM
904 },
905)]
906/// Datastore list
6049b71f
DM
907fn get_datastore_list(
908 _param: Value,
909 _info: &ApiMethod,
54552dda 910 rpcenv: &mut dyn RpcEnvironment,
6049b71f 911) -> Result<Value, Error> {
15e9b4ed 912
d0187a51 913 let (config, _digest) = datastore::config()?;
15e9b4ed 914
e6dc35ac 915 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda
DM
916 let user_info = CachedUserInfo::new()?;
917
30fb6025 918 let mut list = Vec::new();
54552dda 919
30fb6025 920 for (store, (_, data)) in &config.sections {
e6dc35ac 921 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
54552dda 922 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
30fb6025
DM
923 if allowed {
924 let mut entry = json!({ "store": store });
925 if let Some(comment) = data["comment"].as_str() {
926 entry["comment"] = comment.into();
927 }
928 list.push(entry);
929 }
54552dda
DM
930 }
931
30fb6025 932 Ok(list.into())
15e9b4ed
DM
933}
934
0ab08ac9
DM
// Hand-written ApiMethod: this endpoint streams a raw HTTP body instead
// of returning JSON, which needs the AsyncHttp handler form.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
691c89a0 953
9e47c0a5
DM
/// Stream a single file of a snapshot as a raw HTTP response
/// (application/octet-stream). The file is sent as stored on disk —
/// no decompression or decryption is performed.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // READ grants access to any snapshot; otherwise the caller must
        // own the backup group
        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // stream the file in chunks; errors mid-stream can only be logged,
        // the HTTP status line has already been sent by then
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1007
6ef9bb59
DC
// Like API_METHOD_DOWNLOAD_FILE, but the handler decodes the archive
// before streaming; refuses encrypted files.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1026
1027fn download_file_decoded(
1028 _parts: Parts,
1029 _req_body: Body,
1030 param: Value,
1031 _info: &ApiMethod,
1032 rpcenv: Box<dyn RpcEnvironment>,
1033) -> ApiResponseFuture {
1034
1035 async move {
1036 let store = tools::required_string_param(&param, "store")?;
1037 let datastore = DataStore::lookup_datastore(store)?;
1038
e6dc35ac 1039 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
6ef9bb59 1040 let user_info = CachedUserInfo::new()?;
e6dc35ac 1041 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
6ef9bb59
DC
1042
1043 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
1044
1045 let backup_type = tools::required_string_param(&param, "backup-type")?;
1046 let backup_id = tools::required_string_param(&param, "backup-id")?;
1047 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1048
e0e5b442 1049 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
6ef9bb59
DC
1050
1051 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
e6dc35ac 1052 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
6ef9bb59 1053
2d55beec 1054 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
6ef9bb59 1055 for file in files {
f28d9088 1056 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
6ef9bb59
DC
1057 bail!("cannot decode '{}' - is encrypted", file_name);
1058 }
1059 }
1060
1061 println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
1062
1063 let mut path = datastore.base_path();
1064 path.push(backup_dir.relative_path());
1065 path.push(&file_name);
1066
1067 let extension = file_name.rsplitn(2, '.').next().unwrap();
1068
1069 let body = match extension {
1070 "didx" => {
1071 let index = DynamicIndexReader::open(&path)
1072 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
2d55beec
FG
1073 let (csum, size) = index.compute_csum();
1074 manifest.verify_file(&file_name, &csum, size)?;
6ef9bb59 1075
14f6c9cb 1076 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
6ef9bb59 1077 let reader = AsyncIndexReader::new(index, chunk_reader);
f386f512 1078 Body::wrap_stream(AsyncReaderStream::new(reader)
6ef9bb59
DC
1079 .map_err(move |err| {
1080 eprintln!("error during streaming of '{:?}' - {}", path, err);
1081 err
1082 }))
1083 },
1084 "fidx" => {
1085 let index = FixedIndexReader::open(&path)
1086 .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
1087
2d55beec
FG
1088 let (csum, size) = index.compute_csum();
1089 manifest.verify_file(&file_name, &csum, size)?;
1090
14f6c9cb 1091 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
6ef9bb59 1092 let reader = AsyncIndexReader::new(index, chunk_reader);
f386f512 1093 Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
6ef9bb59
DC
1094 .map_err(move |err| {
1095 eprintln!("error during streaming of '{:?}' - {}", path, err);
1096 err
1097 }))
1098 },
1099 "blob" => {
1100 let file = std::fs::File::open(&path)
8aa67ee7 1101 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
6ef9bb59 1102
2d55beec
FG
1103 // FIXME: load full blob to verify index checksum?
1104
6ef9bb59
DC
1105 Body::wrap_stream(
1106 WrappedReaderStream::new(DataBlobReader::new(file, None)?)
1107 .map_err(move |err| {
1108 eprintln!("error during streaming of '{:?}' - {}", path, err);
1109 err
1110 })
1111 )
1112 },
1113 extension => {
1114 bail!("cannot download '{}' files", extension);
1115 },
1116 };
1117
1118 // fixme: set other headers ?
1119 Ok(Response::builder()
1120 .status(StatusCode::OK)
1121 .header(header::CONTENT_TYPE, "application/octet-stream")
1122 .body(body)
1123 .unwrap())
1124 }.boxed()
1125}
1126
552c2259 1127#[sortable]
0ab08ac9
DM
1128pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
1129 &ApiHandler::AsyncHttp(&upload_backup_log),
255f378a 1130 &ObjectSchema::new(
54552dda 1131 "Upload the client backup log file into a backup snapshot ('client.log.blob').",
552c2259 1132 &sorted!([
66c49c21 1133 ("store", false, &DATASTORE_SCHEMA),
255f378a 1134 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
0ab08ac9 1135 ("backup-id", false, &BACKUP_ID_SCHEMA),
255f378a 1136 ("backup-time", false, &BACKUP_TIME_SCHEMA),
552c2259 1137 ]),
9e47c0a5 1138 )
54552dda
DM
1139).access(
1140 Some("Only the backup creator/owner is allowed to do this."),
1141 &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
1142);
9e47c0a5 1143
07ee2235
DM
1144fn upload_backup_log(
1145 _parts: Parts,
1146 req_body: Body,
1147 param: Value,
255f378a 1148 _info: &ApiMethod,
54552dda 1149 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 1150) -> ApiResponseFuture {
07ee2235 1151
ad51d02a
DM
1152 async move {
1153 let store = tools::required_string_param(&param, "store")?;
ad51d02a 1154 let datastore = DataStore::lookup_datastore(store)?;
07ee2235 1155
96d65fbc 1156 let file_name = CLIENT_LOG_BLOB_NAME;
07ee2235 1157
ad51d02a
DM
1158 let backup_type = tools::required_string_param(&param, "backup-type")?;
1159 let backup_id = tools::required_string_param(&param, "backup-id")?;
1160 let backup_time = tools::required_integer_param(&param, "backup-time")?;
07ee2235 1161
e0e5b442 1162 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
07ee2235 1163
e6dc35ac
FG
1164 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1165 check_backup_owner(&datastore, backup_dir.group(), &auth_id)?;
54552dda 1166
ad51d02a
DM
1167 let mut path = datastore.base_path();
1168 path.push(backup_dir.relative_path());
1169 path.push(&file_name);
07ee2235 1170
ad51d02a
DM
1171 if path.exists() {
1172 bail!("backup already contains a log.");
1173 }
e128d4e8 1174
ad51d02a 1175 println!("Upload backup log to {}/{}/{}/{}/{}", store,
6a7be83e 1176 backup_type, backup_id, backup_dir.backup_time_string(), file_name);
ad51d02a
DM
1177
1178 let data = req_body
1179 .map_err(Error::from)
1180 .try_fold(Vec::new(), |mut acc, chunk| {
1181 acc.extend_from_slice(&*chunk);
1182 future::ok::<_, Error>(acc)
1183 })
1184 .await?;
1185
39f18b30
DM
1186 // always verify blob/CRC at server side
1187 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1188
1189 replace_file(&path, blob.raw_data(), CreateOptions::new())?;
ad51d02a
DM
1190
1191 // fixme: use correct formatter
1192 Ok(crate::server::formatter::json_response(Ok(Value::Null)))
1193 }.boxed()
07ee2235
DM
1194}
1195
5b1cfa01
DC
1196#[api(
1197 input: {
1198 properties: {
1199 store: {
1200 schema: DATASTORE_SCHEMA,
1201 },
1202 "backup-type": {
1203 schema: BACKUP_TYPE_SCHEMA,
1204 },
1205 "backup-id": {
1206 schema: BACKUP_ID_SCHEMA,
1207 },
1208 "backup-time": {
1209 schema: BACKUP_TIME_SCHEMA,
1210 },
1211 "filepath": {
1212 description: "Base64 encoded path.",
1213 type: String,
1214 }
1215 },
1216 },
1217 access: {
1218 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1219 },
1220)]
1221/// Get the entries of the given path of the catalog
1222fn catalog(
1223 store: String,
1224 backup_type: String,
1225 backup_id: String,
1226 backup_time: i64,
1227 filepath: String,
1228 _param: Value,
1229 _info: &ApiMethod,
1230 rpcenv: &mut dyn RpcEnvironment,
1231) -> Result<Value, Error> {
1232 let datastore = DataStore::lookup_datastore(&store)?;
1233
e6dc35ac 1234 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
5b1cfa01 1235 let user_info = CachedUserInfo::new()?;
e6dc35ac 1236 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
5b1cfa01 1237
e0e5b442 1238 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
5b1cfa01
DC
1239
1240 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
e6dc35ac 1241 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
5b1cfa01 1242
9238cdf5
FG
1243 let file_name = CATALOG_NAME;
1244
2d55beec 1245 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1246 for file in files {
1247 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1248 bail!("cannot decode '{}' - is encrypted", file_name);
1249 }
1250 }
1251
5b1cfa01
DC
1252 let mut path = datastore.base_path();
1253 path.push(backup_dir.relative_path());
9238cdf5 1254 path.push(file_name);
5b1cfa01
DC
1255
1256 let index = DynamicIndexReader::open(&path)
1257 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1258
2d55beec
FG
1259 let (csum, size) = index.compute_csum();
1260 manifest.verify_file(&file_name, &csum, size)?;
1261
14f6c9cb 1262 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
5b1cfa01
DC
1263 let reader = BufferedDynamicReader::new(index, chunk_reader);
1264
1265 let mut catalog_reader = CatalogReader::new(reader);
1266 let mut current = catalog_reader.root()?;
1267 let mut components = vec![];
1268
1269
1270 if filepath != "root" {
1271 components = base64::decode(filepath)?;
1272 if components.len() > 0 && components[0] == '/' as u8 {
1273 components.remove(0);
1274 }
1275 for component in components.split(|c| *c == '/' as u8) {
1276 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1277 current = entry;
1278 } else {
1279 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1280 }
1281 }
1282 }
1283
1284 let mut res = Vec::new();
1285
1286 for direntry in catalog_reader.read_dir(&current)? {
1287 let mut components = components.clone();
1288 components.push('/' as u8);
1289 components.extend(&direntry.name);
1290 let path = base64::encode(components);
1291 let text = String::from_utf8_lossy(&direntry.name);
1292 let mut entry = json!({
1293 "filepath": path,
1294 "text": text,
1295 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1296 "leaf": true,
1297 });
1298 match direntry.attr {
1299 DirEntryAttribute::Directory { start: _ } => {
1300 entry["leaf"] = false.into();
1301 },
1302 DirEntryAttribute::File { size, mtime } => {
1303 entry["size"] = size.into();
1304 entry["mtime"] = mtime.into();
1305 },
1306 _ => {},
1307 }
1308 res.push(entry);
1309 }
1310
1311 Ok(res.into())
1312}
1313
53a561a2
WB
1314fn recurse_files<'a, T, W>(
1315 zip: &'a mut ZipEncoder<W>,
1316 decoder: &'a mut Accessor<T>,
1317 prefix: &'a Path,
804f6143 1318 file: FileEntry<T>,
53a561a2 1319) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
804f6143
DC
1320where
1321 T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
1322 W: tokio::io::AsyncWrite + Unpin + Send + 'static,
1323{
1324 Box::pin(async move {
1325 let metadata = file.entry().metadata();
1326 let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();
1327
1328 match file.kind() {
1329 EntryKind::File { .. } => {
1330 let entry = ZipEntry::new(
1331 path,
1332 metadata.stat.mtime.secs,
1333 metadata.stat.mode as u16,
1334 true,
1335 );
1336 zip.add_entry(entry, Some(file.contents().await?))
e832860a
WB
1337 .await
1338 .map_err(|err| format_err!("could not send file entry: {}", err))?;
804f6143
DC
1339 }
1340 EntryKind::Hardlink(_) => {
1341 let realfile = decoder.follow_hardlink(&file).await?;
1342 let entry = ZipEntry::new(
1343 path,
1344 metadata.stat.mtime.secs,
1345 metadata.stat.mode as u16,
1346 true,
1347 );
1348 zip.add_entry(entry, Some(realfile.contents().await?))
e832860a
WB
1349 .await
1350 .map_err(|err| format_err!("could not send file entry: {}", err))?;
804f6143
DC
1351 }
1352 EntryKind::Directory => {
1353 let dir = file.enter_directory().await?;
1354 let mut readdir = dir.read_dir();
1355 let entry = ZipEntry::new(
1356 path,
1357 metadata.stat.mtime.secs,
1358 metadata.stat.mode as u16,
1359 false,
1360 );
1361 zip.add_entry::<FileContents<T>>(entry, None).await?;
1362 while let Some(entry) = readdir.next().await {
1363 let entry = entry?.decode_entry().await?;
53a561a2 1364 recurse_files(zip, decoder, prefix, entry).await?;
804f6143
DC
1365 }
1366 }
1367 _ => {} // ignore all else
1368 };
1369
53a561a2 1370 Ok(())
804f6143
DC
1371 })
1372}
1373
d33d8f4e
DC
1374#[sortable]
1375pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1376 &ApiHandler::AsyncHttp(&pxar_file_download),
1377 &ObjectSchema::new(
1ffe0301 1378 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
d33d8f4e
DC
1379 &sorted!([
1380 ("store", false, &DATASTORE_SCHEMA),
1381 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1382 ("backup-id", false, &BACKUP_ID_SCHEMA),
1383 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1384 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
1385 ]),
1386 )
1387).access(None, &Permission::Privilege(
1388 &["datastore", "{store}"],
1389 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1390 true)
1391);
1392
1393fn pxar_file_download(
1394 _parts: Parts,
1395 _req_body: Body,
1396 param: Value,
1397 _info: &ApiMethod,
1398 rpcenv: Box<dyn RpcEnvironment>,
1399) -> ApiResponseFuture {
1400
1401 async move {
1402 let store = tools::required_string_param(&param, "store")?;
1403 let datastore = DataStore::lookup_datastore(&store)?;
1404
e6dc35ac 1405 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
d33d8f4e 1406 let user_info = CachedUserInfo::new()?;
e6dc35ac 1407 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
d33d8f4e
DC
1408
1409 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1410
1411 let backup_type = tools::required_string_param(&param, "backup-type")?;
1412 let backup_id = tools::required_string_param(&param, "backup-id")?;
1413 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1414
e0e5b442 1415 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
d33d8f4e
DC
1416
1417 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
e6dc35ac 1418 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
d33d8f4e 1419
d33d8f4e
DC
1420 let mut components = base64::decode(&filepath)?;
1421 if components.len() > 0 && components[0] == '/' as u8 {
1422 components.remove(0);
1423 }
1424
1425 let mut split = components.splitn(2, |c| *c == '/' as u8);
9238cdf5 1426 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
d33d8f4e 1427 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
2d55beec 1428 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1429 for file in files {
1430 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1431 bail!("cannot decode '{}' - is encrypted", pxar_name);
1432 }
1433 }
d33d8f4e 1434
9238cdf5
FG
1435 let mut path = datastore.base_path();
1436 path.push(backup_dir.relative_path());
1437 path.push(pxar_name);
d33d8f4e
DC
1438
1439 let index = DynamicIndexReader::open(&path)
1440 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1441
2d55beec
FG
1442 let (csum, size) = index.compute_csum();
1443 manifest.verify_file(&pxar_name, &csum, size)?;
1444
14f6c9cb 1445 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
d33d8f4e
DC
1446 let reader = BufferedDynamicReader::new(index, chunk_reader);
1447 let archive_size = reader.archive_size();
1448 let reader = LocalDynamicReadAt::new(reader);
1449
1450 let decoder = Accessor::new(reader, archive_size).await?;
1451 let root = decoder.open_root().await?;
1452 let file = root
1453 .lookup(OsStr::from_bytes(file_path)).await?
1454 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1455
804f6143
DC
1456 let body = match file.kind() {
1457 EntryKind::File { .. } => Body::wrap_stream(
1458 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1459 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1460 err
1461 }),
1462 ),
1463 EntryKind::Hardlink(_) => Body::wrap_stream(
1464 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1465 .map_err(move |err| {
1466 eprintln!(
1467 "error during streaming of hardlink '{:?}' - {}",
1468 filepath, err
1469 );
1470 err
1471 }),
1472 ),
1473 EntryKind::Directory => {
1474 let (sender, receiver) = tokio::sync::mpsc::channel(100);
1475 let mut prefix = PathBuf::new();
1476 let mut components = file.entry().path().components();
1477 components.next_back(); // discar last
1478 for comp in components {
1479 prefix.push(comp);
1480 }
d33d8f4e 1481
804f6143 1482 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
804f6143
DC
1483
1484 crate::server::spawn_internal_task(async move {
53a561a2
WB
1485 let mut zipencoder = ZipEncoder::new(channelwriter);
1486 let mut decoder = decoder;
1487 recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
804f6143
DC
1488 .await
1489 .map_err(|err| eprintln!("error during creating of zip: {}", err))?;
1490
1491 zipencoder
1492 .finish()
1493 .await
1494 .map_err(|err| eprintln!("error during finishing of zip: {}", err))
1495 });
1496
1497 Body::wrap_stream(receiver.map_err(move |err| {
1498 eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
d33d8f4e 1499 err
804f6143
DC
1500 }))
1501 }
1502 other => bail!("cannot download file of type {:?}", other),
1503 };
d33d8f4e
DC
1504
1505 // fixme: set other headers ?
1506 Ok(Response::builder()
1507 .status(StatusCode::OK)
1508 .header(header::CONTENT_TYPE, "application/octet-stream")
1509 .body(body)
1510 .unwrap())
1511 }.boxed()
1512}
1513
1a0d3d11
DM
1514#[api(
1515 input: {
1516 properties: {
1517 store: {
1518 schema: DATASTORE_SCHEMA,
1519 },
1520 timeframe: {
1521 type: RRDTimeFrameResolution,
1522 },
1523 cf: {
1524 type: RRDMode,
1525 },
1526 },
1527 },
1528 access: {
1529 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1530 },
1531)]
1532/// Read datastore stats
1533fn get_rrd_stats(
1534 store: String,
1535 timeframe: RRDTimeFrameResolution,
1536 cf: RRDMode,
1537 _param: Value,
1538) -> Result<Value, Error> {
1539
431cc7b1
DC
1540 create_value_from_rrd(
1541 &format!("datastore/{}", store),
1a0d3d11
DM
1542 &[
1543 "total", "used",
c94e1f65
DM
1544 "read_ios", "read_bytes",
1545 "write_ios", "write_bytes",
1546 "io_ticks",
1a0d3d11
DM
1547 ],
1548 timeframe,
1549 cf,
1550 )
1551}
1552
912b3f5b
DM
1553#[api(
1554 input: {
1555 properties: {
1556 store: {
1557 schema: DATASTORE_SCHEMA,
1558 },
1559 "backup-type": {
1560 schema: BACKUP_TYPE_SCHEMA,
1561 },
1562 "backup-id": {
1563 schema: BACKUP_ID_SCHEMA,
1564 },
1565 "backup-time": {
1566 schema: BACKUP_TIME_SCHEMA,
1567 },
1568 },
1569 },
1570 access: {
1571 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1572 },
1573)]
1574/// Get "notes" for a specific backup
1575fn get_notes(
1576 store: String,
1577 backup_type: String,
1578 backup_id: String,
1579 backup_time: i64,
1580 rpcenv: &mut dyn RpcEnvironment,
1581) -> Result<String, Error> {
1582 let datastore = DataStore::lookup_datastore(&store)?;
1583
e6dc35ac 1584 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
912b3f5b 1585 let user_info = CachedUserInfo::new()?;
e6dc35ac 1586 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
912b3f5b 1587
e0e5b442 1588 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b
DM
1589
1590 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
e6dc35ac 1591 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
912b3f5b 1592
883aa6d5 1593 let (manifest, _) = datastore.load_manifest(&backup_dir)?;
912b3f5b 1594
883aa6d5 1595 let notes = manifest.unprotected["notes"]
912b3f5b
DM
1596 .as_str()
1597 .unwrap_or("");
1598
1599 Ok(String::from(notes))
1600}
1601
1602#[api(
1603 input: {
1604 properties: {
1605 store: {
1606 schema: DATASTORE_SCHEMA,
1607 },
1608 "backup-type": {
1609 schema: BACKUP_TYPE_SCHEMA,
1610 },
1611 "backup-id": {
1612 schema: BACKUP_ID_SCHEMA,
1613 },
1614 "backup-time": {
1615 schema: BACKUP_TIME_SCHEMA,
1616 },
1617 notes: {
1618 description: "A multiline text.",
1619 },
1620 },
1621 },
1622 access: {
1623 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1624 },
1625)]
1626/// Set "notes" for a specific backup
1627fn set_notes(
1628 store: String,
1629 backup_type: String,
1630 backup_id: String,
1631 backup_time: i64,
1632 notes: String,
1633 rpcenv: &mut dyn RpcEnvironment,
1634) -> Result<(), Error> {
1635 let datastore = DataStore::lookup_datastore(&store)?;
1636
e6dc35ac 1637 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
912b3f5b 1638 let user_info = CachedUserInfo::new()?;
e6dc35ac 1639 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
912b3f5b 1640
e0e5b442 1641 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b
DM
1642
1643 let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
e6dc35ac 1644 if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
912b3f5b 1645
1a374fcf
SR
1646 datastore.update_manifest(&backup_dir,|manifest| {
1647 manifest.unprotected["notes"] = notes.into();
1648 }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
1649
1650 Ok(())
1651}
1652
72be0eb1 1653#[api(
4940012d 1654 input: {
72be0eb1
DW
1655 properties: {
1656 store: {
1657 schema: DATASTORE_SCHEMA,
1658 },
1659 "backup-type": {
1660 schema: BACKUP_TYPE_SCHEMA,
1661 },
1662 "backup-id": {
1663 schema: BACKUP_ID_SCHEMA,
1664 },
1665 "new-owner": {
e6dc35ac 1666 type: Authid,
72be0eb1
DW
1667 },
1668 },
4940012d
FG
1669 },
1670 access: {
1671 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
1672 },
72be0eb1
DW
1673)]
1674/// Change owner of a backup group
1675fn set_backup_owner(
1676 store: String,
1677 backup_type: String,
1678 backup_id: String,
e6dc35ac 1679 new_owner: Authid,
752dfc4b 1680 _rpcenv: &mut dyn RpcEnvironment,
72be0eb1
DW
1681) -> Result<(), Error> {
1682
1683 let datastore = DataStore::lookup_datastore(&store)?;
1684
1685 let backup_group = BackupGroup::new(backup_type, backup_id);
1686
1687 let user_info = CachedUserInfo::new()?;
1688
e6dc35ac
FG
1689 if !user_info.is_active_auth_id(&new_owner) {
1690 bail!("{} '{}' is inactive or non-existent",
1691 if new_owner.is_token() {
1692 "API token".to_string()
1693 } else {
1694 "user".to_string()
1695 },
1696 new_owner);
72be0eb1
DW
1697 }
1698
1699 datastore.set_owner(&backup_group, &new_owner, true)?;
1700
1701 Ok(())
1702}
1703
552c2259 1704#[sortable]
255f378a 1705const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
5b1cfa01
DC
1706 (
1707 "catalog",
1708 &Router::new()
1709 .get(&API_METHOD_CATALOG)
1710 ),
72be0eb1
DW
1711 (
1712 "change-owner",
1713 &Router::new()
1714 .post(&API_METHOD_SET_BACKUP_OWNER)
1715 ),
255f378a
DM
1716 (
1717 "download",
1718 &Router::new()
1719 .download(&API_METHOD_DOWNLOAD_FILE)
1720 ),
6ef9bb59
DC
1721 (
1722 "download-decoded",
1723 &Router::new()
1724 .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
1725 ),
255f378a
DM
1726 (
1727 "files",
1728 &Router::new()
09b1f7b2 1729 .get(&API_METHOD_LIST_SNAPSHOT_FILES)
255f378a
DM
1730 ),
1731 (
1732 "gc",
1733 &Router::new()
1734 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
1735 .post(&API_METHOD_START_GARBAGE_COLLECTION)
1736 ),
1737 (
1738 "groups",
1739 &Router::new()
b31c8019 1740 .get(&API_METHOD_LIST_GROUPS)
255f378a 1741 ),
912b3f5b
DM
1742 (
1743 "notes",
1744 &Router::new()
1745 .get(&API_METHOD_GET_NOTES)
1746 .put(&API_METHOD_SET_NOTES)
1747 ),
255f378a
DM
1748 (
1749 "prune",
1750 &Router::new()
1751 .post(&API_METHOD_PRUNE)
1752 ),
d33d8f4e
DC
1753 (
1754 "pxar-file-download",
1755 &Router::new()
1756 .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
1757 ),
1a0d3d11
DM
1758 (
1759 "rrd",
1760 &Router::new()
1761 .get(&API_METHOD_GET_RRD_STATS)
1762 ),
255f378a
DM
1763 (
1764 "snapshots",
1765 &Router::new()
fc189b19 1766 .get(&API_METHOD_LIST_SNAPSHOTS)
68a6a0ee 1767 .delete(&API_METHOD_DELETE_SNAPSHOT)
255f378a
DM
1768 ),
1769 (
1770 "status",
1771 &Router::new()
1772 .get(&API_METHOD_STATUS)
1773 ),
1774 (
1775 "upload-backup-log",
1776 &Router::new()
1777 .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
1778 ),
c2009e53
DM
1779 (
1780 "verify",
1781 &Router::new()
1782 .post(&API_METHOD_VERIFY)
1783 ),
255f378a
DM
1784];
1785
ad51d02a 1786const DATASTORE_INFO_ROUTER: Router = Router::new()
255f378a
DM
1787 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
1788 .subdirs(DATASTORE_INFO_SUBDIRS);
1789
1790
1791pub const ROUTER: Router = Router::new()
bb34b589 1792 .get(&API_METHOD_GET_DATASTORE_LIST)
255f378a 1793 .match_all("store", &DATASTORE_INFO_ROUTER);