]> git.proxmox.com Git - proxmox-backup.git/blame - src/api2/admin/datastore.rs
verify: introduce & use new Datastore.Verify privilege
[proxmox-backup.git] / src / api2 / admin / datastore.rs
CommitLineData
cad540e9 1use std::collections::{HashSet, HashMap};
d33d8f4e
DC
2use std::ffi::OsStr;
3use std::os::unix::ffi::OsStrExt;
6b809ff5 4use std::sync::{Arc, Mutex};
53a561a2 5use std::path::{Path, PathBuf};
804f6143 6use std::pin::Pin;
cad540e9 7
6ef9bb59 8use anyhow::{bail, format_err, Error};
9e47c0a5 9use futures::*;
cad540e9
WB
10use hyper::http::request::Parts;
11use hyper::{header, Body, Response, StatusCode};
15e9b4ed
DM
12use serde_json::{json, Value};
13
bb34b589
DM
14use proxmox::api::{
15 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
e7cb4dc5
WB
16 RpcEnvironment, RpcEnvironmentType, Permission
17};
cad540e9
WB
18use proxmox::api::router::SubdirMap;
19use proxmox::api::schema::*;
60f9a6ea 20use proxmox::tools::fs::{replace_file, CreateOptions};
9ea4bce4 21use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
e18a6c9e 22
804f6143 23use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
d33d8f4e
DC
24use pxar::EntryKind;
25
cad540e9 26use crate::api2::types::*;
431cc7b1 27use crate::api2::node::rrd::create_value_from_rrd;
e5064ba6 28use crate::backup::*;
cad540e9 29use crate::config::datastore;
54552dda
DM
30use crate::config::cached_user_info::CachedUserInfo;
31
0f778e06 32use crate::server::WorkerTask;
804f6143
DC
33use crate::tools::{
34 self,
35 zip::{ZipEncoder, ZipEntry},
36 AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
37};
38
d00e1a21
DM
39use crate::config::acl::{
40 PRIV_DATASTORE_AUDIT,
54552dda 41 PRIV_DATASTORE_MODIFY,
d00e1a21
DM
42 PRIV_DATASTORE_READ,
43 PRIV_DATASTORE_PRUNE,
54552dda 44 PRIV_DATASTORE_BACKUP,
09f6a240 45 PRIV_DATASTORE_VERIFY,
d00e1a21 46};
1629d2ad 47
bff85572 48fn check_priv_or_backup_owner(
e7cb4dc5
WB
49 store: &DataStore,
50 group: &BackupGroup,
e6dc35ac 51 auth_id: &Authid,
bff85572
FG
52 required_privs: u64,
53) -> Result<(), Error> {
54 let user_info = CachedUserInfo::new()?;
55 let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);
56
57 if privs & required_privs == 0 {
58 let owner = store.get_owner(group)?;
59 check_backup_owner(&owner, auth_id)?;
60 }
61 Ok(())
62}
63
64fn check_backup_owner(
65 owner: &Authid,
66 auth_id: &Authid,
e7cb4dc5 67) -> Result<(), Error> {
bff85572
FG
68 let correct_owner = owner == auth_id
69 || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
70 if !correct_owner {
e6dc35ac 71 bail!("backup owner check failed ({} != {})", auth_id, owner);
54552dda
DM
72 }
73 Ok(())
74}
75
e7cb4dc5
WB
76fn read_backup_index(
77 store: &DataStore,
78 backup_dir: &BackupDir,
79) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
8c70e3eb 80
ff86ef00 81 let (manifest, index_size) = store.load_manifest(backup_dir)?;
8c70e3eb 82
09b1f7b2
DM
83 let mut result = Vec::new();
84 for item in manifest.files() {
85 result.push(BackupContent {
86 filename: item.filename.clone(),
f28d9088 87 crypt_mode: Some(item.crypt_mode),
09b1f7b2
DM
88 size: Some(item.size),
89 });
8c70e3eb
DM
90 }
91
09b1f7b2 92 result.push(BackupContent {
96d65fbc 93 filename: MANIFEST_BLOB_NAME.to_string(),
882c0823
FG
94 crypt_mode: match manifest.signature {
95 Some(_) => Some(CryptMode::SignOnly),
96 None => Some(CryptMode::None),
97 },
09b1f7b2
DM
98 size: Some(index_size),
99 });
4f1e40a2 100
70030b43 101 Ok((manifest, result))
8c70e3eb
DM
102}
103
1c090810
DC
104fn get_all_snapshot_files(
105 store: &DataStore,
106 info: &BackupInfo,
70030b43
DM
107) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
108
109 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
1c090810
DC
110
111 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
112 acc.insert(item.filename.clone());
113 acc
114 });
115
116 for file in &info.files {
117 if file_set.contains(file) { continue; }
f28d9088
WB
118 files.push(BackupContent {
119 filename: file.to_string(),
120 size: None,
121 crypt_mode: None,
122 });
1c090810
DC
123 }
124
70030b43 125 Ok((manifest, files))
1c090810
DC
126}
127
8f579717
DM
128fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
129
130 let mut group_hash = HashMap::new();
131
132 for info in backup_list {
9b492eb2 133 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
8f579717
DM
134 let time_list = group_hash.entry(group_id).or_insert(vec![]);
135 time_list.push(info);
136 }
137
138 group_hash
139}
140
b31c8019
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    // bucket the flat snapshot list into backup groups
    let group_hash = group_backups(backup_list);

    let mut groups = Vec::new();

    for (_group_id, mut list) in group_hash {

        // sort so that list[0] is the newest snapshot of the group
        // NOTE(review): `false` presumably means descending order - confirm
        // against BackupInfo::sort_list
        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];

        let group = info.backup_dir.group();

        // users with AUDIT see every group; others only the groups they own
        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;
        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            continue;
        }

        let result_item = GroupListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            last_backup: info.backup_dir.backup_time(),
            backup_count: list.len() as u64,
            files: info.files.clone(),
            owner: Some(owner),
        };
        groups.push(result_item);
    }

    Ok(groups)
}
8f579717 208
09b1f7b2
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshots.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    // AUDIT or READ privilege suffices; otherwise the caller must own the group
    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    // manifest itself is not part of the response, only the file list
    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}
263
68a6a0ee
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store)?;

    // MODIFY privilege allows deleting any snapshot; otherwise the caller
    // must own the backup group
    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    // NOTE(review): second argument is presumably a "force" flag - confirm
    // against DataStore::remove_backup_dir
    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}
309
fc189b19
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots (
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        // apply the optional type/id filters
        if let Some(ref backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(ref backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        // users with AUDIT see everything; others only snapshots they own
        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = datastore.get_owner(group)?;

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            continue;
        }

        let mut size = None;

        // a readable manifest yields comment/verify-state/size; a broken one
        // degrades to a bare file-name listing instead of failing the request
        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                // unparseable verify state is logged and reported as "not verified"
                let verify = manifest.unprotected["verify_state"].clone();
                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state : '{}'", err);
                        None
                    }
                };

                (comment, verify, files)
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                (
                    None,
                    None,
                    info
                        .files
                        .iter()
                        .map(|x| BackupContent {
                            filename: x.to_string(),
                            size: None,
                            crypt_mode: None,
                        })
                        .collect()
                )
            },
        };

        let result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time(),
            comment,
            verification,
            files,
            size,
            owner: Some(owner),
        };

        snapshots.push(result_item);
    }

    Ok(snapshots)
}
434
14e08625 435fn get_snapshots_count(store: &DataStore) -> Result<Counts, Error> {
16f9f244
DC
436 let base_path = store.base_path();
437 let backup_list = BackupInfo::list_backups(&base_path)?;
438 let mut groups = HashSet::new();
14e08625
DC
439
440 let mut result = Counts {
441 ct: None,
442 host: None,
443 vm: None,
444 other: None,
445 };
446
16f9f244
DC
447 for info in backup_list {
448 let group = info.backup_dir.group();
449
450 let id = group.backup_id();
451 let backup_type = group.backup_type();
452
453 let mut new_id = false;
454
455 if groups.insert(format!("{}-{}", &backup_type, &id)) {
456 new_id = true;
457 }
458
14e08625
DC
459 let mut counts = match backup_type {
460 "ct" => result.ct.take().unwrap_or(Default::default()),
461 "host" => result.host.take().unwrap_or(Default::default()),
462 "vm" => result.vm.take().unwrap_or(Default::default()),
463 _ => result.other.take().unwrap_or(Default::default()),
464 };
465
466 counts.snapshots += 1;
467 if new_id {
468 counts.groups +=1;
469 }
470
471 match backup_type {
472 "ct" => result.ct = Some(counts),
473 "host" => result.host = Some(counts),
474 "vm" => result.vm = Some(counts),
475 _ => result.other = Some(counts),
16f9f244
DC
476 }
477 }
478
479 Ok(result)
480}
481
1dc117bb
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    // filesystem usage of the datastore base directory
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    // per-type snapshot/group counts (scans all backups in the store)
    let counts = get_snapshots_count(&datastore)?;
    // result of the most recent garbage collection run
    let gc_status = datastore.last_gc_status();

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}
516
c2009e53
DM
517#[api(
518 input: {
519 properties: {
520 store: {
521 schema: DATASTORE_SCHEMA,
522 },
523 "backup-type": {
524 schema: BACKUP_TYPE_SCHEMA,
525 optional: true,
526 },
527 "backup-id": {
528 schema: BACKUP_ID_SCHEMA,
529 optional: true,
530 },
531 "backup-time": {
532 schema: BACKUP_TIME_SCHEMA,
533 optional: true,
534 },
535 },
536 },
537 returns: {
538 schema: UPID_SCHEMA,
539 },
540 access: {
09f6a240 541 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
c2009e53
DM
542 },
543)]
544/// Verify backups.
545///
546/// This function can verify a single backup snapshot, all backup from a backup group,
547/// or all backups in the datastore.
548pub fn verify(
549 store: String,
550 backup_type: Option<String>,
551 backup_id: Option<String>,
552 backup_time: Option<i64>,
553 rpcenv: &mut dyn RpcEnvironment,
554) -> Result<Value, Error> {
555 let datastore = DataStore::lookup_datastore(&store)?;
556
09f6a240 557 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
8ea00f6e 558 let worker_id;
c2009e53
DM
559
560 let mut backup_dir = None;
561 let mut backup_group = None;
133042b5 562 let mut worker_type = "verify";
c2009e53
DM
563
564 match (backup_type, backup_id, backup_time) {
565 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
4ebda996 566 worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
e0e5b442 567 let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
09f6a240
FG
568
569 check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;
570
c2009e53 571 backup_dir = Some(dir);
133042b5 572 worker_type = "verify_snapshot";
c2009e53
DM
573 }
574 (Some(backup_type), Some(backup_id), None) => {
4ebda996 575 worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
c2009e53 576 let group = BackupGroup::new(backup_type, backup_id);
09f6a240
FG
577
578 check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
579
c2009e53 580 backup_group = Some(group);
133042b5 581 worker_type = "verify_group";
c2009e53
DM
582 }
583 (None, None, None) => {
8ea00f6e 584 worker_id = store.clone();
c2009e53 585 }
5a718dce 586 _ => bail!("parameters do not specify a backup group or snapshot"),
c2009e53
DM
587 }
588
c2009e53
DM
589 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
590
591 let upid_str = WorkerTask::new_thread(
133042b5 592 worker_type,
e7cb4dc5 593 Some(worker_id.clone()),
09f6a240 594 auth_id.clone(),
e7cb4dc5
WB
595 to_stdout,
596 move |worker| {
4f09d310
DM
597 let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
598 let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
599
adfdc369 600 let failed_dirs = if let Some(backup_dir) = backup_dir {
adfdc369 601 let mut res = Vec::new();
f6b1d1cc
WB
602 if !verify_backup_dir(
603 datastore,
604 &backup_dir,
605 verified_chunks,
606 corrupt_chunks,
607 worker.clone(),
608 worker.upid().clone(),
d771a608 609 None,
f6b1d1cc 610 )? {
adfdc369
DC
611 res.push(backup_dir.to_string());
612 }
613 res
c2009e53 614 } else if let Some(backup_group) = backup_group {
63d9aca9
DM
615 let (_count, failed_dirs) = verify_backup_group(
616 datastore,
617 &backup_group,
618 verified_chunks,
619 corrupt_chunks,
620 None,
621 worker.clone(),
f6b1d1cc 622 worker.upid(),
d771a608 623 None,
63d9aca9
DM
624 )?;
625 failed_dirs
c2009e53 626 } else {
09f6a240
FG
627 let privs = CachedUserInfo::new()?
628 .lookup_privs(&auth_id, &["datastore", &store]);
629
630 let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
631 Some(auth_id)
632 } else {
633 None
634 };
635
636 verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
c2009e53 637 };
adfdc369
DC
638 if failed_dirs.len() > 0 {
639 worker.log("Failed to verify following snapshots:");
640 for dir in failed_dirs {
641 worker.log(format!("\t{}", dir));
642 }
1ffe0301 643 bail!("verification failed - please check the log for details");
c2009e53
DM
644 }
645 Ok(())
e7cb4dc5
WB
646 },
647 )?;
c2009e53
DM
648
649 Ok(json!(upid_str))
650}
651
255f378a
DM
// Build the parameter array for prune-style API methods: the caller's leading
// parameters, the six common "keep-*" options, then optional trailing
// parameters.
//
// NOTE(review): the macro name misspells "parameters", but it is exported
// with #[macro_export]; renaming it would break external callers.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // single-list form: delegate to the two-list form with an empty tail
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
694
db1e061d
DM
// Return schema shared by the prune API and its dry-run output.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();

// Hand-written ApiMethod (instead of #[api]) so the parameter list can be
// assembled with the add_common_prune_prameters! macro.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);
255f378a 721
83b7db02
DM
/// Prune a backup group: compute which snapshots to keep according to the
/// "keep-*" options and remove the rest. With `dry-run` only the keep/remove
/// decisions are returned.
fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    // MODIFY privilege allows pruning any group; otherwise ownership is required
    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    // no keep-* option set at all means everything is kept
    let keep_all = !prune_options.keeps_something();

    if dry_run {
        // report the keep/remove decisions without touching anything
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
            store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();


        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            // removal failures are logged as warnings; the prune run continues
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
833
dfc58d47
DM
834#[api(
835 input: {
836 properties: {
837 store: {
838 schema: DATASTORE_SCHEMA,
839 },
840 },
841 },
842 returns: {
843 schema: UPID_SCHEMA,
844 },
bb34b589 845 access: {
54552dda 846 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
bb34b589 847 },
dfc58d47
DM
848)]
849/// Start garbage collection.
6049b71f 850fn start_garbage_collection(
dfc58d47 851 store: String,
6049b71f 852 _info: &ApiMethod,
dd5495d6 853 rpcenv: &mut dyn RpcEnvironment,
6049b71f 854) -> Result<Value, Error> {
15e9b4ed 855
3e6a7dee 856 let datastore = DataStore::lookup_datastore(&store)?;
e6dc35ac 857 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
15e9b4ed 858
5a778d92 859 println!("Starting garbage collection on store {}", store);
15e9b4ed 860
0f778e06 861 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
15e9b4ed 862
0f778e06 863 let upid_str = WorkerTask::new_thread(
e7cb4dc5
WB
864 "garbage_collection",
865 Some(store.clone()),
e6dc35ac 866 auth_id.clone(),
e7cb4dc5
WB
867 to_stdout,
868 move |worker| {
0f778e06 869 worker.log(format!("starting garbage collection on store {}", store));
f6b1d1cc 870 datastore.garbage_collection(&*worker, worker.upid())
e7cb4dc5
WB
871 },
872 )?;
0f778e06
DM
873
874 Ok(json!(upid_str))
15e9b4ed
DM
875}
876
a92830dc
DM
877#[api(
878 input: {
879 properties: {
880 store: {
881 schema: DATASTORE_SCHEMA,
882 },
883 },
884 },
885 returns: {
886 type: GarbageCollectionStatus,
bb34b589
DM
887 },
888 access: {
889 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
890 },
a92830dc
DM
891)]
892/// Garbage collection status.
5eeea607 893pub fn garbage_collection_status(
a92830dc 894 store: String,
6049b71f 895 _info: &ApiMethod,
dd5495d6 896 _rpcenv: &mut dyn RpcEnvironment,
a92830dc 897) -> Result<GarbageCollectionStatus, Error> {
691c89a0 898
f2b99c34
DM
899 let datastore = DataStore::lookup_datastore(&store)?;
900
f2b99c34 901 let status = datastore.last_gc_status();
691c89a0 902
a92830dc 903 Ok(status)
691c89a0
DM
904}
905
bb34b589 906#[api(
30fb6025
DM
907 returns: {
908 description: "List the accessible datastores.",
909 type: Array,
910 items: {
911 description: "Datastore name and description.",
912 properties: {
913 store: {
914 schema: DATASTORE_SCHEMA,
915 },
916 comment: {
917 optional: true,
918 schema: SINGLE_LINE_COMMENT_SCHEMA,
919 },
920 },
921 },
922 },
bb34b589 923 access: {
54552dda 924 permission: &Permission::Anybody,
bb34b589
DM
925 },
926)]
927/// Datastore list
6049b71f
DM
928fn get_datastore_list(
929 _param: Value,
930 _info: &ApiMethod,
54552dda 931 rpcenv: &mut dyn RpcEnvironment,
6049b71f 932) -> Result<Value, Error> {
15e9b4ed 933
d0187a51 934 let (config, _digest) = datastore::config()?;
15e9b4ed 935
e6dc35ac 936 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda
DM
937 let user_info = CachedUserInfo::new()?;
938
30fb6025 939 let mut list = Vec::new();
54552dda 940
30fb6025 941 for (store, (_, data)) in &config.sections {
e6dc35ac 942 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
54552dda 943 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
30fb6025
DM
944 if allowed {
945 let mut entry = json!({ "store": store });
946 if let Some(comment) = data["comment"].as_str() {
947 entry["comment"] = comment.into();
948 }
949 list.push(entry);
950 }
54552dda
DM
951 }
952
30fb6025 953 Ok(list.into())
15e9b4ed
DM
954}
955
0ab08ac9
DM
// Hand-written ApiMethod (AsyncHttp handlers cannot use #[api]); streams a
// raw archive file from a snapshot.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
691c89a0 974
9e47c0a5
DM
/// Stream a single raw file of a backup snapshot as an HTTP response body.
///
/// The file is sent as-is (possibly encrypted/compressed); see
/// download_file_decoded for the decoded variant.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // READ privilege allows downloading from any snapshot; otherwise the
        // caller must own the backup group
        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        // <base>/<relative snapshot path>/<file name>
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // stream the file in chunks instead of buffering it in memory
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1025
6ef9bb59
DC
// Like API_METHOD_DOWNLOAD_FILE, but the handler decodes the archive before
// streaming; refuses encrypted files (see download_file_decoded).
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1044
/// Stream a single file of a backup snapshot to the client in decoded
/// (plaintext) form.
///
/// Handles dynamic indexes ('.didx'), fixed indexes ('.fidx') and blobs.
/// Files the manifest marks as encrypted are refused, since the server has
/// no key to decode them.
///
/// Access: Datastore.Read on the datastore, or Datastore.Backup plus
/// ownership of the backup group (see `check_priv_or_backup_owner`).
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // refuse server-side decoding of encrypted files - no key available here
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // dispatch on the file extension ("didx", "fidx", "blob", ...)
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // check the index against the manifest before streaming chunks
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // note: uses a larger (4 MiB) stream buffer than the didx case
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1141
// Method definition for the `upload-backup-log` subdir; see `upload_backup_log`.
// The handler itself verifies that the caller owns the backup group.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
9e47c0a5 1158
07ee2235
DM
1159fn upload_backup_log(
1160 _parts: Parts,
1161 req_body: Body,
1162 param: Value,
255f378a 1163 _info: &ApiMethod,
54552dda 1164 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 1165) -> ApiResponseFuture {
07ee2235 1166
ad51d02a
DM
1167 async move {
1168 let store = tools::required_string_param(&param, "store")?;
ad51d02a 1169 let datastore = DataStore::lookup_datastore(store)?;
07ee2235 1170
96d65fbc 1171 let file_name = CLIENT_LOG_BLOB_NAME;
07ee2235 1172
ad51d02a
DM
1173 let backup_type = tools::required_string_param(&param, "backup-type")?;
1174 let backup_id = tools::required_string_param(&param, "backup-id")?;
1175 let backup_time = tools::required_integer_param(&param, "backup-time")?;
07ee2235 1176
e0e5b442 1177 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
07ee2235 1178
e6dc35ac 1179 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
bff85572
FG
1180 let owner = datastore.get_owner(backup_dir.group())?;
1181 check_backup_owner(&owner, &auth_id)?;
54552dda 1182
ad51d02a
DM
1183 let mut path = datastore.base_path();
1184 path.push(backup_dir.relative_path());
1185 path.push(&file_name);
07ee2235 1186
ad51d02a
DM
1187 if path.exists() {
1188 bail!("backup already contains a log.");
1189 }
e128d4e8 1190
ad51d02a 1191 println!("Upload backup log to {}/{}/{}/{}/{}", store,
6a7be83e 1192 backup_type, backup_id, backup_dir.backup_time_string(), file_name);
ad51d02a
DM
1193
1194 let data = req_body
1195 .map_err(Error::from)
1196 .try_fold(Vec::new(), |mut acc, chunk| {
1197 acc.extend_from_slice(&*chunk);
1198 future::ok::<_, Error>(acc)
1199 })
1200 .await?;
1201
39f18b30
DM
1202 // always verify blob/CRC at server side
1203 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1204
1205 replace_file(&path, blob.raw_data(), CreateOptions::new())?;
ad51d02a
DM
1206
1207 // fixme: use correct formatter
1208 Ok(crate::server::formatter::json_response(Ok(Value::Null)))
1209 }.boxed()
07ee2235
DM
1210}
1211
5b1cfa01
DC
1212#[api(
1213 input: {
1214 properties: {
1215 store: {
1216 schema: DATASTORE_SCHEMA,
1217 },
1218 "backup-type": {
1219 schema: BACKUP_TYPE_SCHEMA,
1220 },
1221 "backup-id": {
1222 schema: BACKUP_ID_SCHEMA,
1223 },
1224 "backup-time": {
1225 schema: BACKUP_TIME_SCHEMA,
1226 },
1227 "filepath": {
1228 description: "Base64 encoded path.",
1229 type: String,
1230 }
1231 },
1232 },
1233 access: {
1234 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1235 },
1236)]
1237/// Get the entries of the given path of the catalog
1238fn catalog(
1239 store: String,
1240 backup_type: String,
1241 backup_id: String,
1242 backup_time: i64,
1243 filepath: String,
1244 _param: Value,
1245 _info: &ApiMethod,
1246 rpcenv: &mut dyn RpcEnvironment,
1247) -> Result<Value, Error> {
1248 let datastore = DataStore::lookup_datastore(&store)?;
1249
e6dc35ac 1250 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
5b1cfa01 1251
e0e5b442 1252 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
5b1cfa01 1253
bff85572 1254 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
5b1cfa01 1255
9238cdf5
FG
1256 let file_name = CATALOG_NAME;
1257
2d55beec 1258 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1259 for file in files {
1260 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1261 bail!("cannot decode '{}' - is encrypted", file_name);
1262 }
1263 }
1264
5b1cfa01
DC
1265 let mut path = datastore.base_path();
1266 path.push(backup_dir.relative_path());
9238cdf5 1267 path.push(file_name);
5b1cfa01
DC
1268
1269 let index = DynamicIndexReader::open(&path)
1270 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1271
2d55beec
FG
1272 let (csum, size) = index.compute_csum();
1273 manifest.verify_file(&file_name, &csum, size)?;
1274
14f6c9cb 1275 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
5b1cfa01
DC
1276 let reader = BufferedDynamicReader::new(index, chunk_reader);
1277
1278 let mut catalog_reader = CatalogReader::new(reader);
1279 let mut current = catalog_reader.root()?;
1280 let mut components = vec![];
1281
1282
1283 if filepath != "root" {
1284 components = base64::decode(filepath)?;
1285 if components.len() > 0 && components[0] == '/' as u8 {
1286 components.remove(0);
1287 }
1288 for component in components.split(|c| *c == '/' as u8) {
1289 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1290 current = entry;
1291 } else {
1292 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1293 }
1294 }
1295 }
1296
1297 let mut res = Vec::new();
1298
1299 for direntry in catalog_reader.read_dir(&current)? {
1300 let mut components = components.clone();
1301 components.push('/' as u8);
1302 components.extend(&direntry.name);
1303 let path = base64::encode(components);
1304 let text = String::from_utf8_lossy(&direntry.name);
1305 let mut entry = json!({
1306 "filepath": path,
1307 "text": text,
1308 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1309 "leaf": true,
1310 });
1311 match direntry.attr {
1312 DirEntryAttribute::Directory { start: _ } => {
1313 entry["leaf"] = false.into();
1314 },
1315 DirEntryAttribute::File { size, mtime } => {
1316 entry["size"] = size.into();
1317 entry["mtime"] = mtime.into();
1318 },
1319 _ => {},
1320 }
1321 res.push(entry);
1322 }
1323
1324 Ok(res.into())
1325}
1326
/// Recursively add `file` (and, for directories, everything below it) to the
/// given ZIP encoder.
///
/// Paths inside the archive are made relative by stripping `prefix`. Regular
/// files and hardlinks are stored with their content, directories are entered
/// and their children recursed into; all other entry kinds are skipped.
///
/// Returns a boxed future because async recursion requires an indirection.
fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                // resolve the link target and store its content under the link's path
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                // the directory entry itself carries no content
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}
1386
// Method definition for the `pxar-file-download` subdir; see `pxar_file_download`.
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
// Datastore.Read allows unrestricted access; with only Datastore.Backup the
// handler additionally checks backup-group ownership.
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1405
/// Stream a single file - or, for directories, a ZIP archive of the whole
/// subtree - out of a pxar archive contained in a backup snapshot.
///
/// `filepath` is base64 encoded and has the form
/// `<pxar-archive-name>/<path/inside/archive>`. Archives the manifest marks
/// as encrypted are refused, since the server cannot decode them.
///
/// Access: Datastore.Read on the datastore, or Datastore.Backup plus
/// ownership of the backup group (see `check_priv_or_backup_owner`).
fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let mut components = base64::decode(&filepath)?;
        // strip a leading slash, if present
        if components.len() > 0 && components[0] == '/' as u8 {
            components.remove(0);
        }

        // first component names the pxar archive, the rest is the path inside it
        let mut split = components.splitn(2, |c| *c == '/' as u8);
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
        // refuse encrypted archives - no key available server side
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        // verify the archive against the manifest checksum before serving data
        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                // stream the whole subtree as a ZIP archive generated on the fly
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard the last component; entries are stored relative to the parent
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                // encode the ZIP in a separate task; the response body drains the channel
                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creating of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error during finishing of zip: {}", err))
                });

                Body::wrap_stream(receiver.map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1523
1a0d3d11
DM
1524#[api(
1525 input: {
1526 properties: {
1527 store: {
1528 schema: DATASTORE_SCHEMA,
1529 },
1530 timeframe: {
1531 type: RRDTimeFrameResolution,
1532 },
1533 cf: {
1534 type: RRDMode,
1535 },
1536 },
1537 },
1538 access: {
1539 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1540 },
1541)]
1542/// Read datastore stats
1543fn get_rrd_stats(
1544 store: String,
1545 timeframe: RRDTimeFrameResolution,
1546 cf: RRDMode,
1547 _param: Value,
1548) -> Result<Value, Error> {
1549
431cc7b1
DC
1550 create_value_from_rrd(
1551 &format!("datastore/{}", store),
1a0d3d11
DM
1552 &[
1553 "total", "used",
c94e1f65
DM
1554 "read_ios", "read_bytes",
1555 "write_ios", "write_bytes",
1556 "io_ticks",
1a0d3d11
DM
1557 ],
1558 timeframe,
1559 cf,
1560 )
1561}
1562
912b3f5b
DM
1563#[api(
1564 input: {
1565 properties: {
1566 store: {
1567 schema: DATASTORE_SCHEMA,
1568 },
1569 "backup-type": {
1570 schema: BACKUP_TYPE_SCHEMA,
1571 },
1572 "backup-id": {
1573 schema: BACKUP_ID_SCHEMA,
1574 },
1575 "backup-time": {
1576 schema: BACKUP_TIME_SCHEMA,
1577 },
1578 },
1579 },
1580 access: {
1401f4be 1581 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
912b3f5b
DM
1582 },
1583)]
1584/// Get "notes" for a specific backup
1585fn get_notes(
1586 store: String,
1587 backup_type: String,
1588 backup_id: String,
1589 backup_time: i64,
1590 rpcenv: &mut dyn RpcEnvironment,
1591) -> Result<String, Error> {
1592 let datastore = DataStore::lookup_datastore(&store)?;
1593
e6dc35ac 1594 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
e0e5b442 1595 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b 1596
1401f4be 1597 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
912b3f5b 1598
883aa6d5 1599 let (manifest, _) = datastore.load_manifest(&backup_dir)?;
912b3f5b 1600
883aa6d5 1601 let notes = manifest.unprotected["notes"]
912b3f5b
DM
1602 .as_str()
1603 .unwrap_or("");
1604
1605 Ok(String::from(notes))
1606}
1607
1608#[api(
1609 input: {
1610 properties: {
1611 store: {
1612 schema: DATASTORE_SCHEMA,
1613 },
1614 "backup-type": {
1615 schema: BACKUP_TYPE_SCHEMA,
1616 },
1617 "backup-id": {
1618 schema: BACKUP_ID_SCHEMA,
1619 },
1620 "backup-time": {
1621 schema: BACKUP_TIME_SCHEMA,
1622 },
1623 notes: {
1624 description: "A multiline text.",
1625 },
1626 },
1627 },
1628 access: {
b728a69e
FG
1629 permission: &Permission::Privilege(&["datastore", "{store}"],
1630 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1631 true),
912b3f5b
DM
1632 },
1633)]
1634/// Set "notes" for a specific backup
1635fn set_notes(
1636 store: String,
1637 backup_type: String,
1638 backup_id: String,
1639 backup_time: i64,
1640 notes: String,
1641 rpcenv: &mut dyn RpcEnvironment,
1642) -> Result<(), Error> {
1643 let datastore = DataStore::lookup_datastore(&store)?;
1644
e6dc35ac 1645 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
e0e5b442 1646 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b 1647
b728a69e 1648 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
912b3f5b 1649
1a374fcf
SR
1650 datastore.update_manifest(&backup_dir,|manifest| {
1651 manifest.unprotected["notes"] = notes.into();
1652 }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
1653
1654 Ok(())
1655}
1656
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    // Authorization: Datastore.Modify allows any ownership change; with only
    // Datastore.Backup, ownership may only move between the calling user and
    // that same user's API tokens.
    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(&backup_group)?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            },
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id
                    && new_owner == auth_id
            },
            (false, true) => {
                // API token owner to API token
                owner == auth_id
                    && Authid::from(new_owner.user().clone()) == auth_id
            },
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            },
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    // refuse to hand backups to disabled or unknown auth ids
    if !user_info.is_active_auth_id(&new_owner) {
        bail!("{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}
1753
// Sub-directory table for a single datastore
// (`.../admin/datastore/{store}/<subdir>`).
// NOTE(review): entries appear to be kept in alphabetical order - confirm
// whether `list_subdirs_api_method!` relies on that before reordering.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1835
ad51d02a 1836const DATASTORE_INFO_ROUTER: Router = Router::new()
255f378a
DM
1837 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
1838 .subdirs(DATASTORE_INFO_SUBDIRS);
1839
1840
// Top-level datastore router: GET lists all datastores; `{store}` matches a
// specific datastore and delegates to DATASTORE_INFO_ROUTER.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);