]> git.proxmox.com Git - proxmox-backup.git/blame - src/api2/admin/datastore.rs
config: make notify a property string
[proxmox-backup.git] / src / api2 / admin / datastore.rs
CommitLineData
cad540e9 1use std::collections::{HashSet, HashMap};
d33d8f4e
DC
2use std::ffi::OsStr;
3use std::os::unix::ffi::OsStrExt;
6b809ff5 4use std::sync::{Arc, Mutex};
53a561a2 5use std::path::{Path, PathBuf};
804f6143 6use std::pin::Pin;
cad540e9 7
6ef9bb59 8use anyhow::{bail, format_err, Error};
9e47c0a5 9use futures::*;
cad540e9
WB
10use hyper::http::request::Parts;
11use hyper::{header, Body, Response, StatusCode};
15e9b4ed
DM
12use serde_json::{json, Value};
13
bb34b589
DM
14use proxmox::api::{
15 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
e7cb4dc5
WB
16 RpcEnvironment, RpcEnvironmentType, Permission
17};
cad540e9
WB
18use proxmox::api::router::SubdirMap;
19use proxmox::api::schema::*;
60f9a6ea 20use proxmox::tools::fs::{replace_file, CreateOptions};
9ea4bce4 21use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
e18a6c9e 22
804f6143 23use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
d33d8f4e
DC
24use pxar::EntryKind;
25
cad540e9 26use crate::api2::types::*;
431cc7b1 27use crate::api2::node::rrd::create_value_from_rrd;
e5064ba6 28use crate::backup::*;
cad540e9 29use crate::config::datastore;
54552dda
DM
30use crate::config::cached_user_info::CachedUserInfo;
31
4fdf5ddf 32use crate::server::{jobstate::Job, WorkerTask};
804f6143
DC
33use crate::tools::{
34 self,
35 zip::{ZipEncoder, ZipEntry},
36 AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
37};
38
d00e1a21
DM
39use crate::config::acl::{
40 PRIV_DATASTORE_AUDIT,
54552dda 41 PRIV_DATASTORE_MODIFY,
d00e1a21
DM
42 PRIV_DATASTORE_READ,
43 PRIV_DATASTORE_PRUNE,
54552dda 44 PRIV_DATASTORE_BACKUP,
09f6a240 45 PRIV_DATASTORE_VERIFY,
d00e1a21 46};
1629d2ad 47
/// Allow access if `auth_id` holds any of `required_privs` on the datastore,
/// or — failing that — if `auth_id` owns the backup group.
///
/// Returns `Ok(())` when access is permitted, an error otherwise.
fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);

    // no matching privilege bit set -> fall back to the owner check
    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}
63
64fn check_backup_owner(
65 owner: &Authid,
66 auth_id: &Authid,
e7cb4dc5 67) -> Result<(), Error> {
bff85572
FG
68 let correct_owner = owner == auth_id
69 || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
70 if !correct_owner {
e6dc35ac 71 bail!("backup owner check failed ({} != {})", auth_id, owner);
54552dda
DM
72 }
73 Ok(())
74}
75
e7cb4dc5
WB
/// Load the manifest of a snapshot and build the list of its archive files.
///
/// Returns the parsed manifest plus one `BackupContent` entry per file
/// recorded in the manifest, with an extra entry for the manifest blob itself.
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    // the manifest itself is not listed in manifest.files(); append it manually
    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        // a signed manifest is reported as SignOnly, otherwise as plain
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}
103
1c090810
DC
/// Combine the manifest file list with files found on disk for a snapshot.
///
/// Files present in the snapshot directory but missing from the manifest are
/// appended with unknown size/crypt-mode (`None`), so callers see everything.
fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

    // set of filenames already covered by the manifest
    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        // on-disk file not in manifest: size and crypt mode are unknown
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}
127
8f579717
DM
128fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
129
130 let mut group_hash = HashMap::new();
131
132 for info in backup_list {
9b492eb2 133 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
8f579717
DM
134 let time_list = group_hash.entry(group_id).or_insert(vec![]);
135 time_list.push(info);
136 }
137
138 group_hash
139}
140
b31c8019
DM
141#[api(
142 input: {
143 properties: {
144 store: {
145 schema: DATASTORE_SCHEMA,
146 },
147 },
148 },
149 returns: {
150 type: Array,
151 description: "Returns the list of backup groups.",
152 items: {
153 type: GroupListItem,
154 }
155 },
bb34b589 156 access: {
54552dda
DM
157 permission: &Permission::Privilege(
158 &["datastore", "{store}"],
159 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
160 true),
bb34b589 161 },
b31c8019
DM
162)]
163/// List backup groups.
ad20d198 164fn list_groups(
b31c8019 165 store: String,
54552dda 166 rpcenv: &mut dyn RpcEnvironment,
b31c8019 167) -> Result<Vec<GroupListItem>, Error> {
812c6f87 168
e6dc35ac 169 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 170 let user_info = CachedUserInfo::new()?;
e6dc35ac 171 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
54552dda 172
b31c8019 173 let datastore = DataStore::lookup_datastore(&store)?;
812c6f87 174
c0977501 175 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
812c6f87
DM
176
177 let group_hash = group_backups(backup_list);
178
b31c8019 179 let mut groups = Vec::new();
812c6f87
DM
180
181 for (_group_id, mut list) in group_hash {
182
2b01a225 183 BackupInfo::sort_list(&mut list, false);
812c6f87
DM
184
185 let info = &list[0];
54552dda 186
9b492eb2 187 let group = info.backup_dir.group();
812c6f87 188
54552dda 189 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
04b0ca8b 190 let owner = datastore.get_owner(group)?;
bff85572 191 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
20813274 192 continue;
54552dda
DM
193 }
194
b31c8019
DM
195 let result_item = GroupListItem {
196 backup_type: group.backup_type().to_string(),
197 backup_id: group.backup_id().to_string(),
6a7be83e 198 last_backup: info.backup_dir.backup_time(),
b31c8019
DM
199 backup_count: list.len() as u64,
200 files: info.files.clone(),
04b0ca8b 201 owner: Some(owner),
b31c8019
DM
202 };
203 groups.push(result_item);
812c6f87
DM
204 }
205
b31c8019 206 Ok(groups)
812c6f87 207}
8f579717 208
09b1f7b2
DM
209#[api(
210 input: {
211 properties: {
212 store: {
213 schema: DATASTORE_SCHEMA,
214 },
215 "backup-type": {
216 schema: BACKUP_TYPE_SCHEMA,
217 },
218 "backup-id": {
219 schema: BACKUP_ID_SCHEMA,
220 },
221 "backup-time": {
222 schema: BACKUP_TIME_SCHEMA,
223 },
224 },
225 },
226 returns: {
227 type: Array,
228 description: "Returns the list of archive files inside a backup snapshots.",
229 items: {
230 type: BackupContent,
231 }
232 },
bb34b589 233 access: {
54552dda
DM
234 permission: &Permission::Privilege(
235 &["datastore", "{store}"],
236 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
237 true),
bb34b589 238 },
09b1f7b2
DM
239)]
240/// List snapshot files.
ea5f547f 241pub fn list_snapshot_files(
09b1f7b2
DM
242 store: String,
243 backup_type: String,
244 backup_id: String,
245 backup_time: i64,
01a13423 246 _info: &ApiMethod,
54552dda 247 rpcenv: &mut dyn RpcEnvironment,
09b1f7b2 248) -> Result<Vec<BackupContent>, Error> {
01a13423 249
e6dc35ac 250 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
09b1f7b2 251 let datastore = DataStore::lookup_datastore(&store)?;
54552dda 252
e0e5b442 253 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
01a13423 254
bff85572 255 check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;
54552dda 256
d7c24397 257 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
01a13423 258
70030b43
DM
259 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
260
261 Ok(files)
01a13423
DM
262}
263
68a6a0ee
DM
264#[api(
265 input: {
266 properties: {
267 store: {
268 schema: DATASTORE_SCHEMA,
269 },
270 "backup-type": {
271 schema: BACKUP_TYPE_SCHEMA,
272 },
273 "backup-id": {
274 schema: BACKUP_ID_SCHEMA,
275 },
276 "backup-time": {
277 schema: BACKUP_TIME_SCHEMA,
278 },
279 },
280 },
bb34b589 281 access: {
54552dda
DM
282 permission: &Permission::Privilege(
283 &["datastore", "{store}"],
284 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
285 true),
bb34b589 286 },
68a6a0ee
DM
287)]
288/// Delete backup snapshot.
289fn delete_snapshot(
290 store: String,
291 backup_type: String,
292 backup_id: String,
293 backup_time: i64,
6f62c924 294 _info: &ApiMethod,
54552dda 295 rpcenv: &mut dyn RpcEnvironment,
6f62c924
DM
296) -> Result<Value, Error> {
297
e6dc35ac 298 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 299
e0e5b442 300 let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
68a6a0ee 301 let datastore = DataStore::lookup_datastore(&store)?;
6f62c924 302
bff85572 303 check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
54552dda 304
c9756b40 305 datastore.remove_backup_dir(&snapshot, false)?;
6f62c924
DM
306
307 Ok(Value::Null)
308}
309
fc189b19
DM
310#[api(
311 input: {
312 properties: {
313 store: {
314 schema: DATASTORE_SCHEMA,
315 },
316 "backup-type": {
317 optional: true,
318 schema: BACKUP_TYPE_SCHEMA,
319 },
320 "backup-id": {
321 optional: true,
322 schema: BACKUP_ID_SCHEMA,
323 },
324 },
325 },
326 returns: {
327 type: Array,
328 description: "Returns the list of snapshots.",
329 items: {
330 type: SnapshotListItem,
331 }
332 },
bb34b589 333 access: {
54552dda
DM
334 permission: &Permission::Privilege(
335 &["datastore", "{store}"],
336 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
337 true),
bb34b589 338 },
fc189b19
DM
339)]
340/// List backup snapshots.
f24fc116 341pub fn list_snapshots (
54552dda
DM
342 store: String,
343 backup_type: Option<String>,
344 backup_id: Option<String>,
345 _param: Value,
184f17af 346 _info: &ApiMethod,
54552dda 347 rpcenv: &mut dyn RpcEnvironment,
fc189b19 348) -> Result<Vec<SnapshotListItem>, Error> {
184f17af 349
e6dc35ac 350 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 351 let user_info = CachedUserInfo::new()?;
e6dc35ac 352 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
184f17af 353
54552dda 354 let datastore = DataStore::lookup_datastore(&store)?;
184f17af 355
c0977501 356 let base_path = datastore.base_path();
184f17af 357
15c847f1 358 let backup_list = BackupInfo::list_backups(&base_path)?;
184f17af
DM
359
360 let mut snapshots = vec![];
361
c0977501 362 for info in backup_list {
15c847f1 363 let group = info.backup_dir.group();
54552dda 364 if let Some(ref backup_type) = backup_type {
15c847f1
DM
365 if backup_type != group.backup_type() { continue; }
366 }
54552dda 367 if let Some(ref backup_id) = backup_id {
15c847f1
DM
368 if backup_id != group.backup_id() { continue; }
369 }
a17a0e7a 370
54552dda 371 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
04b0ca8b
DC
372 let owner = datastore.get_owner(group)?;
373
bff85572 374 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
20813274 375 continue;
54552dda
DM
376 }
377
1c090810
DC
378 let mut size = None;
379
3b2046d2 380 let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
70030b43 381 Ok((manifest, files)) => {
1c090810 382 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
70030b43
DM
383 // extract the first line from notes
384 let comment: Option<String> = manifest.unprotected["notes"]
385 .as_str()
386 .and_then(|notes| notes.lines().next())
387 .map(String::from);
388
3b2046d2
TL
389 let verify = manifest.unprotected["verify_state"].clone();
390 let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
391 Ok(verify) => verify,
392 Err(err) => {
393 eprintln!("error parsing verification state : '{}'", err);
394 None
395 }
396 };
397
398 (comment, verify, files)
1c090810
DC
399 },
400 Err(err) => {
401 eprintln!("error during snapshot file listing: '{}'", err);
70030b43 402 (
3b2046d2 403 None,
70030b43
DM
404 None,
405 info
406 .files
407 .iter()
408 .map(|x| BackupContent {
409 filename: x.to_string(),
410 size: None,
411 crypt_mode: None,
412 })
413 .collect()
414 )
1c090810
DC
415 },
416 };
417
418 let result_item = SnapshotListItem {
fc189b19
DM
419 backup_type: group.backup_type().to_string(),
420 backup_id: group.backup_id().to_string(),
6a7be83e 421 backup_time: info.backup_dir.backup_time(),
70030b43 422 comment,
3b2046d2 423 verification,
1c090810
DC
424 files,
425 size,
04b0ca8b 426 owner: Some(owner),
fc189b19 427 };
a17a0e7a 428
a17a0e7a 429 snapshots.push(result_item);
184f17af
DM
430 }
431
fc189b19 432 Ok(snapshots)
184f17af
DM
433}
434
14e08625 435fn get_snapshots_count(store: &DataStore) -> Result<Counts, Error> {
16f9f244
DC
436 let base_path = store.base_path();
437 let backup_list = BackupInfo::list_backups(&base_path)?;
438 let mut groups = HashSet::new();
14e08625
DC
439
440 let mut result = Counts {
441 ct: None,
442 host: None,
443 vm: None,
444 other: None,
445 };
446
16f9f244
DC
447 for info in backup_list {
448 let group = info.backup_dir.group();
449
450 let id = group.backup_id();
451 let backup_type = group.backup_type();
452
453 let mut new_id = false;
454
455 if groups.insert(format!("{}-{}", &backup_type, &id)) {
456 new_id = true;
457 }
458
14e08625
DC
459 let mut counts = match backup_type {
460 "ct" => result.ct.take().unwrap_or(Default::default()),
461 "host" => result.host.take().unwrap_or(Default::default()),
462 "vm" => result.vm.take().unwrap_or(Default::default()),
463 _ => result.other.take().unwrap_or(Default::default()),
464 };
465
466 counts.snapshots += 1;
467 if new_id {
468 counts.groups +=1;
469 }
470
471 match backup_type {
472 "ct" => result.ct = Some(counts),
473 "host" => result.host = Some(counts),
474 "vm" => result.vm = Some(counts),
475 _ => result.other = Some(counts),
16f9f244
DC
476 }
477 }
478
479 Ok(result)
480}
481
1dc117bb
DM
482#[api(
483 input: {
484 properties: {
485 store: {
486 schema: DATASTORE_SCHEMA,
487 },
488 },
489 },
490 returns: {
14e08625 491 type: DataStoreStatus,
1dc117bb 492 },
bb34b589 493 access: {
54552dda 494 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
bb34b589 495 },
1dc117bb
DM
496)]
497/// Get datastore status.
ea5f547f 498pub fn status(
1dc117bb 499 store: String,
0eecf38f
DM
500 _info: &ApiMethod,
501 _rpcenv: &mut dyn RpcEnvironment,
14e08625 502) -> Result<DataStoreStatus, Error> {
1dc117bb 503 let datastore = DataStore::lookup_datastore(&store)?;
14e08625
DC
504 let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
505 let counts = get_snapshots_count(&datastore)?;
16f9f244
DC
506 let gc_status = datastore.last_gc_status();
507
14e08625
DC
508 Ok(DataStoreStatus {
509 total: storage.total,
510 used: storage.used,
511 avail: storage.avail,
512 gc_status,
513 counts,
514 })
0eecf38f
DM
515}
516
c2009e53
DM
517#[api(
518 input: {
519 properties: {
520 store: {
521 schema: DATASTORE_SCHEMA,
522 },
523 "backup-type": {
524 schema: BACKUP_TYPE_SCHEMA,
525 optional: true,
526 },
527 "backup-id": {
528 schema: BACKUP_ID_SCHEMA,
529 optional: true,
530 },
531 "backup-time": {
532 schema: BACKUP_TIME_SCHEMA,
533 optional: true,
534 },
535 },
536 },
537 returns: {
538 schema: UPID_SCHEMA,
539 },
540 access: {
09f6a240 541 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
c2009e53
DM
542 },
543)]
544/// Verify backups.
545///
546/// This function can verify a single backup snapshot, all backup from a backup group,
547/// or all backups in the datastore.
548pub fn verify(
549 store: String,
550 backup_type: Option<String>,
551 backup_id: Option<String>,
552 backup_time: Option<i64>,
553 rpcenv: &mut dyn RpcEnvironment,
554) -> Result<Value, Error> {
555 let datastore = DataStore::lookup_datastore(&store)?;
556
09f6a240 557 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
8ea00f6e 558 let worker_id;
c2009e53
DM
559
560 let mut backup_dir = None;
561 let mut backup_group = None;
133042b5 562 let mut worker_type = "verify";
c2009e53
DM
563
564 match (backup_type, backup_id, backup_time) {
565 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
4ebda996 566 worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
e0e5b442 567 let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
09f6a240
FG
568
569 check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;
570
c2009e53 571 backup_dir = Some(dir);
133042b5 572 worker_type = "verify_snapshot";
c2009e53
DM
573 }
574 (Some(backup_type), Some(backup_id), None) => {
4ebda996 575 worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
c2009e53 576 let group = BackupGroup::new(backup_type, backup_id);
09f6a240
FG
577
578 check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
579
c2009e53 580 backup_group = Some(group);
133042b5 581 worker_type = "verify_group";
c2009e53
DM
582 }
583 (None, None, None) => {
8ea00f6e 584 worker_id = store.clone();
c2009e53 585 }
5a718dce 586 _ => bail!("parameters do not specify a backup group or snapshot"),
c2009e53
DM
587 }
588
c2009e53
DM
589 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
590
591 let upid_str = WorkerTask::new_thread(
133042b5 592 worker_type,
e7cb4dc5 593 Some(worker_id.clone()),
09f6a240 594 auth_id.clone(),
e7cb4dc5
WB
595 to_stdout,
596 move |worker| {
4f09d310
DM
597 let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
598 let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
599
adfdc369 600 let failed_dirs = if let Some(backup_dir) = backup_dir {
adfdc369 601 let mut res = Vec::new();
f6b1d1cc
WB
602 if !verify_backup_dir(
603 datastore,
604 &backup_dir,
605 verified_chunks,
606 corrupt_chunks,
607 worker.clone(),
608 worker.upid().clone(),
d771a608 609 None,
f6b1d1cc 610 )? {
adfdc369
DC
611 res.push(backup_dir.to_string());
612 }
613 res
c2009e53 614 } else if let Some(backup_group) = backup_group {
63d9aca9
DM
615 let (_count, failed_dirs) = verify_backup_group(
616 datastore,
617 &backup_group,
618 verified_chunks,
619 corrupt_chunks,
620 None,
621 worker.clone(),
f6b1d1cc 622 worker.upid(),
d771a608 623 None,
63d9aca9
DM
624 )?;
625 failed_dirs
c2009e53 626 } else {
09f6a240
FG
627 let privs = CachedUserInfo::new()?
628 .lookup_privs(&auth_id, &["datastore", &store]);
629
630 let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
631 Some(auth_id)
632 } else {
633 None
634 };
635
636 verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
c2009e53 637 };
adfdc369
DC
638 if failed_dirs.len() > 0 {
639 worker.log("Failed to verify following snapshots:");
640 for dir in failed_dirs {
641 worker.log(format!("\t{}", dir));
642 }
1ffe0301 643 bail!("verification failed - please check the log for details");
c2009e53
DM
644 }
645 Ok(())
e7cb4dc5
WB
646 },
647 )?;
c2009e53
DM
648
649 Ok(json!(upid_str))
650}
651
255f378a
DM
652#[macro_export]
653macro_rules! add_common_prune_prameters {
552c2259
DM
654 ( [ $( $list1:tt )* ] ) => {
655 add_common_prune_prameters!([$( $list1 )* ] , [])
656 };
657 ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
255f378a 658 [
552c2259 659 $( $list1 )*
255f378a 660 (
552c2259 661 "keep-daily",
255f378a 662 true,
49ff1092 663 &PRUNE_SCHEMA_KEEP_DAILY,
255f378a 664 ),
102d8d41
DM
665 (
666 "keep-hourly",
667 true,
49ff1092 668 &PRUNE_SCHEMA_KEEP_HOURLY,
102d8d41 669 ),
255f378a 670 (
552c2259 671 "keep-last",
255f378a 672 true,
49ff1092 673 &PRUNE_SCHEMA_KEEP_LAST,
255f378a
DM
674 ),
675 (
552c2259 676 "keep-monthly",
255f378a 677 true,
49ff1092 678 &PRUNE_SCHEMA_KEEP_MONTHLY,
255f378a
DM
679 ),
680 (
552c2259 681 "keep-weekly",
255f378a 682 true,
49ff1092 683 &PRUNE_SCHEMA_KEEP_WEEKLY,
255f378a
DM
684 ),
685 (
686 "keep-yearly",
687 true,
49ff1092 688 &PRUNE_SCHEMA_KEEP_YEARLY,
255f378a 689 ),
552c2259 690 $( $list2 )*
255f378a
DM
691 ]
692 }
0eecf38f
DM
693}
694
db1e061d
DM
695pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
696 "Returns the list of snapshots and a flag indicating if there are kept or removed.",
660a3489 697 &PruneListItem::API_SCHEMA
db1e061d
DM
698).schema();
699
0ab08ac9
DM
700const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
701 &ApiHandler::Sync(&prune),
255f378a 702 &ObjectSchema::new(
0ab08ac9
DM
703 "Prune the datastore.",
704 &add_common_prune_prameters!([
705 ("backup-id", false, &BACKUP_ID_SCHEMA),
706 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
3b03abfe
DM
707 ("dry-run", true, &BooleanSchema::new(
708 "Just show what prune would do, but do not delete anything.")
709 .schema()
710 ),
0ab08ac9 711 ],[
66c49c21 712 ("store", false, &DATASTORE_SCHEMA),
0ab08ac9 713 ])
db1e061d
DM
714 ))
715 .returns(&API_RETURN_SCHEMA_PRUNE)
716 .access(None, &Permission::Privilege(
54552dda
DM
717 &["datastore", "{store}"],
718 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
719 true)
720);
255f378a 721
83b7db02
DM
722fn prune(
723 param: Value,
724 _info: &ApiMethod,
54552dda 725 rpcenv: &mut dyn RpcEnvironment,
83b7db02
DM
726) -> Result<Value, Error> {
727
54552dda 728 let store = tools::required_string_param(&param, "store")?;
9fdc3ef4
DM
729 let backup_type = tools::required_string_param(&param, "backup-type")?;
730 let backup_id = tools::required_string_param(&param, "backup-id")?;
731
e6dc35ac 732 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 733
3b03abfe
DM
734 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
735
9fdc3ef4
DM
736 let group = BackupGroup::new(backup_type, backup_id);
737
54552dda
DM
738 let datastore = DataStore::lookup_datastore(&store)?;
739
bff85572 740 check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
83b7db02 741
9e3f0088
DM
742 let prune_options = PruneOptions {
743 keep_last: param["keep-last"].as_u64(),
102d8d41 744 keep_hourly: param["keep-hourly"].as_u64(),
9e3f0088
DM
745 keep_daily: param["keep-daily"].as_u64(),
746 keep_weekly: param["keep-weekly"].as_u64(),
747 keep_monthly: param["keep-monthly"].as_u64(),
748 keep_yearly: param["keep-yearly"].as_u64(),
749 };
8f579717 750
4ebda996 751 let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
503995c7 752
dda70154
DM
753 let mut prune_result = Vec::new();
754
755 let list = group.list_backups(&datastore.base_path())?;
756
757 let mut prune_info = compute_prune_info(list, &prune_options)?;
758
759 prune_info.reverse(); // delete older snapshots first
760
761 let keep_all = !prune_options.keeps_something();
762
763 if dry_run {
764 for (info, mut keep) in prune_info {
765 if keep_all { keep = true; }
766
767 let backup_time = info.backup_dir.backup_time();
768 let group = info.backup_dir.group();
769
770 prune_result.push(json!({
771 "backup-type": group.backup_type(),
772 "backup-id": group.backup_id(),
6a7be83e 773 "backup-time": backup_time,
dda70154
DM
774 "keep": keep,
775 }));
776 }
777 return Ok(json!(prune_result));
778 }
779
780
163e9bbe 781 // We use a WorkerTask just to have a task log, but run synchrounously
e6dc35ac 782 let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
dda70154 783
f1539300
SR
784 if keep_all {
785 worker.log("No prune selection - keeping all files.");
786 } else {
787 worker.log(format!("retention options: {}", prune_options.cli_options_string()));
788 worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
789 store, backup_type, backup_id));
790 }
3b03abfe 791
f1539300
SR
792 for (info, mut keep) in prune_info {
793 if keep_all { keep = true; }
dda70154 794
f1539300
SR
795 let backup_time = info.backup_dir.backup_time();
796 let timestamp = info.backup_dir.backup_time_string();
797 let group = info.backup_dir.group();
3b03abfe 798
3b03abfe 799
f1539300
SR
800 let msg = format!(
801 "{}/{}/{} {}",
802 group.backup_type(),
803 group.backup_id(),
804 timestamp,
805 if keep { "keep" } else { "remove" },
806 );
807
808 worker.log(msg);
809
810 prune_result.push(json!({
811 "backup-type": group.backup_type(),
812 "backup-id": group.backup_id(),
813 "backup-time": backup_time,
814 "keep": keep,
815 }));
816
817 if !(dry_run || keep) {
818 if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
819 worker.warn(
820 format!(
821 "failed to remove dir {:?}: {}",
822 info.backup_dir.relative_path(), err
823 )
824 );
8f0b4c1f 825 }
8f579717 826 }
f1539300 827 }
dd8e744f 828
f1539300 829 worker.log_result(&Ok(()));
83b7db02 830
dda70154 831 Ok(json!(prune_result))
83b7db02
DM
832}
833
dfc58d47
DM
834#[api(
835 input: {
836 properties: {
837 store: {
838 schema: DATASTORE_SCHEMA,
839 },
840 },
841 },
842 returns: {
843 schema: UPID_SCHEMA,
844 },
bb34b589 845 access: {
54552dda 846 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
bb34b589 847 },
dfc58d47
DM
848)]
849/// Start garbage collection.
6049b71f 850fn start_garbage_collection(
dfc58d47 851 store: String,
6049b71f 852 _info: &ApiMethod,
dd5495d6 853 rpcenv: &mut dyn RpcEnvironment,
6049b71f 854) -> Result<Value, Error> {
15e9b4ed 855
3e6a7dee 856 let datastore = DataStore::lookup_datastore(&store)?;
e6dc35ac 857 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
15e9b4ed 858
4fdf5ddf
DC
859 let job = Job::new("garbage_collection", &store)
860 .map_err(|_| format_err!("garbage collection already running"))?;
15e9b4ed 861
0f778e06 862 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
15e9b4ed 863
4fdf5ddf
DC
864 let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
865 .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
0f778e06
DM
866
867 Ok(json!(upid_str))
15e9b4ed
DM
868}
869
a92830dc
DM
870#[api(
871 input: {
872 properties: {
873 store: {
874 schema: DATASTORE_SCHEMA,
875 },
876 },
877 },
878 returns: {
879 type: GarbageCollectionStatus,
bb34b589
DM
880 },
881 access: {
882 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
883 },
a92830dc
DM
884)]
885/// Garbage collection status.
5eeea607 886pub fn garbage_collection_status(
a92830dc 887 store: String,
6049b71f 888 _info: &ApiMethod,
dd5495d6 889 _rpcenv: &mut dyn RpcEnvironment,
a92830dc 890) -> Result<GarbageCollectionStatus, Error> {
691c89a0 891
f2b99c34
DM
892 let datastore = DataStore::lookup_datastore(&store)?;
893
f2b99c34 894 let status = datastore.last_gc_status();
691c89a0 895
a92830dc 896 Ok(status)
691c89a0
DM
897}
898
bb34b589 899#[api(
30fb6025
DM
900 returns: {
901 description: "List the accessible datastores.",
902 type: Array,
903 items: {
904 description: "Datastore name and description.",
905 properties: {
906 store: {
907 schema: DATASTORE_SCHEMA,
908 },
909 comment: {
910 optional: true,
911 schema: SINGLE_LINE_COMMENT_SCHEMA,
912 },
913 },
914 },
915 },
bb34b589 916 access: {
54552dda 917 permission: &Permission::Anybody,
bb34b589
DM
918 },
919)]
920/// Datastore list
6049b71f
DM
921fn get_datastore_list(
922 _param: Value,
923 _info: &ApiMethod,
54552dda 924 rpcenv: &mut dyn RpcEnvironment,
6049b71f 925) -> Result<Value, Error> {
15e9b4ed 926
d0187a51 927 let (config, _digest) = datastore::config()?;
15e9b4ed 928
e6dc35ac 929 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda
DM
930 let user_info = CachedUserInfo::new()?;
931
30fb6025 932 let mut list = Vec::new();
54552dda 933
30fb6025 934 for (store, (_, data)) in &config.sections {
e6dc35ac 935 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
54552dda 936 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
30fb6025
DM
937 if allowed {
938 let mut entry = json!({ "store": store });
939 if let Some(comment) = data["comment"].as_str() {
940 entry["comment"] = comment.into();
941 }
942 list.push(entry);
943 }
54552dda
DM
944 }
945
30fb6025 946 Ok(list.into())
15e9b4ed
DM
947}
948
0ab08ac9
DM
949#[sortable]
950pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
951 &ApiHandler::AsyncHttp(&download_file),
952 &ObjectSchema::new(
953 "Download single raw file from backup snapshot.",
954 &sorted!([
66c49c21 955 ("store", false, &DATASTORE_SCHEMA),
0ab08ac9
DM
956 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
957 ("backup-id", false, &BACKUP_ID_SCHEMA),
958 ("backup-time", false, &BACKUP_TIME_SCHEMA),
4191018c 959 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
0ab08ac9
DM
960 ]),
961 )
54552dda
DM
962).access(None, &Permission::Privilege(
963 &["datastore", "{store}"],
964 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
965 true)
966);
691c89a0 967
9e47c0a5
DM
968fn download_file(
969 _parts: Parts,
970 _req_body: Body,
971 param: Value,
255f378a 972 _info: &ApiMethod,
54552dda 973 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 974) -> ApiResponseFuture {
9e47c0a5 975
ad51d02a
DM
976 async move {
977 let store = tools::required_string_param(&param, "store")?;
ad51d02a 978 let datastore = DataStore::lookup_datastore(store)?;
f14a8c9a 979
e6dc35ac 980 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 981
ad51d02a 982 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
9e47c0a5 983
ad51d02a
DM
984 let backup_type = tools::required_string_param(&param, "backup-type")?;
985 let backup_id = tools::required_string_param(&param, "backup-id")?;
986 let backup_time = tools::required_integer_param(&param, "backup-time")?;
9e47c0a5 987
e0e5b442 988 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
54552dda 989
bff85572 990 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
54552dda 991
abdb9763 992 println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
9e47c0a5 993
ad51d02a
DM
994 let mut path = datastore.base_path();
995 path.push(backup_dir.relative_path());
996 path.push(&file_name);
997
ba694720 998 let file = tokio::fs::File::open(&path)
8aa67ee7
WB
999 .await
1000 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
ad51d02a 1001
db0cb9ce 1002 let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
ba694720
DC
1003 .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
1004 .map_err(move |err| {
1005 eprintln!("error during streaming of '{:?}' - {}", &path, err);
1006 err
1007 });
ad51d02a 1008 let body = Body::wrap_stream(payload);
9e47c0a5 1009
ad51d02a
DM
1010 // fixme: set other headers ?
1011 Ok(Response::builder()
1012 .status(StatusCode::OK)
1013 .header(header::CONTENT_TYPE, "application/octet-stream")
1014 .body(body)
1015 .unwrap())
1016 }.boxed()
9e47c0a5
DM
1017}
1018
6ef9bb59
DC
1019#[sortable]
1020pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
1021 &ApiHandler::AsyncHttp(&download_file_decoded),
1022 &ObjectSchema::new(
1023 "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
1024 &sorted!([
1025 ("store", false, &DATASTORE_SCHEMA),
1026 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1027 ("backup-id", false, &BACKUP_ID_SCHEMA),
1028 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1029 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
1030 ]),
1031 )
1032).access(None, &Permission::Privilege(
1033 &["datastore", "{store}"],
1034 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1035 true)
1036);
1037
/// Stream a single file of a backup snapshot in decoded (plain) form.
///
/// Supports dynamic indexes (`.didx`), fixed indexes (`.fidx`) and blobs.
/// Files marked as client-side encrypted in the manifest are rejected, and
/// index checksums are verified against the manifest before any data is sent.
/// Access: PRIV_DATASTORE_READ, or ownership of the backup group.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Either a privileged reader, or the owner of this backup group.
        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // Encrypted archives cannot be decoded server side - reject early.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Dispatch on the archive suffix (didx / fidx / blob).
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // Verify the index against the manifest before streaming.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // Fixed indexes use a larger 4 MiB stream buffer.
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1134
#[sortable]
// Method schema for `upload-backup-log`: stores the client's log file as
// 'client.log.blob' inside an existing snapshot. Only the backup owner is
// allowed (enforced again in the handler via check_backup_owner).
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
9e47c0a5 1151
07ee2235
DM
/// Upload the client backup log ('client.log.blob') into an existing snapshot.
///
/// The caller must be the owner of the backup group. The upload is refused if
/// the snapshot already contains a log. The received data must be a valid
/// DataBlob; its CRC is verified server side before it is written.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Strict owner check - no privilege level overrides this.
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // A log may only be uploaded once per snapshot.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        // Collect the full request body into memory.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1204
5b1cfa01
DC
1205#[api(
1206 input: {
1207 properties: {
1208 store: {
1209 schema: DATASTORE_SCHEMA,
1210 },
1211 "backup-type": {
1212 schema: BACKUP_TYPE_SCHEMA,
1213 },
1214 "backup-id": {
1215 schema: BACKUP_ID_SCHEMA,
1216 },
1217 "backup-time": {
1218 schema: BACKUP_TIME_SCHEMA,
1219 },
1220 "filepath": {
1221 description: "Base64 encoded path.",
1222 type: String,
1223 }
1224 },
1225 },
1226 access: {
1227 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1228 },
1229)]
1230/// Get the entries of the given path of the catalog
1231fn catalog(
1232 store: String,
1233 backup_type: String,
1234 backup_id: String,
1235 backup_time: i64,
1236 filepath: String,
1237 _param: Value,
1238 _info: &ApiMethod,
1239 rpcenv: &mut dyn RpcEnvironment,
1240) -> Result<Value, Error> {
1241 let datastore = DataStore::lookup_datastore(&store)?;
1242
e6dc35ac 1243 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
5b1cfa01 1244
e0e5b442 1245 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
5b1cfa01 1246
bff85572 1247 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
5b1cfa01 1248
9238cdf5
FG
1249 let file_name = CATALOG_NAME;
1250
2d55beec 1251 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1252 for file in files {
1253 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1254 bail!("cannot decode '{}' - is encrypted", file_name);
1255 }
1256 }
1257
5b1cfa01
DC
1258 let mut path = datastore.base_path();
1259 path.push(backup_dir.relative_path());
9238cdf5 1260 path.push(file_name);
5b1cfa01
DC
1261
1262 let index = DynamicIndexReader::open(&path)
1263 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1264
2d55beec
FG
1265 let (csum, size) = index.compute_csum();
1266 manifest.verify_file(&file_name, &csum, size)?;
1267
14f6c9cb 1268 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
5b1cfa01
DC
1269 let reader = BufferedDynamicReader::new(index, chunk_reader);
1270
1271 let mut catalog_reader = CatalogReader::new(reader);
1272 let mut current = catalog_reader.root()?;
1273 let mut components = vec![];
1274
1275
1276 if filepath != "root" {
1277 components = base64::decode(filepath)?;
1278 if components.len() > 0 && components[0] == '/' as u8 {
1279 components.remove(0);
1280 }
1281 for component in components.split(|c| *c == '/' as u8) {
1282 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1283 current = entry;
1284 } else {
1285 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1286 }
1287 }
1288 }
1289
1290 let mut res = Vec::new();
1291
1292 for direntry in catalog_reader.read_dir(&current)? {
1293 let mut components = components.clone();
1294 components.push('/' as u8);
1295 components.extend(&direntry.name);
1296 let path = base64::encode(components);
1297 let text = String::from_utf8_lossy(&direntry.name);
1298 let mut entry = json!({
1299 "filepath": path,
1300 "text": text,
1301 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1302 "leaf": true,
1303 });
1304 match direntry.attr {
1305 DirEntryAttribute::Directory { start: _ } => {
1306 entry["leaf"] = false.into();
1307 },
1308 DirEntryAttribute::File { size, mtime } => {
1309 entry["size"] = size.into();
1310 entry["mtime"] = mtime.into();
1311 },
1312 _ => {},
1313 }
1314 res.push(entry);
1315 }
1316
1317 Ok(res.into())
1318}
1319
53a561a2
WB
/// Recursively add `file` (and, for directories, everything below it) from a
/// pxar archive to the given zip encoder.
///
/// Returns a boxed future because async fns cannot recurse directly. Paths
/// inside the zip are made relative to `prefix`; hardlinks are materialized
/// as regular file copies; entry kinds other than file/hardlink/directory
/// are silently skipped.
fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        // Strip the shared prefix so zip paths are relative to the requested dir.
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                // Resolve the link target and store its contents as a copy.
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                // Directory entry itself (no contents), then recurse into children.
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}
1379
d33d8f4e
DC
#[sortable]
// Method schema for `pxar-file-download`: extract a single file (or a whole
// directory, streamed as a zip) from a pxar archive inside a snapshot.
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1398
/// Download a single entry out of a pxar archive contained in a snapshot.
///
/// `filepath` is base64 encoded as "<archive-name>/<path/inside/archive>".
/// Regular files and hardlinks are streamed directly; directories are packed
/// into a zip on the fly by an internal task. Encrypted archives are refused.
fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // Decode "<archive-name>/<inner path>"; strip an optional leading '/'.
        let mut components = base64::decode(&filepath)?;
        if components.len() > 0 && components[0] == '/' as u8 {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == '/' as u8);
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
        // Refuse archives that were encrypted client side.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        // Verify the index against the manifest before serving any content.
        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                // Stream the directory as a zip built by a background task;
                // the channel decouples zip creation from the HTTP response.
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard last component
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creating of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error during finishing of zip: {}", err))
                });

                Body::wrap_stream(receiver.map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1516
1a0d3d11
DM
1517#[api(
1518 input: {
1519 properties: {
1520 store: {
1521 schema: DATASTORE_SCHEMA,
1522 },
1523 timeframe: {
1524 type: RRDTimeFrameResolution,
1525 },
1526 cf: {
1527 type: RRDMode,
1528 },
1529 },
1530 },
1531 access: {
1532 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1533 },
1534)]
1535/// Read datastore stats
1536fn get_rrd_stats(
1537 store: String,
1538 timeframe: RRDTimeFrameResolution,
1539 cf: RRDMode,
1540 _param: Value,
1541) -> Result<Value, Error> {
1542
431cc7b1
DC
1543 create_value_from_rrd(
1544 &format!("datastore/{}", store),
1a0d3d11
DM
1545 &[
1546 "total", "used",
c94e1f65
DM
1547 "read_ios", "read_bytes",
1548 "write_ios", "write_bytes",
1549 "io_ticks",
1a0d3d11
DM
1550 ],
1551 timeframe,
1552 cf,
1553 )
1554}
1555
912b3f5b
DM
1556#[api(
1557 input: {
1558 properties: {
1559 store: {
1560 schema: DATASTORE_SCHEMA,
1561 },
1562 "backup-type": {
1563 schema: BACKUP_TYPE_SCHEMA,
1564 },
1565 "backup-id": {
1566 schema: BACKUP_ID_SCHEMA,
1567 },
1568 "backup-time": {
1569 schema: BACKUP_TIME_SCHEMA,
1570 },
1571 },
1572 },
1573 access: {
1401f4be 1574 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
912b3f5b
DM
1575 },
1576)]
1577/// Get "notes" for a specific backup
1578fn get_notes(
1579 store: String,
1580 backup_type: String,
1581 backup_id: String,
1582 backup_time: i64,
1583 rpcenv: &mut dyn RpcEnvironment,
1584) -> Result<String, Error> {
1585 let datastore = DataStore::lookup_datastore(&store)?;
1586
e6dc35ac 1587 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
e0e5b442 1588 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b 1589
1401f4be 1590 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
912b3f5b 1591
883aa6d5 1592 let (manifest, _) = datastore.load_manifest(&backup_dir)?;
912b3f5b 1593
883aa6d5 1594 let notes = manifest.unprotected["notes"]
912b3f5b
DM
1595 .as_str()
1596 .unwrap_or("");
1597
1598 Ok(String::from(notes))
1599}
1600
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a specific backup
fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    // Writing notes requires modify rights, or ownership of the group.
    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    // Notes live in the manifest's unprotected (unsigned) section, so they can
    // be changed without invalidating the manifest signature.
    datastore.update_manifest(&backup_dir,|manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}
1649
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
///
/// Allowed either with Datastore.Modify on the datastore, or - with only
/// Datastore.Backup - for ownership transfers between a user and that same
/// user's API tokens (never user-to-user).
fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(&backup_group)?;

        // Unprivileged transfers are only allowed within one user's identity
        // (the user themself and their own API tokens).
        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            },
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id
                    && new_owner == auth_id
            },
            (false, true) => {
                // API token owner to API token
                owner == auth_id
                    && Authid::from(new_owner.user().clone()) == auth_id
            },
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            },
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    // Refuse handing ownership to disabled or unknown users/tokens.
    if !user_info.is_active_auth_id(&new_owner) {
        bail!("{} '{}' is inactive or non-existent",
              if new_owner.is_token() {
                  "API token".to_string()
              } else {
                  "user".to_string()
              },
              new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}
1746
#[sortable]
// Sub-endpoints available below /admin/datastore/{store}/.
// NOTE(review): entries appear to be kept in alphabetical order by name -
// confirm whether the subdir lookup relies on this before reordering.
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1828
/// Router for a single datastore: GET lists the available sub-endpoints,
/// everything else dispatches into DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


/// Top-level datastore admin router: GET lists all datastores; paths below
/// `/{store}/...` are handled by DATASTORE_INFO_ROUTER.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);