]> git.proxmox.com Git - proxmox-backup.git/blame - src/api2/admin/datastore.rs
client/http_client: add put method
[proxmox-backup.git] / src / api2 / admin / datastore.rs
CommitLineData
cad540e9 1use std::collections::{HashSet, HashMap};
d33d8f4e
DC
2use std::ffi::OsStr;
3use std::os::unix::ffi::OsStrExt;
6b809ff5 4use std::sync::{Arc, Mutex};
53a561a2 5use std::path::{Path, PathBuf};
804f6143 6use std::pin::Pin;
cad540e9 7
6ef9bb59 8use anyhow::{bail, format_err, Error};
9e47c0a5 9use futures::*;
cad540e9
WB
10use hyper::http::request::Parts;
11use hyper::{header, Body, Response, StatusCode};
15e9b4ed
DM
12use serde_json::{json, Value};
13
bb34b589
DM
14use proxmox::api::{
15 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
e7cb4dc5
WB
16 RpcEnvironment, RpcEnvironmentType, Permission
17};
cad540e9
WB
18use proxmox::api::router::SubdirMap;
19use proxmox::api::schema::*;
60f9a6ea 20use proxmox::tools::fs::{replace_file, CreateOptions};
9ea4bce4 21use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
e18a6c9e 22
804f6143 23use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
d33d8f4e
DC
24use pxar::EntryKind;
25
cad540e9 26use crate::api2::types::*;
431cc7b1 27use crate::api2::node::rrd::create_value_from_rrd;
e5064ba6 28use crate::backup::*;
cad540e9 29use crate::config::datastore;
54552dda
DM
30use crate::config::cached_user_info::CachedUserInfo;
31
4fdf5ddf 32use crate::server::{jobstate::Job, WorkerTask};
804f6143
DC
33use crate::tools::{
34 self,
35 zip::{ZipEncoder, ZipEntry},
36 AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
37};
38
d00e1a21
DM
39use crate::config::acl::{
40 PRIV_DATASTORE_AUDIT,
54552dda 41 PRIV_DATASTORE_MODIFY,
d00e1a21
DM
42 PRIV_DATASTORE_READ,
43 PRIV_DATASTORE_PRUNE,
54552dda 44 PRIV_DATASTORE_BACKUP,
09f6a240 45 PRIV_DATASTORE_VERIFY,
d00e1a21 46};
1629d2ad 47
/// Allow the request if `auth_id` holds any of `required_privs` on the
/// datastore ACL path, otherwise fall back to requiring that `auth_id`
/// owns the backup group.
///
/// Returns `Ok(())` when access is permitted, an error otherwise (including
/// when the group owner cannot be read).
fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);

    // privileged users skip the ownership check entirely
    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}
63
64fn check_backup_owner(
65 owner: &Authid,
66 auth_id: &Authid,
e7cb4dc5 67) -> Result<(), Error> {
bff85572
FG
68 let correct_owner = owner == auth_id
69 || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
70 if !correct_owner {
e6dc35ac 71 bail!("backup owner check failed ({} != {})", auth_id, owner);
54552dda
DM
72 }
73 Ok(())
74}
75
e7cb4dc5
WB
/// Load the manifest of `backup_dir` and return it together with the list of
/// archive files it references, as `BackupContent` entries.
///
/// The manifest blob itself is appended as an extra entry; its crypt mode is
/// reported as `SignOnly` when the manifest carries a signature, `None`
/// otherwise.
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    // the manifest is not listed in its own file table - add it explicitly
    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}
103
1c090810
DC
104fn get_all_snapshot_files(
105 store: &DataStore,
106 info: &BackupInfo,
70030b43
DM
107) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
108
109 let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
1c090810
DC
110
111 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
112 acc.insert(item.filename.clone());
113 acc
114 });
115
116 for file in &info.files {
117 if file_set.contains(file) { continue; }
f28d9088
WB
118 files.push(BackupContent {
119 filename: file.to_string(),
120 size: None,
121 crypt_mode: None,
122 });
1c090810
DC
123 }
124
70030b43 125 Ok((manifest, files))
1c090810
DC
126}
127
8f579717
DM
128fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
129
130 let mut group_hash = HashMap::new();
131
132 for info in backup_list {
9b492eb2 133 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
8f579717
DM
134 let time_list = group_hash.entry(group_id).or_insert(vec![]);
135 time_list.push(info);
136 }
137
138 group_hash
139}
140
b31c8019
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
///
/// Groups all snapshots of the datastore and emits one `GroupListItem` per
/// group, described by its newest snapshot. Users without
/// `PRIV_DATASTORE_AUDIT` only see groups they own.
fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = Vec::new();

    for (_group_id, mut list) in group_hash {

        // newest snapshot first; list[0] below describes the group
        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];

        let group = info.backup_dir.group();

        // AUDIT privilege lists everything; otherwise only owned groups
        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = match datastore.get_owner(group) {
            Ok(auth_id) => auth_id,
            Err(err) => {
                // unreadable owner file: skip the group rather than fail the whole listing
                println!("Failed to get owner of group '{}' - {}", group, err);
                continue;
            },
        };
        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            continue;
        }

        let result_item = GroupListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            last_backup: info.backup_dir.backup_time(),
            backup_count: list.len() as u64,
            files: info.files.clone(),
            owner: Some(owner),
        };
        groups.push(result_item);
    }

    Ok(groups)
}
8f579717 214
09b1f7b2
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshots.",
        items: {
            type: BackupContent,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
///
/// Resolves the addressed snapshot and returns every file it contains
/// (manifest-listed plus any extra on-disk files). Access requires
/// AUDIT/READ privilege or ownership of the backup group.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    // manifest itself is not needed by this endpoint - only the file list
    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}
269
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
///
/// Removes the snapshot directory from disk (non-forced removal). Requires
/// MODIFY privilege or ownership of the backup group.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    // `false` = do not force removal of a snapshot currently in use
    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}
315
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
///
/// Optionally filtered by backup type and/or id. For each snapshot the
/// manifest is read to extract the comment (first line of the notes) and
/// the verification state; snapshots whose manifest cannot be read are
/// still listed, with only the raw file names. Users without
/// `PRIV_DATASTORE_AUDIT` only see snapshots of groups they own.
pub fn list_snapshots (
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        // apply the optional type/id filters
        if let Some(ref backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(ref backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        // AUDIT privilege lists everything; otherwise only owned groups
        let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
        let owner = match datastore.get_owner(group) {
            Ok(auth_id) => auth_id,
            Err(err) => {
                // unreadable owner file: skip rather than fail the whole listing
                println!("Failed to get owner of group '{}' - {}", group, err);
                continue;
            },
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            continue;
        }

        let mut size = None;

        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                // verify state is stored as free-form JSON in the manifest;
                // a parse failure degrades to "no verification info"
                let verify = manifest.unprotected["verify_state"].clone();
                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state : '{}'", err);
                        None
                    }
                };

                (comment, verify, files)
            },
            Err(err) => {
                // manifest unreadable: still list the snapshot with bare file names
                eprintln!("error during snapshot file listing: '{}'", err);
                (
                    None,
                    None,
                    info
                        .files
                        .iter()
                        .map(|x| BackupContent {
                            filename: x.to_string(),
                            size: None,
                            crypt_mode: None,
                        })
                        .collect()
                )
            },
        };

        let result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time(),
            comment,
            verification,
            files,
            size,
            owner: Some(owner),
        };

        snapshots.push(result_item);
    }

    Ok(snapshots)
}
446
14e08625 447fn get_snapshots_count(store: &DataStore) -> Result<Counts, Error> {
16f9f244
DC
448 let base_path = store.base_path();
449 let backup_list = BackupInfo::list_backups(&base_path)?;
450 let mut groups = HashSet::new();
14e08625
DC
451
452 let mut result = Counts {
453 ct: None,
454 host: None,
455 vm: None,
456 other: None,
457 };
458
16f9f244
DC
459 for info in backup_list {
460 let group = info.backup_dir.group();
461
462 let id = group.backup_id();
463 let backup_type = group.backup_type();
464
465 let mut new_id = false;
466
467 if groups.insert(format!("{}-{}", &backup_type, &id)) {
468 new_id = true;
469 }
470
14e08625
DC
471 let mut counts = match backup_type {
472 "ct" => result.ct.take().unwrap_or(Default::default()),
473 "host" => result.host.take().unwrap_or(Default::default()),
474 "vm" => result.vm.take().unwrap_or(Default::default()),
475 _ => result.other.take().unwrap_or(Default::default()),
476 };
477
478 counts.snapshots += 1;
479 if new_id {
480 counts.groups +=1;
481 }
482
483 match backup_type {
484 "ct" => result.ct = Some(counts),
485 "host" => result.host = Some(counts),
486 "vm" => result.vm = Some(counts),
487 _ => result.other = Some(counts),
16f9f244
DC
488 }
489 }
490
491 Ok(result)
492}
493
1dc117bb
DM
494#[api(
495 input: {
496 properties: {
497 store: {
498 schema: DATASTORE_SCHEMA,
499 },
500 },
501 },
502 returns: {
14e08625 503 type: DataStoreStatus,
1dc117bb 504 },
bb34b589 505 access: {
54552dda 506 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
bb34b589 507 },
1dc117bb
DM
508)]
509/// Get datastore status.
ea5f547f 510pub fn status(
1dc117bb 511 store: String,
0eecf38f
DM
512 _info: &ApiMethod,
513 _rpcenv: &mut dyn RpcEnvironment,
14e08625 514) -> Result<DataStoreStatus, Error> {
1dc117bb 515 let datastore = DataStore::lookup_datastore(&store)?;
14e08625
DC
516 let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
517 let counts = get_snapshots_count(&datastore)?;
16f9f244
DC
518 let gc_status = datastore.last_gc_status();
519
14e08625
DC
520 Ok(DataStoreStatus {
521 total: storage.total,
522 used: storage.used,
523 avail: storage.avail,
524 gc_status,
525 counts,
526 })
0eecf38f
DM
527}
528
c2009e53
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backup from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    // decide the verification scope from which optional parameters are set:
    // (type, id, time) = snapshot; (type, id) = group; none = whole datastore
    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id.clone()),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            // chunk caches shared across the whole verification run
            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));

            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    datastore,
                    &backup_dir,
                    verified_chunks,
                    corrupt_chunks,
                    worker.clone(),
                    worker.upid().clone(),
                    None,
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let (_count, failed_dirs) = verify_backup_group(
                    datastore,
                    &backup_group,
                    verified_chunks,
                    corrupt_chunks,
                    None,
                    worker.clone(),
                    worker.upid(),
                    None,
                )?;
                failed_dirs
            } else {
                // datastore-wide verify: without the VERIFY privilege,
                // restrict the run to the caller's own backups
                let privs = CachedUserInfo::new()?
                    .lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
            };
            if failed_dirs.len() > 0 {
                worker.log("Failed to verify following snapshots/groups:");
                for dir in failed_dirs {
                    worker.log(format!("\t{}", dir));
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
663
255f378a
DM
// Splice the shared `keep-*` prune schema entries into a parameter array.
// Takes one or two token lists: entries in the first list go before the
// common parameters, entries in the optional second list go after.
// NOTE(review): the macro name misspells "parameters"; it is `#[macro_export]`ed,
// so renaming would break external callers - kept as-is.
#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
706
db1e061d
DM
/// Return schema of the `prune` endpoint: an array of `PruneListItem`s.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
711
0ab08ac9
DM
// Hand-written ApiMethod for `prune` (instead of the #[api] macro) because
// the parameter list is assembled with the shared prune-parameter macro.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
);
255f378a 733
83b7db02
DM
/// Prune a backup group according to the `keep-*` retention options.
///
/// Computes which snapshots to keep via `compute_prune_info`. With
/// `dry-run` set, only reports the keep/remove decision per snapshot.
/// Otherwise removes the non-kept snapshots, logging each decision into a
/// synchronously-run worker task. If no retention option is given, all
/// snapshots are kept. Requires MODIFY privilege or group ownership.
fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    // no retention options at all means: keep everything
    let keep_all = !prune_options.keeps_something();

    if dry_run {
        // report decisions only - no worker task, no deletion
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
            store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();


        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        if !(dry_run || keep) {
            // a failed removal is logged as a warning but does not abort the run
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
845
dfc58d47
DM
846#[api(
847 input: {
848 properties: {
849 store: {
850 schema: DATASTORE_SCHEMA,
851 },
852 },
853 },
854 returns: {
855 schema: UPID_SCHEMA,
856 },
bb34b589 857 access: {
54552dda 858 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
bb34b589 859 },
dfc58d47
DM
860)]
861/// Start garbage collection.
6049b71f 862fn start_garbage_collection(
dfc58d47 863 store: String,
6049b71f 864 _info: &ApiMethod,
dd5495d6 865 rpcenv: &mut dyn RpcEnvironment,
6049b71f 866) -> Result<Value, Error> {
15e9b4ed 867
3e6a7dee 868 let datastore = DataStore::lookup_datastore(&store)?;
e6dc35ac 869 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
15e9b4ed 870
4fdf5ddf
DC
871 let job = Job::new("garbage_collection", &store)
872 .map_err(|_| format_err!("garbage collection already running"))?;
15e9b4ed 873
0f778e06 874 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
15e9b4ed 875
4fdf5ddf
DC
876 let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
877 .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
0f778e06
DM
878
879 Ok(json!(upid_str))
15e9b4ed
DM
880}
881
a92830dc
DM
882#[api(
883 input: {
884 properties: {
885 store: {
886 schema: DATASTORE_SCHEMA,
887 },
888 },
889 },
890 returns: {
891 type: GarbageCollectionStatus,
bb34b589
DM
892 },
893 access: {
894 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
895 },
a92830dc
DM
896)]
897/// Garbage collection status.
5eeea607 898pub fn garbage_collection_status(
a92830dc 899 store: String,
6049b71f 900 _info: &ApiMethod,
dd5495d6 901 _rpcenv: &mut dyn RpcEnvironment,
a92830dc 902) -> Result<GarbageCollectionStatus, Error> {
691c89a0 903
f2b99c34
DM
904 let datastore = DataStore::lookup_datastore(&store)?;
905
f2b99c34 906 let status = datastore.last_gc_status();
691c89a0 907
a92830dc 908 Ok(status)
691c89a0
DM
909}
910
bb34b589 911#[api(
30fb6025
DM
912 returns: {
913 description: "List the accessible datastores.",
914 type: Array,
915 items: {
916 description: "Datastore name and description.",
455e5f71 917 type: DataStoreListItem,
30fb6025
DM
918 },
919 },
bb34b589 920 access: {
54552dda 921 permission: &Permission::Anybody,
bb34b589
DM
922 },
923)]
924/// Datastore list
6049b71f
DM
925fn get_datastore_list(
926 _param: Value,
927 _info: &ApiMethod,
54552dda 928 rpcenv: &mut dyn RpcEnvironment,
455e5f71 929) -> Result<Vec<DataStoreListItem>, Error> {
15e9b4ed 930
d0187a51 931 let (config, _digest) = datastore::config()?;
15e9b4ed 932
e6dc35ac 933 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda
DM
934 let user_info = CachedUserInfo::new()?;
935
30fb6025 936 let mut list = Vec::new();
54552dda 937
30fb6025 938 for (store, (_, data)) in &config.sections {
e6dc35ac 939 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
54552dda 940 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
30fb6025 941 if allowed {
455e5f71
FG
942 list.push(
943 DataStoreListItem {
944 store: store.clone(),
945 comment: data["comment"].as_str().map(String::from),
946 }
947 );
30fb6025 948 }
54552dda
DM
949 }
950
30fb6025 951 Ok(list.into())
15e9b4ed
DM
952}
953
0ab08ac9
DM
// Hand-written ApiMethod: `download_file` is an AsyncHttp handler streaming
// a raw file, which the #[api] macro does not cover.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
691c89a0 972
9e47c0a5
DM
/// Stream a single raw file out of a backup snapshot as an HTTP response.
///
/// Resolves store/snapshot/file-name from `param`, enforces READ privilege
/// or group ownership, then streams the file with a chunked
/// `application/octet-stream` body. The file is served verbatim (no
/// decryption or decoding - see `download_file_decoded` for that).
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        // <base>/<relative snapshot path>/<file-name>
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // stream the file in chunks instead of loading it into memory
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1023
6ef9bb59
DC
// Hand-written ApiMethod for the decoded-download AsyncHttp handler;
// same parameter set and permissions as API_METHOD_DOWNLOAD_FILE.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1042
/// HTTP handler: stream a single snapshot file back to the client in decoded
/// form ('.didx'/'.fidx' indexes are reassembled from their chunks, '.blob'
/// files are unwrapped). Encrypted files are refused.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Caller needs Datastore.Read, or Datastore.Backup + ownership of the group.
        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // Refuse encrypted files up front - the server cannot decode them.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Last '.'-separated component selects the decoder below.
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // Verify the index against the manifest before serving data.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // Fixed indexes use a larger (4 MiB) buffer matching the chunk size.
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1139
/// API method: upload the client backup log ('client.log.blob') into an
/// existing snapshot. Handled by [`upload_backup_log`].
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    // Datastore.Backup privilege; the handler additionally enforces ownership.
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
9e47c0a5 1156
07ee2235
DM
/// HTTP handler: store the request body as 'client.log.blob' inside a
/// snapshot. Only the snapshot owner may upload, and an existing log is
/// never overwritten. The uploaded blob's CRC is verified server-side.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Strict ownership check - a privilege alone is not sufficient here.
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // NOTE(review): exists()-then-write is a TOCTOU window; presumably
        // acceptable since only the single owner can upload - confirm.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        // Collect the whole request body into memory before validating.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1209
5b1cfa01
DC
1210#[api(
1211 input: {
1212 properties: {
1213 store: {
1214 schema: DATASTORE_SCHEMA,
1215 },
1216 "backup-type": {
1217 schema: BACKUP_TYPE_SCHEMA,
1218 },
1219 "backup-id": {
1220 schema: BACKUP_ID_SCHEMA,
1221 },
1222 "backup-time": {
1223 schema: BACKUP_TIME_SCHEMA,
1224 },
1225 "filepath": {
1226 description: "Base64 encoded path.",
1227 type: String,
1228 }
1229 },
1230 },
1231 access: {
1232 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1233 },
1234)]
1235/// Get the entries of the given path of the catalog
1236fn catalog(
1237 store: String,
1238 backup_type: String,
1239 backup_id: String,
1240 backup_time: i64,
1241 filepath: String,
1242 _param: Value,
1243 _info: &ApiMethod,
1244 rpcenv: &mut dyn RpcEnvironment,
1245) -> Result<Value, Error> {
1246 let datastore = DataStore::lookup_datastore(&store)?;
1247
e6dc35ac 1248 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
5b1cfa01 1249
e0e5b442 1250 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
5b1cfa01 1251
bff85572 1252 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
5b1cfa01 1253
9238cdf5
FG
1254 let file_name = CATALOG_NAME;
1255
2d55beec 1256 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1257 for file in files {
1258 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1259 bail!("cannot decode '{}' - is encrypted", file_name);
1260 }
1261 }
1262
5b1cfa01
DC
1263 let mut path = datastore.base_path();
1264 path.push(backup_dir.relative_path());
9238cdf5 1265 path.push(file_name);
5b1cfa01
DC
1266
1267 let index = DynamicIndexReader::open(&path)
1268 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1269
2d55beec
FG
1270 let (csum, size) = index.compute_csum();
1271 manifest.verify_file(&file_name, &csum, size)?;
1272
14f6c9cb 1273 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
5b1cfa01
DC
1274 let reader = BufferedDynamicReader::new(index, chunk_reader);
1275
1276 let mut catalog_reader = CatalogReader::new(reader);
1277 let mut current = catalog_reader.root()?;
1278 let mut components = vec![];
1279
1280
1281 if filepath != "root" {
1282 components = base64::decode(filepath)?;
1283 if components.len() > 0 && components[0] == '/' as u8 {
1284 components.remove(0);
1285 }
1286 for component in components.split(|c| *c == '/' as u8) {
1287 if let Some(entry) = catalog_reader.lookup(&current, component)? {
1288 current = entry;
1289 } else {
1290 bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
1291 }
1292 }
1293 }
1294
1295 let mut res = Vec::new();
1296
1297 for direntry in catalog_reader.read_dir(&current)? {
1298 let mut components = components.clone();
1299 components.push('/' as u8);
1300 components.extend(&direntry.name);
1301 let path = base64::encode(components);
1302 let text = String::from_utf8_lossy(&direntry.name);
1303 let mut entry = json!({
1304 "filepath": path,
1305 "text": text,
1306 "type": CatalogEntryType::from(&direntry.attr).to_string(),
1307 "leaf": true,
1308 });
1309 match direntry.attr {
1310 DirEntryAttribute::Directory { start: _ } => {
1311 entry["leaf"] = false.into();
1312 },
1313 DirEntryAttribute::File { size, mtime } => {
1314 entry["size"] = size.into();
1315 entry["mtime"] = mtime.into();
1316 },
1317 _ => {},
1318 }
1319 res.push(entry);
1320 }
1321
1322 Ok(res.into())
1323}
1324
53a561a2
WB
/// Recursively add `file` (and, for directories, everything below it) to the
/// zip stream `zip`, with `prefix` stripped from each archived path.
///
/// Returns a boxed future because async recursion needs an indirection
/// (the future type would otherwise be infinitely sized).
fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                // Hardlinks are materialized as a full copy of the target file.
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                // Emit the directory entry itself (no contents), then recurse.
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}
1384
d33d8f4e
DC
/// API method: download a single file (or a directory as a zip) out of a
/// pxar archive inside a snapshot. Handled by [`pxar_file_download`].
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    // Datastore.Read suffices on its own; Datastore.Backup is additionally
    // subject to the ownership check inside the handler.
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1403
1404fn pxar_file_download(
1405 _parts: Parts,
1406 _req_body: Body,
1407 param: Value,
1408 _info: &ApiMethod,
1409 rpcenv: Box<dyn RpcEnvironment>,
1410) -> ApiResponseFuture {
1411
1412 async move {
1413 let store = tools::required_string_param(&param, "store")?;
1414 let datastore = DataStore::lookup_datastore(&store)?;
1415
e6dc35ac 1416 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
d33d8f4e
DC
1417
1418 let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
1419
1420 let backup_type = tools::required_string_param(&param, "backup-type")?;
1421 let backup_id = tools::required_string_param(&param, "backup-id")?;
1422 let backup_time = tools::required_integer_param(&param, "backup-time")?;
1423
e0e5b442 1424 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
d33d8f4e 1425
bff85572 1426 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
d33d8f4e 1427
d33d8f4e
DC
1428 let mut components = base64::decode(&filepath)?;
1429 if components.len() > 0 && components[0] == '/' as u8 {
1430 components.remove(0);
1431 }
1432
1433 let mut split = components.splitn(2, |c| *c == '/' as u8);
9238cdf5 1434 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
d33d8f4e 1435 let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
2d55beec 1436 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1437 for file in files {
1438 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1439 bail!("cannot decode '{}' - is encrypted", pxar_name);
1440 }
1441 }
d33d8f4e 1442
9238cdf5
FG
1443 let mut path = datastore.base_path();
1444 path.push(backup_dir.relative_path());
1445 path.push(pxar_name);
d33d8f4e
DC
1446
1447 let index = DynamicIndexReader::open(&path)
1448 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1449
2d55beec
FG
1450 let (csum, size) = index.compute_csum();
1451 manifest.verify_file(&pxar_name, &csum, size)?;
1452
14f6c9cb 1453 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
d33d8f4e
DC
1454 let reader = BufferedDynamicReader::new(index, chunk_reader);
1455 let archive_size = reader.archive_size();
1456 let reader = LocalDynamicReadAt::new(reader);
1457
1458 let decoder = Accessor::new(reader, archive_size).await?;
1459 let root = decoder.open_root().await?;
1460 let file = root
1461 .lookup(OsStr::from_bytes(file_path)).await?
1462 .ok_or(format_err!("error opening '{:?}'", file_path))?;
1463
804f6143
DC
1464 let body = match file.kind() {
1465 EntryKind::File { .. } => Body::wrap_stream(
1466 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1467 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1468 err
1469 }),
1470 ),
1471 EntryKind::Hardlink(_) => Body::wrap_stream(
1472 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1473 .map_err(move |err| {
1474 eprintln!(
1475 "error during streaming of hardlink '{:?}' - {}",
1476 filepath, err
1477 );
1478 err
1479 }),
1480 ),
1481 EntryKind::Directory => {
1482 let (sender, receiver) = tokio::sync::mpsc::channel(100);
1483 let mut prefix = PathBuf::new();
1484 let mut components = file.entry().path().components();
1485 components.next_back(); // discar last
1486 for comp in components {
1487 prefix.push(comp);
1488 }
d33d8f4e 1489
804f6143 1490 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
804f6143
DC
1491
1492 crate::server::spawn_internal_task(async move {
53a561a2
WB
1493 let mut zipencoder = ZipEncoder::new(channelwriter);
1494 let mut decoder = decoder;
1495 recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
804f6143
DC
1496 .await
1497 .map_err(|err| eprintln!("error during creating of zip: {}", err))?;
1498
1499 zipencoder
1500 .finish()
1501 .await
1502 .map_err(|err| eprintln!("error during finishing of zip: {}", err))
1503 });
1504
1505 Body::wrap_stream(receiver.map_err(move |err| {
1506 eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
d33d8f4e 1507 err
804f6143
DC
1508 }))
1509 }
1510 other => bail!("cannot download file of type {:?}", other),
1511 };
d33d8f4e
DC
1512
1513 // fixme: set other headers ?
1514 Ok(Response::builder()
1515 .status(StatusCode::OK)
1516 .header(header::CONTENT_TYPE, "application/octet-stream")
1517 .body(body)
1518 .unwrap())
1519 }.boxed()
1520}
1521
1a0d3d11
DM
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
///
/// Thin wrapper around the generic RRD helper; the "datastore/<name>" key
/// selects the per-datastore RRD series, the listed columns pick the
/// space-usage and I/O counters.
fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}
1560
912b3f5b
DM
1561#[api(
1562 input: {
1563 properties: {
1564 store: {
1565 schema: DATASTORE_SCHEMA,
1566 },
1567 "backup-type": {
1568 schema: BACKUP_TYPE_SCHEMA,
1569 },
1570 "backup-id": {
1571 schema: BACKUP_ID_SCHEMA,
1572 },
1573 "backup-time": {
1574 schema: BACKUP_TIME_SCHEMA,
1575 },
1576 },
1577 },
1578 access: {
1401f4be 1579 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
912b3f5b
DM
1580 },
1581)]
1582/// Get "notes" for a specific backup
1583fn get_notes(
1584 store: String,
1585 backup_type: String,
1586 backup_id: String,
1587 backup_time: i64,
1588 rpcenv: &mut dyn RpcEnvironment,
1589) -> Result<String, Error> {
1590 let datastore = DataStore::lookup_datastore(&store)?;
1591
e6dc35ac 1592 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
e0e5b442 1593 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b 1594
1401f4be 1595 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
912b3f5b 1596
883aa6d5 1597 let (manifest, _) = datastore.load_manifest(&backup_dir)?;
912b3f5b 1598
883aa6d5 1599 let notes = manifest.unprotected["notes"]
912b3f5b
DM
1600 .as_str()
1601 .unwrap_or("");
1602
1603 Ok(String::from(notes))
1604}
1605
1606#[api(
1607 input: {
1608 properties: {
1609 store: {
1610 schema: DATASTORE_SCHEMA,
1611 },
1612 "backup-type": {
1613 schema: BACKUP_TYPE_SCHEMA,
1614 },
1615 "backup-id": {
1616 schema: BACKUP_ID_SCHEMA,
1617 },
1618 "backup-time": {
1619 schema: BACKUP_TIME_SCHEMA,
1620 },
1621 notes: {
1622 description: "A multiline text.",
1623 },
1624 },
1625 },
1626 access: {
b728a69e
FG
1627 permission: &Permission::Privilege(&["datastore", "{store}"],
1628 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1629 true),
912b3f5b
DM
1630 },
1631)]
1632/// Set "notes" for a specific backup
1633fn set_notes(
1634 store: String,
1635 backup_type: String,
1636 backup_id: String,
1637 backup_time: i64,
1638 notes: String,
1639 rpcenv: &mut dyn RpcEnvironment,
1640) -> Result<(), Error> {
1641 let datastore = DataStore::lookup_datastore(&store)?;
1642
e6dc35ac 1643 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
e0e5b442 1644 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b 1645
b728a69e 1646 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
912b3f5b 1647
1a374fcf
SR
1648 datastore.update_manifest(&backup_dir,|manifest| {
1649 manifest.unprotected["notes"] = notes.into();
1650 }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
1651
1652 Ok(())
1653}
1654
72be0eb1 1655#[api(
4940012d 1656 input: {
72be0eb1
DW
1657 properties: {
1658 store: {
1659 schema: DATASTORE_SCHEMA,
1660 },
1661 "backup-type": {
1662 schema: BACKUP_TYPE_SCHEMA,
1663 },
1664 "backup-id": {
1665 schema: BACKUP_ID_SCHEMA,
1666 },
1667 "new-owner": {
e6dc35ac 1668 type: Authid,
72be0eb1
DW
1669 },
1670 },
4940012d
FG
1671 },
1672 access: {
bff85572
FG
1673 permission: &Permission::Anybody,
1674 description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
4940012d 1675 },
72be0eb1
DW
1676)]
1677/// Change owner of a backup group
1678fn set_backup_owner(
1679 store: String,
1680 backup_type: String,
1681 backup_id: String,
e6dc35ac 1682 new_owner: Authid,
bff85572 1683 rpcenv: &mut dyn RpcEnvironment,
72be0eb1
DW
1684) -> Result<(), Error> {
1685
1686 let datastore = DataStore::lookup_datastore(&store)?;
1687
1688 let backup_group = BackupGroup::new(backup_type, backup_id);
1689
bff85572
FG
1690 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1691
72be0eb1
DW
1692 let user_info = CachedUserInfo::new()?;
1693
bff85572
FG
1694 let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
1695
1696 let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
1697 // High-privilege user/token
1698 true
1699 } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
1700 let owner = datastore.get_owner(&backup_group)?;
1701
1702 match (owner.is_token(), new_owner.is_token()) {
1703 (true, true) => {
1704 // API token to API token, owned by same user
1705 let owner = owner.user();
1706 let new_owner = new_owner.user();
1707 owner == new_owner && Authid::from(owner.clone()) == auth_id
1708 },
1709 (true, false) => {
1710 // API token to API token owner
1711 Authid::from(owner.user().clone()) == auth_id
1712 && new_owner == auth_id
1713 },
1714 (false, true) => {
1715 // API token owner to API token
1716 owner == auth_id
1717 && Authid::from(new_owner.user().clone()) == auth_id
1718 },
1719 (false, false) => {
1720 // User to User, not allowed for unprivileged users
1721 false
1722 },
1723 }
1724 } else {
1725 false
1726 };
1727
1728 if !allowed {
1729 return Err(http_err!(UNAUTHORIZED,
1730 "{} does not have permission to change owner of backup group '{}' to {}",
1731 auth_id,
1732 backup_group,
1733 new_owner,
1734 ));
1735 }
1736
e6dc35ac
FG
1737 if !user_info.is_active_auth_id(&new_owner) {
1738 bail!("{} '{}' is inactive or non-existent",
1739 if new_owner.is_token() {
1740 "API token".to_string()
1741 } else {
1742 "user".to_string()
1743 },
1744 new_owner);
72be0eb1
DW
1745 }
1746
1747 datastore.set_owner(&backup_group, &new_owner, true)?;
1748
1749 Ok(())
1750}
1751
// Per-datastore API subdirectories. NOTE: entries are kept in alphabetical
// order by subdir name.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1833
// Router for a single datastore: GET lists the available subdirs,
// everything else dispatches into DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


// Top-level router: GET lists all datastores; "{store}" is captured from the
// path and routed to the per-datastore router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);