//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_sys::sortable;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_router::{
    list_subdirs_api_method, http_err, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, SubdirMap, Permission,
};
use proxmox_schema::*;
use proxmox_sys::{task_log, task_warn};
use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{ Authid, BackupContent, Counts, CryptMode,
    DataStoreListItem, GarbageCollectionStatus, GroupListItem,
    SnapshotListItem, SnapshotVerifyState, PruneOptions,
    DataStoreStatus, RRDMode, RRDTimeFrame,
    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
    IGNORE_VERIFIED_BACKUPS_SCHEMA, UPID_SCHEMA,
    VERIFICATION_OUTDATED_AFTER_SCHEMA, PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ, PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY,

};
use pbs_client::pxar::create_zip;
use pbs_datastore::{
    check_backup_owner, DataStore, BackupDir, BackupGroup, StoreProgress, LocalChunkReader,
    CATALOG_NAME,
};
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::{FixedIndexReader};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_tools::json::{required_integer_param, required_string_param};
use pbs_config::CachedUserInfo;
use proxmox_rest_server::{WorkerTask, formatter};

use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    verify_all_backups, verify_backup_group, verify_backup_dir, verify_filter,
};

use crate::server::jobstate::Job;


const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
    let mut note_path = store.base_path();
    note_path.push(group.group_path());
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}
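
// Hedged sketch (not part of the original source): assuming the usual on-disk
// layout where `group.group_path()` resolves to "<backup-type>/<backup-id>",
// the group note for vm/100 on a datastore rooted at /mnt/datastore/store1
// would end up at (placeholder paths):
//
//     /mnt/datastore/store1/vm/100/notes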

fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);

    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}
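
// Hedged usage sketch (illustration only, mirroring how the handlers below call
// this helper; "store1", "vm" and "100" are placeholder values):
//
//     let datastore = DataStore::lookup_datastore("store1")?;
//     let group = BackupGroup::new("vm", "100");
//     check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_READ)?;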

fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}
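
// Hedged sketch of the returned file list (illustrative values and field names,
// exact serialization depends on the pbs-api-types definitions): besides the
// entries recorded in the manifest, the manifest blob itself is appended, e.g.
// a result serialized to JSON might look roughly like:
//
//     [
//       { "filename": "root.pxar.didx", "crypt-mode": "encrypt", "size": 123456 },
//       { "filename": "index.json.blob", "crypt-mode": "sign-only", "size": 512 }
//     ]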

fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;

    let group_info = backup_groups
        .into_iter()
        .fold(Vec::new(), |mut group_info, group| {
            let owner = match datastore.get_owner(&group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return group_info;
                },
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return group_info;
            }

            let snapshots = match group.list_backups(&datastore.base_path()) {
                Ok(snapshots) => snapshots,
                Err(_) => {
                    return group_info;
                },
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return group_info;
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |last, curr| {
                    if curr.is_finished()
                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
                        curr
                    } else {
                        last
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &group);
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup_type: group.backup_type().to_string(),
                backup_id: group.backup_id().to_string(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            group_info
        });

    Ok(group_info)
}
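
// Hedged example call (assuming the usual PBS routing under
// /api2/json/admin/datastore/{store}/groups; host, token and store name are
// placeholders):
//
//     curl -s -H "Authorization: PBSAPIToken=user@pbs!token:SECRET" \
//         https://pbs.example.com:8007/api2/json/admin/datastore/store1/groups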

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    backup_type: String,
    backup_id: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(backup_type, backup_id);
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    if !datastore.remove_backup_group(&group)? {
        bail!("did not delete whole group because of protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store)?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots (
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            let mut groups = Vec::with_capacity(1);
            groups.push(BackupGroup::new(backup_type, backup_id));
            groups
        },
        (Some(backup_type), None) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_type() == backup_type)
                .collect()
        },
        (None, Some(backup_id)) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_id() == backup_id)
                .collect()
        },
        _ => BackupInfo::list_backup_groups(&base_path)?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();
        let protected = info.backup_dir.is_protected(base_path.clone());

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    },
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            },
        }
    };

    groups
        .iter()
        .try_fold(Vec::new(), |mut snapshots, group| {
            let owner = match datastore.get_owner(group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return Ok(snapshots);
                },
            };

            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(snapshots);
            }

            let group_backups = group.list_backups(&datastore.base_path())?;

            snapshots.extend(
                group_backups
                    .into_iter()
                    .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
            );

            Ok(snapshots)
        })
}
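
// Hedged example call (assuming the usual PBS routing under
// /api2/json/admin/datastore/{store}/snapshots; both filter parameters are
// optional and all values below are placeholders):
//
//     curl -s -H "Authorization: PBSAPIToken=user@pbs!token:SECRET" \
//         "https://pbs.example.com:8007/api2/json/admin/datastore/store1/snapshots?backup-type=vm&backup-id=100"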

fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let groups = BackupInfo::list_backup_groups(&base_path)?;

    groups.iter()
        .filter(|group| {
            let owner = match store.get_owner(group) {
                Ok(owner) => owner,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              store.name(),
                              group,
                              err);
                    return false;
                },
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&base_path)?.len() as u64;

            // only include groups with snapshots (avoid confusing users
            // by counting/displaying empty groups)
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    "ct" => counts.ct.get_or_insert(Default::default()),
                    "vm" => counts.vm.get_or_insert(Default::default()),
                    "host" => counts.host.get_or_insert(Default::default()),
                    _ => counts.other.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
}
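
// Hedged sketch of a resulting `Counts` value (illustrative numbers and field
// names): only group types that actually occur are set and empty groups are
// skipped, so a store with two VM groups (5 snapshots total) and one host
// group (2 snapshots) might serialize roughly as:
//
//     { "vm": { "groups": 2, "snapshots": 5 }, "host": { "groups": 1, "snapshots": 2 } }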

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}
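
// Hedged example call (assuming the usual PBS routing under
// /api2/json/admin/datastore/{store}/status; pass verbose=true to also get
// snapshot counts and the last GC status; values are placeholders):
//
//     curl -s -H "Authorization: PBSAPIToken=user@pbs!token:SECRET" \
//         "https://pbs.example.com:8007/api2/json/admin/datastore/store1/status?verbose=true"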

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )?;
                failed_dirs
            } else {
                let privs = CachedUserInfo::new()?
                    .lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    owner,
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}
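
// Hedged illustration of the worker ids built above (placeholder values): a
// whole-datastore verify uses just the store name, while group and snapshot
// verifies encode the target, e.g.
//
//     "store1"                    (verify)
//     "store1:vm/100"             (verify_group)
//     "store1:vm/100/61A2B3C4"    (verify_snapshot, backup time formatted as {:08X})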

#[api(
    input: {
        properties: {
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune a group on the datastore
pub fn prune(
    backup_id: String,
    backup_type: String,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(&backup_type, &backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
                "protected": mark.protected(),
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(worker, "retention options: {}", pbs_datastore::prune::cli_options_string(&prune_options));
        task_log!(worker, "Starting prune on store \"{}\" group \"{}/{}\"",
                  store, backup_type, backup_id);
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();


        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            mark,
        );

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
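
// Hedged sketch of one entry of the returned prune list (illustrative values;
// the same shape is produced for dry runs and real runs by the json! calls
// above):
//
//     {
//       "backup-type": "vm",
//       "backup-id": "100",
//       "backup-time": 1633072800,
//       "keep": false,
//       "protected": false
//     }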

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(store.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| crate::server::prune_datastore(
            worker,
            auth_id,
            prune_options,
            &store,
            datastore,
            dry_run
        ),
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
        .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {

    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            list.push(
                DataStoreListItem {
                    store: store.clone(),
                    comment: data["comment"].as_str().map(String::from),
                }
            );
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| bytes.freeze())
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
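
// Hedged example call (assuming the usual PBS routing exposes this handler as
// /api2/json/admin/datastore/{store}/download; all values are placeholders):
//
//     curl -s -H "Authorization: PBSAPIToken=user@pbs!token:SECRET" -o root.pxar.didx \
//         "https://pbs.example.com:8007/api2/json/admin/datastore/store1/download?backup-type=vm&backup-id=100&backup-time=1633072800&file-name=root.pxar.didx"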

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}
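
// Hedged note on the `filepath` parameter (illustration only): it is the
// base64 encoded path inside the catalog; passing "root" (or "/") lists the
// catalog root. Listing "/" explicitly could for example be requested with
// filepath=Lw== since base64("/") == "Lw==".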

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path).await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            path, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                proxmox_rest_server::spawn_internal_task(
                    create_zip(channelwriter, decoder, path.clone(), false)
                );
                Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
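
// Hedged note on the `filepath` parameter (illustration only): it is base64 of
// "<archive-name>/<path inside the archive>"; the first component selects the
// pxar index and the remainder is looked up inside it, e.g. (before encoding):
//
//     /root.pxar.didx/etc/hostname
//
// A directory target is streamed back as a zip archive, a plain file as raw
// bytes.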

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrame,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrame,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    create_value_from_rrd(
        &format!("datastore/{}", store),
        &[
            "total", "used",
            "read_ios", "read_bytes",
            "write_ios", "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a backup group
pub fn get_group_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = BackupGroup::new(backup_type, backup_id);

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = BackupGroup::new(backup_type, backup_id);

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}
1756
8292d3d2
DC
1757#[api(
1758 input: {
1759 properties: {
1760 store: {
1761 schema: DATASTORE_SCHEMA,
1762 },
1763 "backup-type": {
1764 schema: BACKUP_TYPE_SCHEMA,
1765 },
1766 "backup-id": {
1767 schema: BACKUP_ID_SCHEMA,
1768 },
1769 "backup-time": {
1770 schema: BACKUP_TIME_SCHEMA,
1771 },
1772 },
1773 },
1774 access: {
1775 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1776 },
1777)]
 1778/// Query the protection status of a specific backup snapshot
1779pub fn get_protection(
1780 store: String,
1781 backup_type: String,
1782 backup_id: String,
1783 backup_time: i64,
1784 rpcenv: &mut dyn RpcEnvironment,
1785) -> Result<bool, Error> {
1786 let datastore = DataStore::lookup_datastore(&store)?;
1787
1788 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1789 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1790
1791 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
1792
9b1e2ae8 1793 Ok(backup_dir.is_protected(datastore.base_path()))
8292d3d2
DC
1794}
1795
1796#[api(
1797 input: {
1798 properties: {
1799 store: {
1800 schema: DATASTORE_SCHEMA,
1801 },
1802 "backup-type": {
1803 schema: BACKUP_TYPE_SCHEMA,
1804 },
1805 "backup-id": {
1806 schema: BACKUP_ID_SCHEMA,
1807 },
1808 "backup-time": {
1809 schema: BACKUP_TIME_SCHEMA,
1810 },
1811 protected: {
1812 description: "Enable/disable protection.",
1813 },
1814 },
1815 },
1816 access: {
1817 permission: &Permission::Privilege(&["datastore", "{store}"],
1818 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1819 true),
1820 },
1821)]
 1822/// Enable or disable protection for a specific backup snapshot
1823pub fn set_protection(
1824 store: String,
1825 backup_type: String,
1826 backup_id: String,
1827 backup_time: i64,
1828 protected: bool,
1829 rpcenv: &mut dyn RpcEnvironment,
1830) -> Result<(), Error> {
1831 let datastore = DataStore::lookup_datastore(&store)?;
1832
1833 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1834 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
1835
1836 check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
1837
1838 datastore.update_protection(&backup_dir, protected)
1839}
1840
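// Editor's sketch: is_protected() and update_protection() are assumed to be
// backed by a marker file inside the snapshot directory, so querying protection
// is an existence check and toggling it creates or removes the marker. The real
// marker name and any locking live in pbs_datastore; everything below is purely
// illustrative.

use std::path::{Path, PathBuf};

fn protection_marker(snapshot_dir: &Path) -> PathBuf {
    // ".protected" is an assumption made for this sketch
    snapshot_dir.join(".protected")
}

fn query_protection(snapshot_dir: &Path) -> bool {
    protection_marker(snapshot_dir).exists()
}

fn toggle_protection(snapshot_dir: &Path, protected: bool) -> std::io::Result<()> {
    let marker = protection_marker(snapshot_dir);
    if protected {
        std::fs::File::create(marker).map(|_| ())
    } else if marker.exists() {
        std::fs::remove_file(marker)
    } else {
        Ok(())
    }
}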
72be0eb1 1841#[api(
4940012d 1842 input: {
72be0eb1
DW
1843 properties: {
1844 store: {
1845 schema: DATASTORE_SCHEMA,
1846 },
1847 "backup-type": {
1848 schema: BACKUP_TYPE_SCHEMA,
1849 },
1850 "backup-id": {
1851 schema: BACKUP_ID_SCHEMA,
1852 },
1853 "new-owner": {
e6dc35ac 1854 type: Authid,
72be0eb1
DW
1855 },
1856 },
4940012d
FG
1857 },
1858 access: {
bff85572
FG
1859 permission: &Permission::Anybody,
 1860 description: "Requires Datastore.Modify on the whole datastore, or Datastore.Backup for owned backups when changing ownership between a user and that user's own API tokens."
4940012d 1861 },
72be0eb1
DW
1862)]
1863/// Change owner of a backup group
bf78f708 1864pub fn set_backup_owner(
72be0eb1
DW
1865 store: String,
1866 backup_type: String,
1867 backup_id: String,
e6dc35ac 1868 new_owner: Authid,
bff85572 1869 rpcenv: &mut dyn RpcEnvironment,
72be0eb1
DW
1870) -> Result<(), Error> {
1871
1872 let datastore = DataStore::lookup_datastore(&store)?;
1873
1874 let backup_group = BackupGroup::new(backup_type, backup_id);
1875
bff85572
FG
1876 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1877
72be0eb1
DW
1878 let user_info = CachedUserInfo::new()?;
1879
bff85572
FG
1880 let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
1881
1882 let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
1883 // High-privilege user/token
1884 true
1885 } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
1886 let owner = datastore.get_owner(&backup_group)?;
1887
1888 match (owner.is_token(), new_owner.is_token()) {
1889 (true, true) => {
1890 // API token to API token, owned by same user
1891 let owner = owner.user();
1892 let new_owner = new_owner.user();
1893 owner == new_owner && Authid::from(owner.clone()) == auth_id
1894 },
1895 (true, false) => {
1896 // API token to API token owner
1897 Authid::from(owner.user().clone()) == auth_id
1898 && new_owner == auth_id
1899 },
1900 (false, true) => {
1901 // API token owner to API token
1902 owner == auth_id
1903 && Authid::from(new_owner.user().clone()) == auth_id
1904 },
1905 (false, false) => {
1906 // User to User, not allowed for unprivileged users
1907 false
1908 },
1909 }
1910 } else {
1911 false
1912 };
1913
1914 if !allowed {
1915 return Err(http_err!(UNAUTHORIZED,
1916 "{} does not have permission to change owner of backup group '{}' to {}",
1917 auth_id,
1918 backup_group,
1919 new_owner,
1920 ));
1921 }
1922
e6dc35ac
FG
1923 if !user_info.is_active_auth_id(&new_owner) {
1924 bail!("{} '{}' is inactive or non-existent",
1925 if new_owner.is_token() {
1926 "API token".to_string()
1927 } else {
1928 "user".to_string()
1929 },
1930 new_owner);
72be0eb1
DW
1931 }
1932
1933 datastore.set_owner(&backup_group, &new_owner, true)?;
1934
1935 Ok(())
1936}
1937
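// Editor's condensation of the match above (illustrative only): without
// Datastore.Modify, ownership may only move between a user and that same user's
// own API tokens, and the caller has to be authenticated as the plain user.
// `same_user` below stands for "owner, new_owner and the caller all resolve to
// one user, and the caller is not a token".

fn transfer_allowed_with_backup_priv_only(
    owner_is_token: bool,
    new_owner_is_token: bool,
    same_user: bool,
) -> bool {
    match (owner_is_token, new_owner_is_token) {
        // user -> user transfers always need Datastore.Modify
        (false, false) => false,
        // token <-> token and token <-> user must stay within a single user
        _ => same_user,
    }
}

#[test]
fn user_to_user_always_needs_modify_priv() {
    assert!(!transfer_allowed_with_backup_priv_only(false, false, true));
    assert!(transfer_allowed_with_backup_priv_only(true, false, true));
    assert!(!transfer_allowed_with_backup_priv_only(true, true, false));
}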
552c2259 1938#[sortable]
255f378a 1939const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
5b1cfa01
DC
1940 (
1941 "catalog",
1942 &Router::new()
1943 .get(&API_METHOD_CATALOG)
1944 ),
72be0eb1
DW
1945 (
1946 "change-owner",
1947 &Router::new()
1948 .post(&API_METHOD_SET_BACKUP_OWNER)
1949 ),
255f378a
DM
1950 (
1951 "download",
1952 &Router::new()
1953 .download(&API_METHOD_DOWNLOAD_FILE)
1954 ),
6ef9bb59
DC
1955 (
1956 "download-decoded",
1957 &Router::new()
1958 .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
1959 ),
255f378a
DM
1960 (
1961 "files",
1962 &Router::new()
09b1f7b2 1963 .get(&API_METHOD_LIST_SNAPSHOT_FILES)
255f378a
DM
1964 ),
1965 (
1966 "gc",
1967 &Router::new()
1968 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
1969 .post(&API_METHOD_START_GARBAGE_COLLECTION)
1970 ),
d6688884
SR
1971 (
1972 "group-notes",
1973 &Router::new()
1974 .get(&API_METHOD_GET_GROUP_NOTES)
1975 .put(&API_METHOD_SET_GROUP_NOTES)
1976 ),
255f378a
DM
1977 (
1978 "groups",
1979 &Router::new()
b31c8019 1980 .get(&API_METHOD_LIST_GROUPS)
f32791b4 1981 .delete(&API_METHOD_DELETE_GROUP)
255f378a 1982 ),
912b3f5b
DM
1983 (
1984 "notes",
1985 &Router::new()
1986 .get(&API_METHOD_GET_NOTES)
1987 .put(&API_METHOD_SET_NOTES)
1988 ),
8292d3d2
DC
1989 (
1990 "protected",
1991 &Router::new()
1992 .get(&API_METHOD_GET_PROTECTION)
1993 .put(&API_METHOD_SET_PROTECTION)
1994 ),
255f378a
DM
1995 (
1996 "prune",
1997 &Router::new()
1998 .post(&API_METHOD_PRUNE)
1999 ),
9805207a
DC
2000 (
2001 "prune-datastore",
2002 &Router::new()
2003 .post(&API_METHOD_PRUNE_DATASTORE)
2004 ),
d33d8f4e
DC
2005 (
2006 "pxar-file-download",
2007 &Router::new()
2008 .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
2009 ),
1a0d3d11
DM
2010 (
2011 "rrd",
2012 &Router::new()
2013 .get(&API_METHOD_GET_RRD_STATS)
2014 ),
255f378a
DM
2015 (
2016 "snapshots",
2017 &Router::new()
fc189b19 2018 .get(&API_METHOD_LIST_SNAPSHOTS)
68a6a0ee 2019 .delete(&API_METHOD_DELETE_SNAPSHOT)
255f378a
DM
2020 ),
2021 (
2022 "status",
2023 &Router::new()
2024 .get(&API_METHOD_STATUS)
2025 ),
2026 (
2027 "upload-backup-log",
2028 &Router::new()
2029 .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
2030 ),
c2009e53
DM
2031 (
2032 "verify",
2033 &Router::new()
2034 .post(&API_METHOD_VERIFY)
2035 ),
255f378a
DM
2036];
2037
ad51d02a 2038const DATASTORE_INFO_ROUTER: Router = Router::new()
255f378a
DM
2039 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2040 .subdirs(DATASTORE_INFO_SUBDIRS);
2041
2042
2043pub const ROUTER: Router = Router::new()
bb34b589 2044 .get(&API_METHOD_GET_DATASTORE_LIST)
255f378a 2045 .match_all("store", &DATASTORE_INFO_ROUTER);
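// Editor's note: assuming this ROUTER is mounted at /api2/json/admin/datastore
// (the mount point itself is defined elsewhere in the api2 tree), the subdir map
// above yields endpoints such as
//
//   GET  /api2/json/admin/datastore                        -> get_datastore_list
//   GET  /api2/json/admin/datastore/{store}/notes          -> get_notes
//   PUT  /api2/json/admin/datastore/{store}/notes          -> set_notes
//   GET  /api2/json/admin/datastore/{store}/protected      -> get_protection
//   PUT  /api2/json/admin/datastore/{store}/protected      -> set_protection
//   POST /api2/json/admin/datastore/{store}/change-owner   -> set_backup_owner
//
// where {store} is captured by the match_all("store", ...) entry above.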