//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
use proxmox_router::{
    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{
    Authid, BackupContent, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
    GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
    SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
    StoreProgress, CATALOG_NAME,
};
use pbs_tools::json::{required_integer_param, required_string_param};
use proxmox_rest_server::{formatter, WorkerTask};

use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter};

use crate::server::jobstate::Job;

const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
    let mut note_path = store.base_path();
    note_path.push(group.group_path());
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

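// Allow the request if the authenticated user holds any of the required privileges on the
// datastore; otherwise fall back to requiring ownership of the backup group.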
fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);

    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}

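// Load the snapshot manifest and return it together with the list of files it references,
// appending the manifest blob itself to that list.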
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

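// Merge the manifest's file list with any additional files found in the snapshot directory
// that the manifest does not reference (those get neither size nor crypt mode).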
fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
    let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) {
            continue;
        }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let group_info =
        datastore
            .list_backup_groups()?
            .into_iter()
            .fold(Vec::new(), |mut group_info, group| {
                let owner = match datastore.get_owner(&group) {
                    Ok(auth_id) => auth_id,
                    Err(err) => {
                        eprintln!(
                            "Failed to get owner of group '{}/{}' - {}",
                            &store, group, err
                        );
                        return group_info;
                    }
                };
                if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                    return group_info;
                }

                let snapshots = match group.list_backups(&datastore.base_path()) {
                    Ok(snapshots) => snapshots,
                    Err(_) => {
                        return group_info;
                    }
                };

                let backup_count: u64 = snapshots.len() as u64;
                if backup_count == 0 {
                    return group_info;
                }

                let last_backup = snapshots
                    .iter()
                    .fold(&snapshots[0], |last, curr| {
                        if curr.is_finished()
                            && curr.backup_dir.backup_time() > last.backup_dir.backup_time()
                        {
                            curr
                        } else {
                            last
                        }
                    })
                    .to_owned();

                let note_path = get_group_note_path(&datastore, &group);
                let comment = file_read_firstline(&note_path).ok();

                group_info.push(GroupListItem {
                    backup_type: group.backup_type().to_string(),
                    backup_id: group.backup_id().to_string(),
                    last_backup: last_backup.backup_dir.backup_time(),
                    owner: Some(owner),
                    backup_count,
                    files: last_backup.files,
                    comment,
                });

                group_info
            });

    Ok(group_info)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    backup_type: String,
    backup_id: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(backup_type, backup_id);
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    if !datastore.remove_backup_group(&group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        snapshot.group(),
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
    )?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    check_priv_or_backup_owner(
        &datastore,
        snapshot.group(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => vec![BackupGroup::new(backup_type, backup_id)],
        (Some(backup_type), None) => datastore
            .list_backup_groups()?
            .into_iter()
            .filter(|group| group.backup_type() == backup_type)
            .collect(),
        (None, Some(backup_id)) => datastore
            .list_backup_groups()?
            .into_iter()
            .filter(|group| group.backup_id() == backup_id)
            .collect(),
        _ => datastore.list_backup_groups()?,
    };

    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();
        let protected = info.backup_dir.is_protected(datastore.base_path());

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    }
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> =
                    match serde_json::from_value(verification) {
                        Ok(verify) => verify,
                        Err(err) => {
                            eprintln!("error parsing verification state: '{}'", err);
                            None
                        }
                    };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            }
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            }
        }
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match datastore.get_owner(group) {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
                    "Failed to get owner of group '{}/{}' - {}",
                    &store, group, err
                );
                return Ok(snapshots);
            }
        };

        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
            return Ok(snapshots);
        }

        let group_backups = group.list_backups(&datastore.base_path())?;

        snapshots.extend(
            group_backups
                .into_iter()
                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
        );

        Ok(snapshots)
    })
}

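// Count backup groups and snapshots per backup type ("ct", "vm", "host", other), optionally
// restricted to groups owned by the given Authid.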
fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    store
        .list_backup_groups()?
        .into_iter()
        .filter(|group| {
            let owner = match store.get_owner(group) {
                Ok(owner) => owner,
                Err(err) => {
                    let id = store.name();
                    eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
                    return false;
                }
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&store.base_path())?.len() as u64;

            // only include groups with snapshots; counting/displaying empty groups can confuse
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    "ct" => counts.ct.get_or_insert(Default::default()),
                    "vm" => counts.vm.get_or_insert(Default::default()),
                    "host" => counts.host.get_or_insert(Default::default()),
                    _ => counts.other.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups of a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!(
                "{}:{}/{}/{:08X}",
                store, backup_type, backup_id, backup_time
            );
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?;
                failed_dirs
            } else {
                let privs = CachedUserInfo::new()?.lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    owner,
                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune a group on the datastore
pub fn prune(
    backup_id: String,
    backup_type: String,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(&backup_type, &backup_id);

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
                "protected": mark.protected(),
            }));
        }
        return Ok(json!(prune_result));
    }

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(
            worker,
            "retention options: {}",
            pbs_datastore::prune::cli_options_string(&prune_options)
        );
        task_log!(
            worker,
            "Starting prune on store \"{}\" group \"{}/{}\"",
            store,
            backup_type,
            backup_id
        );
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();

        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            mark,
        );

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(store.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            crate::server::prune_datastore(
                worker,
                auth_id,
                prune_options,
                &store,
                datastore,
                dry_run,
            )
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str =
        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
            .map_err(|err| {
                format_err!(
                    "unable to start garbage collection job on datastore {} - {}",
                    store,
                    err
                )
            })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            list.push(DataStoreListItem {
                store: store.clone(),
                comment: data["comment"].as_str().map(String::from),
            });
        }
    }

    Ok(list)
}

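// The raw download/upload endpoints below are declared as plain `ApiMethod`s with an
// `ApiHandler::AsyncHttp` handler, since they stream the HTTP body directly instead of
// returning JSON.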
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    None,
    &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
        true,
    ),
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(
            &datastore,
            backup_dir.group(),
            &auth_id,
            PRIV_DATASTORE_READ,
        )?;

        println!(
            "Download {} from {} ({}/{})",
            file_name, store, backup_dir, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|bytes| bytes.freeze())
                .map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
                    err
                });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

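// Unlike download_file above, the decoded variant refuses encrypted files and verifies the
// index checksum against the manifest before streaming the decoded content.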
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    ),
)
.access(
    None,
    &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
        true,
    ),
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(
            &datastore,
            backup_dir.group(),
            &auth_id,
            PRIV_DATASTORE_READ,
        )?;

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!(
            "Download {} from {} ({}/{})",
            file_name, store, backup_dir, file_name
        );

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                })?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
                    eprintln!("error during streaming of '{:?}' - {}", path, err);
                    err
                }))
            }
            "fidx" => {
                let index = FixedIndexReader::open(&path).map_err(|err| {
                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                })?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(
                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
                        move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        },
                    ),
                )
            }
            extension => {
                bail!("cannot download '{}' files", extension);
            }
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    ),
)
.access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false),
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!(
            "Upload backup log to {}/{}/{}/{}/{}",
            store,
            backup_type,
            backup_id,
            backup_dir.backup_time_string(),
            file_name
        );

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.group(),
        &auth_id,
        PRIV_DATASTORE_READ,
    )?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}

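// The base64 encoded "filepath" below consists of the pxar archive name followed by the path
// inside the archive; directories are streamed as a zip or (with tar=true) tar.zst archive.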
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
            ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let tar = param["tar"].as_bool().unwrap_or(false);

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(
            &datastore,
            backup_dir.group(),
            &auth_id,
            PRIV_DATASTORE_READ,
        )?;

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path)
            .await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                if tar {
                    proxmox_rest_server::spawn_internal_task(create_tar(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
                    Body::wrap_stream(zstdstream.map_err(move |err| {
                        eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
                        err
                    }))
                } else {
                    proxmox_rest_server::spawn_internal_task(create_zip(
                        channelwriter,
                        decoder,
                        path.clone(),
                        false,
                    ));
                    Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                        eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                        err
                    }))
                }
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }
    .boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrame,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Read datastore stats
pub fn get_rrd_stats(
    store: String,
    timeframe: RRDTimeFrame,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let disk_manager = crate::tools::disks::DiskManage::new();

    let mut rrd_fields = vec![
        "total",
        "used",
        "read_ios",
        "read_bytes",
        "write_ios",
        "write_bytes",
    ];

    // we do not have io_ticks for zpools, so don't include them
    match disk_manager.find_mounted_device(&datastore.base_path()) {
        Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
        _ => rrd_fields.push("io_ticks"),
    };

    create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
    },
)]
/// Read the active operations (reads/writes) on a datastore
pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
    let active_operations = task_tracking::get_active_operations(&store)?;
    Ok(json!({
        "read": active_operations.read,
        "write": active_operations.write,
    }))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a backup group
pub fn get_group_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = BackupGroup::new(backup_type, backup_id);

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = BackupGroup::new(backup_type, backup_id);

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;

    Ok(())
}

912b3f5b
DM
1749#[api(
1750 input: {
1751 properties: {
1752 store: {
1753 schema: DATASTORE_SCHEMA,
1754 },
1755 "backup-type": {
1756 schema: BACKUP_TYPE_SCHEMA,
1757 },
1758 "backup-id": {
1759 schema: BACKUP_ID_SCHEMA,
1760 },
1761 "backup-time": {
1762 schema: BACKUP_TIME_SCHEMA,
1763 },
1764 },
1765 },
1766 access: {
1401f4be 1767 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
912b3f5b
DM
1768 },
1769)]
1770/// Get "notes" for a specific backup
bf78f708 1771pub fn get_notes(
912b3f5b
DM
1772 store: String,
1773 backup_type: String,
1774 backup_id: String,
1775 backup_time: i64,
1776 rpcenv: &mut dyn RpcEnvironment,
1777) -> Result<String, Error> {
e9d2fc93 1778 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
912b3f5b 1779
e6dc35ac 1780 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
e0e5b442 1781 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b 1782
dc7a5b34
TL
1783 check_priv_or_backup_owner(
1784 &datastore,
1785 backup_dir.group(),
1786 &auth_id,
1787 PRIV_DATASTORE_AUDIT,
1788 )?;
912b3f5b 1789
883aa6d5 1790 let (manifest, _) = datastore.load_manifest(&backup_dir)?;
912b3f5b 1791
dc7a5b34 1792 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
912b3f5b
DM
1793
1794 Ok(String::from(notes))
1795}
1796
1797#[api(
1798 input: {
1799 properties: {
1800 store: {
1801 schema: DATASTORE_SCHEMA,
1802 },
1803 "backup-type": {
1804 schema: BACKUP_TYPE_SCHEMA,
1805 },
1806 "backup-id": {
1807 schema: BACKUP_ID_SCHEMA,
1808 },
1809 "backup-time": {
1810 schema: BACKUP_TIME_SCHEMA,
1811 },
1812 notes: {
1813 description: "A multiline text.",
1814 },
1815 },
1816 },
1817 access: {
b728a69e
FG
1818 permission: &Permission::Privilege(&["datastore", "{store}"],
1819 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1820 true),
912b3f5b
DM
1821 },
1822)]
1823/// Set "notes" for a specific backup
bf78f708 1824pub fn set_notes(
912b3f5b
DM
1825 store: String,
1826 backup_type: String,
1827 backup_id: String,
1828 backup_time: i64,
1829 notes: String,
1830 rpcenv: &mut dyn RpcEnvironment,
1831) -> Result<(), Error> {
e9d2fc93 1832 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
912b3f5b 1833
e6dc35ac 1834 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
e0e5b442 1835 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
912b3f5b 1836
dc7a5b34
TL
1837 check_priv_or_backup_owner(
1838 &datastore,
1839 backup_dir.group(),
1840 &auth_id,
1841 PRIV_DATASTORE_MODIFY,
1842 )?;
912b3f5b 1843
dc7a5b34
TL
1844 datastore
1845 .update_manifest(&backup_dir, |manifest| {
1846 manifest.unprotected["notes"] = notes.into();
1847 })
1848 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
1849
1850 Ok(())
1851}
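
// For orientation: `set_notes` only touches the `unprotected` part of the
// snapshot manifest, so the stored manifest ends up containing something
// along these lines (illustrative, other fields abbreviated):
//
//   {
//       ...,
//       "unprotected": {
//           "notes": "first line\nsecond line"
//       }
//   }
//
// Because this section sits outside the verified portion of the manifest,
// editing notes does not require rewriting any other snapshot data.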

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.group(),
        &auth_id,
        PRIV_DATASTORE_AUDIT,
    )?;

    Ok(backup_dir.is_protected(datastore.base_path()))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// Enable or disable protection for a specific backup
pub fn set_protection(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.group(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore.update_protection(&backup_dir, protected)
}
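
// Usage sketch for the protection endpoints above (prefix assumed as in the
// earlier examples, routing via the "protected" entry in
// `DATASTORE_INFO_SUBDIRS`; parameter values are examples only):
//
//   GET /api2/json/admin/datastore/{store}/protected?backup-type=vm&backup-id=100&backup-time=1650000000
//   PUT /api2/json/admin/datastore/{store}/protected
//       { "backup-type": "vm", "backup-id": "100", "backup-time": 1650000000, "protected": true }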

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(&backup_group)?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            }
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
            }
            (false, true) => {
                // API token owner to API token
                owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
            }
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            }
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(
            UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!(
            "{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner
        );
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}
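
// Summary of the ownership-change matrix implemented above for callers that
// only hold Datastore.Backup (Datastore.Modify skips all of these checks):
//
//   current owner   new owner   allowed when
//   -------------   ---------   --------------------------------------------
//   API token       API token   both tokens belong to the calling user
//   API token       user        the token belongs to the caller and the new
//                               owner is the caller itself
//   user            API token   the caller owns the group and the new token
//                               belongs to the caller
//   user            user        never (requires Datastore.Modify)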

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);
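
// Putting the two routers together: `ROUTER` serves the datastore list at the
// module root and dispatches everything below `{store}` to
// `DATASTORE_INFO_ROUTER`. Assuming the conventional `/api2/json` prefix (not
// defined in this file), the resulting endpoints look like:
//
//   GET  /api2/json/admin/datastore                     -> API_METHOD_GET_DATASTORE_LIST
//   GET  /api2/json/admin/datastore/{store}             -> subdir index (list_subdirs_api_method!)
//   GET  /api2/json/admin/datastore/{store}/snapshots   -> API_METHOD_LIST_SNAPSHOTS
//   POST /api2/json/admin/datastore/{store}/prune       -> API_METHOD_PRUNE
//   PUT  /api2/json/admin/datastore/{store}/notes       -> API_METHOD_SET_NOTES
//
// and so on, one entry per element of `DATASTORE_INFO_SUBDIRS`.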