]> git.proxmox.com Git - proxmox-backup.git/blame - src/api2/admin/datastore.rs
ui: add summary mask when in maintenance mode
[proxmox-backup.git] / src / api2 / admin / datastore.rs
CommitLineData
bf78f708
DM
1//! Datastore Management
2
0d08fcee 3use std::collections::HashSet;
d33d8f4e
DC
4use std::ffi::OsStr;
5use std::os::unix::ffi::OsStrExt;
d6688884 6use std::path::PathBuf;
6da20161 7use std::sync::Arc;
cad540e9 8
6ef9bb59 9use anyhow::{bail, format_err, Error};
9e47c0a5 10use futures::*;
cad540e9
WB
11use hyper::http::request::Parts;
12use hyper::{header, Body, Response, StatusCode};
15e9b4ed 13use serde_json::{json, Value};
7c667013 14use tokio_stream::wrappers::ReceiverStream;
15e9b4ed 15
dc7a5b34
TL
16use proxmox_async::blocking::WrappedReaderStream;
17use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
984ddb2f 18use proxmox_compression::zstd::ZstdEncoder;
6ef1b649 19use proxmox_router::{
dc7a5b34
TL
20 http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
21 Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
6ef1b649
WB
22};
23use proxmox_schema::*;
dc7a5b34
TL
24use proxmox_sys::fs::{
25 file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
26};
27use proxmox_sys::sortable;
d5790a9f 28use proxmox_sys::{task_log, task_warn};
e18a6c9e 29
2e219481 30use pxar::accessor::aio::Accessor;
d33d8f4e
DC
31use pxar::EntryKind;
32
dc7a5b34 33use pbs_api_types::{
988d575d 34 Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
dc7a5b34
TL
35 GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
36 SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
37 BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
38 PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
39 PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
b2065dc7 40};
984ddb2f 41use pbs_client::pxar::{create_tar, create_zip};
dc7a5b34 42use pbs_config::CachedUserInfo;
b2065dc7
WB
43use pbs_datastore::backup_info::BackupInfo;
44use pbs_datastore::cached_chunk_reader::CachedChunkReader;
013b1e8b 45use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
b2065dc7
WB
46use pbs_datastore::data_blob::DataBlob;
47use pbs_datastore::data_blob_reader::DataBlobReader;
48use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
dc7a5b34 49use pbs_datastore::fixed_index::FixedIndexReader;
b2065dc7
WB
50use pbs_datastore::index::IndexFile;
51use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
89725197 52use pbs_datastore::prune::compute_prune_info;
dc7a5b34
TL
53use pbs_datastore::{
54 check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
55 StoreProgress, CATALOG_NAME,
56};
3c8c2827 57use pbs_tools::json::{required_integer_param, required_string_param};
dc7a5b34 58use proxmox_rest_server::{formatter, WorkerTask};
2b7f8dd5 59
431cc7b1 60use crate::api2::node::rrd::create_value_from_rrd;
dc7a5b34 61use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter};
54552dda 62
b9700a9f 63use crate::server::jobstate::Job;
804f6143 64
d6688884
SR
65const GROUP_NOTES_FILE_NAME: &str = "notes";
66
db87d93e 67fn get_group_note_path(store: &DataStore, group: &pbs_api_types::BackupGroup) -> PathBuf {
d6688884 68 let mut note_path = store.base_path();
db87d93e 69 note_path.push(group.to_string());
d6688884
SR
70 note_path.push(GROUP_NOTES_FILE_NAME);
71 note_path
72}
73
bff85572 74fn check_priv_or_backup_owner(
e7cb4dc5 75 store: &DataStore,
db87d93e 76 group: &pbs_api_types::BackupGroup,
e6dc35ac 77 auth_id: &Authid,
bff85572
FG
78 required_privs: u64,
79) -> Result<(), Error> {
80 let user_info = CachedUserInfo::new()?;
9a37bd6c 81 let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);
bff85572
FG
82
83 if privs & required_privs == 0 {
84 let owner = store.get_owner(group)?;
85 check_backup_owner(&owner, auth_id)?;
86 }
87 Ok(())
88}
89
e7cb4dc5
WB
90fn read_backup_index(
91 store: &DataStore,
92 backup_dir: &BackupDir,
93) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
ff86ef00 94 let (manifest, index_size) = store.load_manifest(backup_dir)?;
8c70e3eb 95
09b1f7b2
DM
96 let mut result = Vec::new();
97 for item in manifest.files() {
98 result.push(BackupContent {
99 filename: item.filename.clone(),
f28d9088 100 crypt_mode: Some(item.crypt_mode),
09b1f7b2
DM
101 size: Some(item.size),
102 });
8c70e3eb
DM
103 }
104
09b1f7b2 105 result.push(BackupContent {
96d65fbc 106 filename: MANIFEST_BLOB_NAME.to_string(),
882c0823
FG
107 crypt_mode: match manifest.signature {
108 Some(_) => Some(CryptMode::SignOnly),
109 None => Some(CryptMode::None),
110 },
09b1f7b2
DM
111 size: Some(index_size),
112 });
4f1e40a2 113
70030b43 114 Ok((manifest, result))
8c70e3eb
DM
115}
116
1c090810
DC
117fn get_all_snapshot_files(
118 store: &DataStore,
119 info: &BackupInfo,
70030b43 120) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
9a37bd6c 121 let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;
1c090810
DC
122
123 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
124 acc.insert(item.filename.clone());
125 acc
126 });
127
128 for file in &info.files {
dc7a5b34
TL
129 if file_set.contains(file) {
130 continue;
131 }
f28d9088
WB
132 files.push(BackupContent {
133 filename: file.to_string(),
134 size: None,
135 crypt_mode: None,
136 });
1c090810
DC
137 }
138
70030b43 139 Ok((manifest, files))
1c090810
DC
140}
141
b31c8019
DM
142#[api(
143 input: {
144 properties: {
145 store: {
146 schema: DATASTORE_SCHEMA,
147 },
148 },
149 },
7b570c17 150 returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
bb34b589 151 access: {
54552dda
DM
152 permission: &Permission::Privilege(
153 &["datastore", "{store}"],
154 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
155 true),
bb34b589 156 },
b31c8019
DM
157)]
158/// List backup groups.
b2362a12 159pub fn list_groups(
b31c8019 160 store: String,
54552dda 161 rpcenv: &mut dyn RpcEnvironment,
b31c8019 162) -> Result<Vec<GroupListItem>, Error> {
e6dc35ac 163 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 164 let user_info = CachedUserInfo::new()?;
e6dc35ac 165 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
54552dda 166
e9d2fc93 167 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
0d08fcee
FG
168 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
169
249dde8b
TL
170 datastore
171 .iter_backup_groups()?
172 .try_fold(Vec::new(), |mut group_info, group| {
173 let group = group?;
db87d93e 174 let owner = match datastore.get_owner(group.as_ref()) {
249dde8b
TL
175 Ok(auth_id) => auth_id,
176 Err(err) => {
177 let id = &store;
178 eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
179 return Ok(group_info);
dc7a5b34 180 }
249dde8b
TL
181 };
182 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
183 return Ok(group_info);
184 }
0d08fcee 185
6da20161 186 let snapshots = match group.list_backups() {
249dde8b
TL
187 Ok(snapshots) => snapshots,
188 Err(_) => return Ok(group_info),
189 };
0d08fcee 190
249dde8b
TL
191 let backup_count: u64 = snapshots.len() as u64;
192 if backup_count == 0 {
193 return Ok(group_info);
194 }
0d08fcee 195
249dde8b
TL
196 let last_backup = snapshots
197 .iter()
198 .fold(&snapshots[0], |a, b| {
199 if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
200 a
201 } else {
202 b
203 }
204 })
205 .to_owned();
206
db87d93e 207 let note_path = get_group_note_path(&datastore, group.as_ref());
249dde8b
TL
208 let comment = file_read_firstline(&note_path).ok();
209
210 group_info.push(GroupListItem {
988d575d 211 backup: group.into(),
249dde8b
TL
212 last_backup: last_backup.backup_dir.backup_time(),
213 owner: Some(owner),
214 backup_count,
215 files: last_backup.files,
216 comment,
0d08fcee
FG
217 });
218
249dde8b
TL
219 Ok(group_info)
220 })
812c6f87 221}
8f579717 222
f32791b4
DC
223#[api(
224 input: {
225 properties: {
988d575d
WB
226 store: { schema: DATASTORE_SCHEMA },
227 "backup-type": { type: BackupType },
228 "backup-id": { schema: BACKUP_ID_SCHEMA },
f32791b4
DC
229 },
230 },
231 access: {
232 permission: &Permission::Privilege(
233 &["datastore", "{store}"],
234 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
235 true),
236 },
237)]
238/// Delete backup group including all snapshots.
239pub fn delete_group(
240 store: String,
988d575d 241 backup_type: BackupType,
f32791b4
DC
242 backup_id: String,
243 _info: &ApiMethod,
244 rpcenv: &mut dyn RpcEnvironment,
245) -> Result<Value, Error> {
f32791b4
DC
246 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
247
db87d93e 248 let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
e9d2fc93 249 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
f32791b4
DC
250
251 check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
252
5cc7d891 253 if !datastore.remove_backup_group(&group)? {
171a00ca 254 bail!("group only partially deleted due to protected snapshots");
5cc7d891 255 }
f32791b4
DC
256
257 Ok(Value::Null)
258}
259
09b1f7b2
DM
260#[api(
261 input: {
262 properties: {
988d575d
WB
263 store: { schema: DATASTORE_SCHEMA },
264 "backup-type": { type: BackupType },
265 "backup-id": { schema: BACKUP_ID_SCHEMA },
266 "backup-time": { schema: BACKUP_TIME_SCHEMA },
09b1f7b2
DM
267 },
268 },
7b570c17 269 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
bb34b589 270 access: {
54552dda
DM
271 permission: &Permission::Privilege(
272 &["datastore", "{store}"],
273 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
274 true),
bb34b589 275 },
09b1f7b2
DM
276)]
277/// List snapshot files.
ea5f547f 278pub fn list_snapshot_files(
09b1f7b2 279 store: String,
988d575d 280 backup_type: BackupType,
09b1f7b2
DM
281 backup_id: String,
282 backup_time: i64,
01a13423 283 _info: &ApiMethod,
54552dda 284 rpcenv: &mut dyn RpcEnvironment,
09b1f7b2 285) -> Result<Vec<BackupContent>, Error> {
e6dc35ac 286 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
e9d2fc93 287 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
54552dda 288
6b0c6492 289 let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
01a13423 290
dc7a5b34
TL
291 check_priv_or_backup_owner(
292 &datastore,
db87d93e 293 snapshot.as_ref(),
dc7a5b34
TL
294 &auth_id,
295 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
296 )?;
54552dda 297
6da20161 298 let info = BackupInfo::new(snapshot)?;
01a13423 299
70030b43
DM
300 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
301
302 Ok(files)
01a13423
DM
303}
304
68a6a0ee
DM
305#[api(
306 input: {
307 properties: {
988d575d
WB
308 store: { schema: DATASTORE_SCHEMA },
309 "backup-type": { type: BackupType },
310 "backup-id": { schema: BACKUP_ID_SCHEMA },
311 "backup-time": { schema: BACKUP_TIME_SCHEMA },
68a6a0ee
DM
312 },
313 },
bb34b589 314 access: {
54552dda
DM
315 permission: &Permission::Privilege(
316 &["datastore", "{store}"],
317 PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
318 true),
bb34b589 319 },
68a6a0ee
DM
320)]
321/// Delete backup snapshot.
bf78f708 322pub fn delete_snapshot(
68a6a0ee 323 store: String,
988d575d 324 backup_type: BackupType,
68a6a0ee
DM
325 backup_id: String,
326 backup_time: i64,
6f62c924 327 _info: &ApiMethod,
54552dda 328 rpcenv: &mut dyn RpcEnvironment,
6f62c924 329) -> Result<Value, Error> {
e6dc35ac 330 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 331
e9d2fc93 332 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
6b0c6492 333 let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
6f62c924 334
dc7a5b34
TL
335 check_priv_or_backup_owner(
336 &datastore,
db87d93e 337 snapshot.as_ref(),
dc7a5b34
TL
338 &auth_id,
339 PRIV_DATASTORE_MODIFY,
340 )?;
54552dda 341
db87d93e 342 datastore.remove_backup_dir(snapshot.as_ref(), false)?;
6f62c924
DM
343
344 Ok(Value::Null)
345}
346
fc189b19 347#[api(
b7c3eaa9 348 streaming: true,
fc189b19
DM
349 input: {
350 properties: {
988d575d 351 store: { schema: DATASTORE_SCHEMA },
fc189b19
DM
352 "backup-type": {
353 optional: true,
988d575d 354 type: BackupType,
fc189b19
DM
355 },
356 "backup-id": {
357 optional: true,
358 schema: BACKUP_ID_SCHEMA,
359 },
360 },
361 },
7b570c17 362 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
bb34b589 363 access: {
54552dda
DM
364 permission: &Permission::Privilege(
365 &["datastore", "{store}"],
366 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
367 true),
bb34b589 368 },
fc189b19
DM
369)]
370/// List backup snapshots.
dc7a5b34 371pub fn list_snapshots(
54552dda 372 store: String,
988d575d 373 backup_type: Option<BackupType>,
54552dda
DM
374 backup_id: Option<String>,
375 _param: Value,
184f17af 376 _info: &ApiMethod,
54552dda 377 rpcenv: &mut dyn RpcEnvironment,
fc189b19 378) -> Result<Vec<SnapshotListItem>, Error> {
e6dc35ac 379 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 380 let user_info = CachedUserInfo::new()?;
e6dc35ac 381 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
184f17af 382
0d08fcee
FG
383 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
384
e9d2fc93 385 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
184f17af 386
249dde8b
TL
387 // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
388 // backup group and provide an error free (Err -> None) accessor
0d08fcee 389 let groups = match (backup_type, backup_id) {
db87d93e 390 (Some(backup_type), Some(backup_id)) => {
6b0c6492 391 vec![datastore.backup_group_from_parts(backup_type, backup_id)]
db87d93e 392 }
7d9cb8c4 393 (Some(backup_type), None) => datastore
249dde8b 394 .iter_backup_groups_ok()?
dc7a5b34
TL
395 .filter(|group| group.backup_type() == backup_type)
396 .collect(),
7d9cb8c4 397 (None, Some(backup_id)) => datastore
249dde8b 398 .iter_backup_groups_ok()?
dc7a5b34
TL
399 .filter(|group| group.backup_id() == backup_id)
400 .collect(),
7d9cb8c4 401 _ => datastore.list_backup_groups()?,
0d08fcee 402 };
54552dda 403
0d08fcee 404 let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
988d575d
WB
405 let backup = pbs_api_types::BackupDir {
406 group: group.into(),
407 time: info.backup_dir.backup_time(),
408 };
6da20161 409 let protected = info.backup_dir.is_protected();
1c090810 410
79c53595 411 match get_all_snapshot_files(&datastore, &info) {
70030b43 412 Ok((manifest, files)) => {
70030b43
DM
413 // extract the first line from notes
414 let comment: Option<String> = manifest.unprotected["notes"]
415 .as_str()
416 .and_then(|notes| notes.lines().next())
417 .map(String::from);
418
035c40e6
FG
419 let fingerprint = match manifest.fingerprint() {
420 Ok(fp) => fp,
421 Err(err) => {
422 eprintln!("error parsing fingerprint: '{}'", err);
423 None
dc7a5b34 424 }
035c40e6
FG
425 };
426
79c53595 427 let verification = manifest.unprotected["verify_state"].clone();
dc7a5b34
TL
428 let verification: Option<SnapshotVerifyState> =
429 match serde_json::from_value(verification) {
430 Ok(verify) => verify,
431 Err(err) => {
432 eprintln!("error parsing verification state : '{}'", err);
433 None
434 }
435 };
3b2046d2 436
0d08fcee
FG
437 let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
438
79c53595 439 SnapshotListItem {
988d575d 440 backup,
79c53595
FG
441 comment,
442 verification,
035c40e6 443 fingerprint,
79c53595
FG
444 files,
445 size,
446 owner,
02db7267 447 protected,
79c53595 448 }
dc7a5b34 449 }
1c090810
DC
450 Err(err) => {
451 eprintln!("error during snapshot file listing: '{}'", err);
79c53595 452 let files = info
dc7a5b34
TL
453 .files
454 .into_iter()
455 .map(|filename| BackupContent {
456 filename,
457 size: None,
458 crypt_mode: None,
459 })
460 .collect();
79c53595
FG
461
462 SnapshotListItem {
988d575d 463 backup,
79c53595
FG
464 comment: None,
465 verification: None,
035c40e6 466 fingerprint: None,
79c53595
FG
467 files,
468 size: None,
469 owner,
02db7267 470 protected,
79c53595 471 }
dc7a5b34 472 }
0d08fcee
FG
473 }
474 };
184f17af 475
dc7a5b34 476 groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
db87d93e 477 let owner = match datastore.get_owner(group.as_ref()) {
dc7a5b34
TL
478 Ok(auth_id) => auth_id,
479 Err(err) => {
480 eprintln!(
481 "Failed to get owner of group '{}/{}' - {}",
482 &store, group, err
483 );
0d08fcee
FG
484 return Ok(snapshots);
485 }
dc7a5b34 486 };
0d08fcee 487
dc7a5b34
TL
488 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
489 return Ok(snapshots);
490 }
0d08fcee 491
6da20161 492 let group_backups = group.list_backups()?;
0d08fcee 493
dc7a5b34
TL
494 snapshots.extend(
495 group_backups
496 .into_iter()
497 .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
498 );
499
500 Ok(snapshots)
501 })
184f17af
DM
502}
503
6da20161
WB
504fn get_snapshots_count(
505 store: &Arc<DataStore>,
506 filter_owner: Option<&Authid>,
507) -> Result<Counts, Error> {
7d9cb8c4 508 store
249dde8b 509 .iter_backup_groups_ok()?
fdfcb74d 510 .filter(|group| {
db87d93e 511 let owner = match store.get_owner(group.as_ref()) {
fdfcb74d
FG
512 Ok(owner) => owner,
513 Err(err) => {
72f81545
TL
514 let id = store.name();
515 eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
fdfcb74d 516 return false;
dc7a5b34 517 }
fdfcb74d 518 };
14e08625 519
fdfcb74d
FG
520 match filter_owner {
521 Some(filter) => check_backup_owner(&owner, filter).is_ok(),
522 None => true,
523 }
524 })
525 .try_fold(Counts::default(), |mut counts, group| {
6da20161 526 let snapshot_count = group.list_backups()?.len() as u64;
fdfcb74d 527
72f81545 528 // only include groups with snapshots, counting/displaying emtpy groups can confuse
b44483a8
DM
529 if snapshot_count > 0 {
530 let type_count = match group.backup_type() {
988d575d
WB
531 BackupType::Ct => counts.ct.get_or_insert(Default::default()),
532 BackupType::Vm => counts.vm.get_or_insert(Default::default()),
533 BackupType::Host => counts.host.get_or_insert(Default::default()),
b44483a8 534 };
14e08625 535
b44483a8
DM
536 type_count.groups += 1;
537 type_count.snapshots += snapshot_count;
538 }
16f9f244 539
fdfcb74d
FG
540 Ok(counts)
541 })
16f9f244
DC
542}
543
1dc117bb
DM
544#[api(
545 input: {
546 properties: {
547 store: {
548 schema: DATASTORE_SCHEMA,
549 },
98afc7b1
FG
550 verbose: {
551 type: bool,
552 default: false,
553 optional: true,
554 description: "Include additional information like snapshot counts and GC status.",
555 },
1dc117bb 556 },
98afc7b1 557
1dc117bb
DM
558 },
559 returns: {
14e08625 560 type: DataStoreStatus,
1dc117bb 561 },
bb34b589 562 access: {
54552dda 563 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
bb34b589 564 },
1dc117bb
DM
565)]
566/// Get datastore status.
ea5f547f 567pub fn status(
1dc117bb 568 store: String,
98afc7b1 569 verbose: bool,
0eecf38f 570 _info: &ApiMethod,
fdfcb74d 571 rpcenv: &mut dyn RpcEnvironment,
14e08625 572) -> Result<DataStoreStatus, Error> {
e9d2fc93 573 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
14e08625 574 let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
fdfcb74d
FG
575 let (counts, gc_status) = if verbose {
576 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
577 let user_info = CachedUserInfo::new()?;
578
579 let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
580 let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
581 None
582 } else {
583 Some(&auth_id)
584 };
585
586 let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
587 let gc_status = Some(datastore.last_gc_status());
588
589 (counts, gc_status)
590 } else {
591 (None, None)
98afc7b1 592 };
16f9f244 593
14e08625
DC
594 Ok(DataStoreStatus {
595 total: storage.total,
596 used: storage.used,
597 avail: storage.avail,
598 gc_status,
599 counts,
600 })
0eecf38f
DM
601}
602
c2009e53
DM
603#[api(
604 input: {
605 properties: {
606 store: {
607 schema: DATASTORE_SCHEMA,
608 },
609 "backup-type": {
988d575d 610 type: BackupType,
c2009e53
DM
611 optional: true,
612 },
613 "backup-id": {
614 schema: BACKUP_ID_SCHEMA,
615 optional: true,
616 },
dcbf29e7
HL
617 "ignore-verified": {
618 schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
619 optional: true,
620 },
621 "outdated-after": {
622 schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
623 optional: true,
624 },
c2009e53
DM
625 "backup-time": {
626 schema: BACKUP_TIME_SCHEMA,
627 optional: true,
628 },
629 },
630 },
631 returns: {
632 schema: UPID_SCHEMA,
633 },
634 access: {
09f6a240 635 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
c2009e53
DM
636 },
637)]
638/// Verify backups.
639///
640/// This function can verify a single backup snapshot, all backup from a backup group,
641/// or all backups in the datastore.
642pub fn verify(
643 store: String,
988d575d 644 backup_type: Option<BackupType>,
c2009e53
DM
645 backup_id: Option<String>,
646 backup_time: Option<i64>,
dcbf29e7
HL
647 ignore_verified: Option<bool>,
648 outdated_after: Option<i64>,
c2009e53
DM
649 rpcenv: &mut dyn RpcEnvironment,
650) -> Result<Value, Error> {
e9d2fc93 651 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
dcbf29e7 652 let ignore_verified = ignore_verified.unwrap_or(true);
c2009e53 653
09f6a240 654 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
8ea00f6e 655 let worker_id;
c2009e53
DM
656
657 let mut backup_dir = None;
658 let mut backup_group = None;
133042b5 659 let mut worker_type = "verify";
c2009e53
DM
660
661 match (backup_type, backup_id, backup_time) {
662 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
dc7a5b34
TL
663 worker_id = format!(
664 "{}:{}/{}/{:08X}",
665 store, backup_type, backup_id, backup_time
666 );
db87d93e 667 let dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
09f6a240 668
db87d93e 669 check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?;
09f6a240 670
c2009e53 671 backup_dir = Some(dir);
133042b5 672 worker_type = "verify_snapshot";
c2009e53
DM
673 }
674 (Some(backup_type), Some(backup_id), None) => {
4ebda996 675 worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
db87d93e 676 let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
09f6a240
FG
677
678 check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
679
6b0c6492 680 backup_group = Some(datastore.backup_group(group));
133042b5 681 worker_type = "verify_group";
c2009e53
DM
682 }
683 (None, None, None) => {
8ea00f6e 684 worker_id = store.clone();
c2009e53 685 }
5a718dce 686 _ => bail!("parameters do not specify a backup group or snapshot"),
c2009e53
DM
687 }
688
39735609 689 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
c2009e53
DM
690
691 let upid_str = WorkerTask::new_thread(
133042b5 692 worker_type,
44288184 693 Some(worker_id),
049a22a3 694 auth_id.to_string(),
e7cb4dc5
WB
695 to_stdout,
696 move |worker| {
9c26a3d6 697 let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
adfdc369 698 let failed_dirs = if let Some(backup_dir) = backup_dir {
adfdc369 699 let mut res = Vec::new();
f6b1d1cc 700 if !verify_backup_dir(
9c26a3d6 701 &verify_worker,
f6b1d1cc 702 &backup_dir,
f6b1d1cc 703 worker.upid().clone(),
dc7a5b34 704 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
f6b1d1cc 705 )? {
adfdc369
DC
706 res.push(backup_dir.to_string());
707 }
708 res
c2009e53 709 } else if let Some(backup_group) = backup_group {
7e25b9aa 710 let failed_dirs = verify_backup_group(
9c26a3d6 711 &verify_worker,
63d9aca9 712 &backup_group,
7e25b9aa 713 &mut StoreProgress::new(1),
f6b1d1cc 714 worker.upid(),
dc7a5b34 715 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
63d9aca9
DM
716 )?;
717 failed_dirs
c2009e53 718 } else {
dc7a5b34 719 let privs = CachedUserInfo::new()?.lookup_privs(&auth_id, &["datastore", &store]);
09f6a240
FG
720
721 let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
722 Some(auth_id)
723 } else {
724 None
725 };
726
dcbf29e7
HL
727 verify_all_backups(
728 &verify_worker,
729 worker.upid(),
730 owner,
dc7a5b34 731 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
dcbf29e7 732 )?
c2009e53 733 };
3984a5fd 734 if !failed_dirs.is_empty() {
1ec0d70d 735 task_log!(worker, "Failed to verify the following snapshots/groups:");
adfdc369 736 for dir in failed_dirs {
1ec0d70d 737 task_log!(worker, "\t{}", dir);
adfdc369 738 }
1ffe0301 739 bail!("verification failed - please check the log for details");
c2009e53
DM
740 }
741 Ok(())
e7cb4dc5
WB
742 },
743 )?;
c2009e53
DM
744
745 Ok(json!(upid_str))
746}
747
0a240aaa
DC
748#[api(
749 input: {
750 properties: {
988d575d
WB
751 "backup-id": { schema: BACKUP_ID_SCHEMA },
752 "backup-type": { type: BackupType },
0a240aaa
DC
753 "dry-run": {
754 optional: true,
755 type: bool,
756 default: false,
757 description: "Just show what prune would do, but do not delete anything.",
758 },
759 "prune-options": {
760 type: PruneOptions,
761 flatten: true,
762 },
763 store: {
764 schema: DATASTORE_SCHEMA,
765 },
766 },
767 },
7b570c17 768 returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
0a240aaa
DC
769 access: {
770 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
771 },
772)]
9805207a 773/// Prune a group on the datastore
bf78f708 774pub fn prune(
0a240aaa 775 backup_id: String,
988d575d 776 backup_type: BackupType,
0a240aaa
DC
777 dry_run: bool,
778 prune_options: PruneOptions,
779 store: String,
780 _param: Value,
54552dda 781 rpcenv: &mut dyn RpcEnvironment,
83b7db02 782) -> Result<Value, Error> {
e6dc35ac 783 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 784
e9d2fc93 785 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
54552dda 786
6b0c6492 787 let group = datastore.backup_group_from_parts(backup_type, &backup_id);
db87d93e
WB
788
789 check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?;
83b7db02 790
988d575d 791 let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id);
503995c7 792
dda70154
DM
793 let mut prune_result = Vec::new();
794
6da20161 795 let list = group.list_backups()?;
dda70154
DM
796
797 let mut prune_info = compute_prune_info(list, &prune_options)?;
798
799 prune_info.reverse(); // delete older snapshots first
800
89725197 801 let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);
dda70154
DM
802
803 if dry_run {
02db7267
DC
804 for (info, mark) in prune_info {
805 let keep = keep_all || mark.keep();
dda70154 806
dda70154 807 prune_result.push(json!({
db87d93e
WB
808 "backup-type": info.backup_dir.backup_type(),
809 "backup-id": info.backup_dir.backup_id(),
810 "backup-time": info.backup_dir.backup_time(),
dda70154 811 "keep": keep,
02db7267 812 "protected": mark.protected(),
dda70154
DM
813 }));
814 }
815 return Ok(json!(prune_result));
816 }
817
163e9bbe 818 // We use a WorkerTask just to have a task log, but run synchrounously
049a22a3 819 let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
dda70154 820
f1539300 821 if keep_all {
1ec0d70d 822 task_log!(worker, "No prune selection - keeping all files.");
f1539300 823 } else {
dc7a5b34
TL
824 task_log!(
825 worker,
826 "retention options: {}",
827 pbs_datastore::prune::cli_options_string(&prune_options)
828 );
829 task_log!(
830 worker,
831 "Starting prune on store \"{}\" group \"{}/{}\"",
832 store,
833 backup_type,
834 backup_id
835 );
f1539300 836 }
3b03abfe 837
02db7267
DC
838 for (info, mark) in prune_info {
839 let keep = keep_all || mark.keep();
dda70154 840
f1539300
SR
841 let backup_time = info.backup_dir.backup_time();
842 let timestamp = info.backup_dir.backup_time_string();
db87d93e
WB
843 let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();
844
845 let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);
f1539300 846
1ec0d70d 847 task_log!(worker, "{}", msg);
f1539300
SR
848
849 prune_result.push(json!({
db87d93e
WB
850 "backup-type": group.ty,
851 "backup-id": group.id,
f1539300
SR
852 "backup-time": backup_time,
853 "keep": keep,
02db7267 854 "protected": mark.protected(),
f1539300
SR
855 }));
856
857 if !(dry_run || keep) {
db87d93e 858 if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) {
1ec0d70d
DM
859 task_warn!(
860 worker,
861 "failed to remove dir {:?}: {}",
862 info.backup_dir.relative_path(),
863 err,
f1539300 864 );
8f0b4c1f 865 }
8f579717 866 }
f1539300 867 }
dd8e744f 868
f1539300 869 worker.log_result(&Ok(()));
83b7db02 870
dda70154 871 Ok(json!(prune_result))
83b7db02
DM
872}
873
9805207a
DC
874#[api(
875 input: {
876 properties: {
877 "dry-run": {
878 optional: true,
879 type: bool,
880 default: false,
881 description: "Just show what prune would do, but do not delete anything.",
882 },
883 "prune-options": {
884 type: PruneOptions,
885 flatten: true,
886 },
887 store: {
888 schema: DATASTORE_SCHEMA,
889 },
890 },
891 },
892 returns: {
893 schema: UPID_SCHEMA,
894 },
895 access: {
896 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
897 },
898)]
899/// Prune the datastore
900pub fn prune_datastore(
901 dry_run: bool,
902 prune_options: PruneOptions,
903 store: String,
904 _param: Value,
905 rpcenv: &mut dyn RpcEnvironment,
906) -> Result<String, Error> {
9805207a
DC
907 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
908
e9d2fc93 909 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
9805207a 910
bfa942c0
DC
911 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
912
9805207a
DC
913 let upid_str = WorkerTask::new_thread(
914 "prune",
915 Some(store.clone()),
049a22a3 916 auth_id.to_string(),
bfa942c0 917 to_stdout,
dc7a5b34
TL
918 move |worker| {
919 crate::server::prune_datastore(
920 worker,
921 auth_id,
922 prune_options,
923 &store,
924 datastore,
925 dry_run,
926 )
927 },
9805207a
DC
928 )?;
929
930 Ok(upid_str)
931}
932
dfc58d47
DM
933#[api(
934 input: {
935 properties: {
936 store: {
937 schema: DATASTORE_SCHEMA,
938 },
939 },
940 },
941 returns: {
942 schema: UPID_SCHEMA,
943 },
bb34b589 944 access: {
54552dda 945 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
bb34b589 946 },
dfc58d47
DM
947)]
948/// Start garbage collection.
bf78f708 949pub fn start_garbage_collection(
dfc58d47 950 store: String,
6049b71f 951 _info: &ApiMethod,
dd5495d6 952 rpcenv: &mut dyn RpcEnvironment,
6049b71f 953) -> Result<Value, Error> {
e9d2fc93 954 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
e6dc35ac 955 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
15e9b4ed 956
dc7a5b34 957 let job = Job::new("garbage_collection", &store)
4fdf5ddf 958 .map_err(|_| format_err!("garbage collection already running"))?;
15e9b4ed 959
39735609 960 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
15e9b4ed 961
dc7a5b34
TL
962 let upid_str =
963 crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
964 .map_err(|err| {
965 format_err!(
966 "unable to start garbage collection job on datastore {} - {}",
967 store,
968 err
969 )
970 })?;
0f778e06
DM
971
972 Ok(json!(upid_str))
15e9b4ed
DM
973}
974
a92830dc
DM
975#[api(
976 input: {
977 properties: {
978 store: {
979 schema: DATASTORE_SCHEMA,
980 },
981 },
982 },
983 returns: {
984 type: GarbageCollectionStatus,
bb34b589
DM
985 },
986 access: {
987 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
988 },
a92830dc
DM
989)]
990/// Garbage collection status.
5eeea607 991pub fn garbage_collection_status(
a92830dc 992 store: String,
6049b71f 993 _info: &ApiMethod,
dd5495d6 994 _rpcenv: &mut dyn RpcEnvironment,
a92830dc 995) -> Result<GarbageCollectionStatus, Error> {
e9d2fc93 996 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f2b99c34 997
f2b99c34 998 let status = datastore.last_gc_status();
691c89a0 999
a92830dc 1000 Ok(status)
691c89a0
DM
1001}
1002
bb34b589 1003#[api(
30fb6025
DM
1004 returns: {
1005 description: "List the accessible datastores.",
1006 type: Array,
9b93c620 1007 items: { type: DataStoreListItem },
30fb6025 1008 },
bb34b589 1009 access: {
54552dda 1010 permission: &Permission::Anybody,
bb34b589
DM
1011 },
1012)]
1013/// Datastore list
bf78f708 1014pub fn get_datastore_list(
6049b71f
DM
1015 _param: Value,
1016 _info: &ApiMethod,
54552dda 1017 rpcenv: &mut dyn RpcEnvironment,
455e5f71 1018) -> Result<Vec<DataStoreListItem>, Error> {
e7d4be9d 1019 let (config, _digest) = pbs_config::datastore::config()?;
15e9b4ed 1020
e6dc35ac 1021 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda
DM
1022 let user_info = CachedUserInfo::new()?;
1023
30fb6025 1024 let mut list = Vec::new();
54552dda 1025
30fb6025 1026 for (store, (_, data)) in &config.sections {
9a37bd6c 1027 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
dc7a5b34 1028 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
30fb6025 1029 if allowed {
dc7a5b34
TL
1030 list.push(DataStoreListItem {
1031 store: store.clone(),
1032 comment: data["comment"].as_str().map(String::from),
1033 });
30fb6025 1034 }
54552dda
DM
1035 }
1036
44288184 1037 Ok(list)
15e9b4ed
DM
1038}
1039
0ab08ac9
DM
1040#[sortable]
1041pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
1042 &ApiHandler::AsyncHttp(&download_file),
1043 &ObjectSchema::new(
1044 "Download single raw file from backup snapshot.",
1045 &sorted!([
66c49c21 1046 ("store", false, &DATASTORE_SCHEMA),
0ab08ac9 1047 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
dc7a5b34 1048 ("backup-id", false, &BACKUP_ID_SCHEMA),
0ab08ac9 1049 ("backup-time", false, &BACKUP_TIME_SCHEMA),
4191018c 1050 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
0ab08ac9 1051 ]),
dc7a5b34
TL
1052 ),
1053)
1054.access(
1055 None,
1056 &Permission::Privilege(
1057 &["datastore", "{store}"],
1058 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1059 true,
1060 ),
54552dda 1061);
691c89a0 1062
bf78f708 1063pub fn download_file(
9e47c0a5
DM
1064 _parts: Parts,
1065 _req_body: Body,
1066 param: Value,
255f378a 1067 _info: &ApiMethod,
54552dda 1068 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 1069) -> ApiResponseFuture {
ad51d02a 1070 async move {
3c8c2827 1071 let store = required_string_param(&param, "store")?;
e9d2fc93 1072 let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
f14a8c9a 1073
e6dc35ac 1074 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 1075
3c8c2827 1076 let file_name = required_string_param(&param, "file-name")?.to_owned();
9e47c0a5 1077
988d575d 1078 let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
db87d93e 1079 let backup_id = required_string_param(&param, "backup-id")?.to_owned();
3c8c2827 1080 let backup_time = required_integer_param(&param, "backup-time")?;
9e47c0a5 1081
db87d93e 1082 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
54552dda 1083
dc7a5b34
TL
1084 check_priv_or_backup_owner(
1085 &datastore,
db87d93e 1086 backup_dir.as_ref(),
dc7a5b34
TL
1087 &auth_id,
1088 PRIV_DATASTORE_READ,
1089 )?;
54552dda 1090
dc7a5b34
TL
1091 println!(
1092 "Download {} from {} ({}/{})",
1093 file_name, store, backup_dir, file_name
1094 );
9e47c0a5 1095
ad51d02a
DM
1096 let mut path = datastore.base_path();
1097 path.push(backup_dir.relative_path());
1098 path.push(&file_name);
1099
ba694720 1100 let file = tokio::fs::File::open(&path)
8aa67ee7
WB
1101 .await
1102 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
ad51d02a 1103
dc7a5b34
TL
1104 let payload =
1105 tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
1106 .map_ok(|bytes| bytes.freeze())
1107 .map_err(move |err| {
1108 eprintln!("error during streaming of '{:?}' - {}", &path, err);
1109 err
1110 });
ad51d02a 1111 let body = Body::wrap_stream(payload);
9e47c0a5 1112
ad51d02a
DM
1113 // fixme: set other headers ?
1114 Ok(Response::builder()
dc7a5b34
TL
1115 .status(StatusCode::OK)
1116 .header(header::CONTENT_TYPE, "application/octet-stream")
1117 .body(body)
1118 .unwrap())
1119 }
1120 .boxed()
9e47c0a5
DM
1121}
1122
6ef9bb59
DC
1123#[sortable]
1124pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
1125 &ApiHandler::AsyncHttp(&download_file_decoded),
1126 &ObjectSchema::new(
1127 "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
1128 &sorted!([
1129 ("store", false, &DATASTORE_SCHEMA),
1130 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
dc7a5b34 1131 ("backup-id", false, &BACKUP_ID_SCHEMA),
6ef9bb59
DC
1132 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1133 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
1134 ]),
dc7a5b34
TL
1135 ),
1136)
1137.access(
1138 None,
1139 &Permission::Privilege(
1140 &["datastore", "{store}"],
1141 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1142 true,
1143 ),
6ef9bb59
DC
1144);
1145
bf78f708 1146pub fn download_file_decoded(
6ef9bb59
DC
1147 _parts: Parts,
1148 _req_body: Body,
1149 param: Value,
1150 _info: &ApiMethod,
1151 rpcenv: Box<dyn RpcEnvironment>,
1152) -> ApiResponseFuture {
6ef9bb59 1153 async move {
3c8c2827 1154 let store = required_string_param(&param, "store")?;
e9d2fc93 1155 let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
6ef9bb59 1156
e6dc35ac 1157 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
6ef9bb59 1158
3c8c2827 1159 let file_name = required_string_param(&param, "file-name")?.to_owned();
6ef9bb59 1160
988d575d 1161 let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
db87d93e 1162 let backup_id = required_string_param(&param, "backup-id")?.to_owned();
3c8c2827 1163 let backup_time = required_integer_param(&param, "backup-time")?;
6ef9bb59 1164
db87d93e 1165 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
6ef9bb59 1166
dc7a5b34
TL
1167 check_priv_or_backup_owner(
1168 &datastore,
db87d93e 1169 backup_dir.as_ref(),
dc7a5b34
TL
1170 &auth_id,
1171 PRIV_DATASTORE_READ,
1172 )?;
6ef9bb59 1173
2d55beec 1174 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
6ef9bb59 1175 for file in files {
f28d9088 1176 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
6ef9bb59
DC
1177 bail!("cannot decode '{}' - is encrypted", file_name);
1178 }
1179 }
1180
dc7a5b34
TL
1181 println!(
1182 "Download {} from {} ({}/{})",
1183 file_name, store, backup_dir, file_name
1184 );
6ef9bb59
DC
1185
1186 let mut path = datastore.base_path();
1187 path.push(backup_dir.relative_path());
1188 path.push(&file_name);
1189
1190 let extension = file_name.rsplitn(2, '.').next().unwrap();
1191
1192 let body = match extension {
1193 "didx" => {
dc7a5b34
TL
1194 let index = DynamicIndexReader::open(&path).map_err(|err| {
1195 format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
1196 })?;
2d55beec
FG
1197 let (csum, size) = index.compute_csum();
1198 manifest.verify_file(&file_name, &csum, size)?;
6ef9bb59 1199
14f6c9cb 1200 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1201 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1202 Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
1203 eprintln!("error during streaming of '{:?}' - {}", path, err);
1204 err
1205 }))
1206 }
6ef9bb59 1207 "fidx" => {
dc7a5b34
TL
1208 let index = FixedIndexReader::open(&path).map_err(|err| {
1209 format_err!("unable to read fixed index '{:?}' - {}", &path, err)
1210 })?;
6ef9bb59 1211
2d55beec
FG
1212 let (csum, size) = index.compute_csum();
1213 manifest.verify_file(&file_name, &csum, size)?;
1214
14f6c9cb 1215 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1216 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1217 Body::wrap_stream(
1218 AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
1219 move |err| {
1220 eprintln!("error during streaming of '{:?}' - {}", path, err);
1221 err
1222 },
1223 ),
1224 )
1225 }
6ef9bb59
DC
1226 "blob" => {
1227 let file = std::fs::File::open(&path)
8aa67ee7 1228 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
6ef9bb59 1229
2d55beec
FG
1230 // FIXME: load full blob to verify index checksum?
1231
6ef9bb59 1232 Body::wrap_stream(
dc7a5b34
TL
1233 WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
1234 move |err| {
6ef9bb59
DC
1235 eprintln!("error during streaming of '{:?}' - {}", path, err);
1236 err
dc7a5b34
TL
1237 },
1238 ),
6ef9bb59 1239 )
dc7a5b34 1240 }
6ef9bb59
DC
1241 extension => {
1242 bail!("cannot download '{}' files", extension);
dc7a5b34 1243 }
6ef9bb59
DC
1244 };
1245
1246 // fixme: set other headers ?
1247 Ok(Response::builder()
dc7a5b34
TL
1248 .status(StatusCode::OK)
1249 .header(header::CONTENT_TYPE, "application/octet-stream")
1250 .body(body)
1251 .unwrap())
1252 }
1253 .boxed()
6ef9bb59
DC
1254}
1255
552c2259 1256#[sortable]
0ab08ac9
DM
1257pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
1258 &ApiHandler::AsyncHttp(&upload_backup_log),
255f378a 1259 &ObjectSchema::new(
54552dda 1260 "Upload the client backup log file into a backup snapshot ('client.log.blob').",
552c2259 1261 &sorted!([
66c49c21 1262 ("store", false, &DATASTORE_SCHEMA),
255f378a 1263 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
0ab08ac9 1264 ("backup-id", false, &BACKUP_ID_SCHEMA),
255f378a 1265 ("backup-time", false, &BACKUP_TIME_SCHEMA),
552c2259 1266 ]),
dc7a5b34
TL
1267 ),
1268)
1269.access(
54552dda 1270 Some("Only the backup creator/owner is allowed to do this."),
dc7a5b34 1271 &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false),
54552dda 1272);
9e47c0a5 1273
bf78f708 1274pub fn upload_backup_log(
07ee2235
DM
1275 _parts: Parts,
1276 req_body: Body,
1277 param: Value,
255f378a 1278 _info: &ApiMethod,
54552dda 1279 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 1280) -> ApiResponseFuture {
ad51d02a 1281 async move {
3c8c2827 1282 let store = required_string_param(&param, "store")?;
e9d2fc93 1283 let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?;
07ee2235 1284
dc7a5b34 1285 let file_name = CLIENT_LOG_BLOB_NAME;
07ee2235 1286
988d575d 1287 let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
3c8c2827
WB
1288 let backup_id = required_string_param(&param, "backup-id")?;
1289 let backup_time = required_integer_param(&param, "backup-time")?;
07ee2235 1290
db87d93e 1291 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
07ee2235 1292
e6dc35ac 1293 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
db87d93e 1294 let owner = datastore.get_owner(backup_dir.as_ref())?;
bff85572 1295 check_backup_owner(&owner, &auth_id)?;
54552dda 1296
ad51d02a
DM
1297 let mut path = datastore.base_path();
1298 path.push(backup_dir.relative_path());
1299 path.push(&file_name);
07ee2235 1300
ad51d02a
DM
1301 if path.exists() {
1302 bail!("backup already contains a log.");
1303 }
e128d4e8 1304
dc7a5b34
TL
1305 println!(
1306 "Upload backup log to {}/{}/{}/{}/{}",
1307 store,
1308 backup_type,
1309 backup_id,
1310 backup_dir.backup_time_string(),
1311 file_name
1312 );
ad51d02a
DM
1313
1314 let data = req_body
1315 .map_err(Error::from)
1316 .try_fold(Vec::new(), |mut acc, chunk| {
1317 acc.extend_from_slice(&*chunk);
1318 future::ok::<_, Error>(acc)
1319 })
1320 .await?;
1321
39f18b30
DM
1322 // always verify blob/CRC at server side
1323 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1324
e0a19d33 1325 replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
ad51d02a
DM
1326
1327 // fixme: use correct formatter
53daae8e 1328 Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
dc7a5b34
TL
1329 }
1330 .boxed()
07ee2235
DM
1331}
1332
5b1cfa01
DC
1333#[api(
1334 input: {
1335 properties: {
988d575d
WB
1336 store: { schema: DATASTORE_SCHEMA },
1337 "backup-type": { type: BackupType },
1338 "backup-id": { schema: BACKUP_ID_SCHEMA },
1339 "backup-time": { schema: BACKUP_TIME_SCHEMA },
5b1cfa01
DC
1340 "filepath": {
1341 description: "Base64 encoded path.",
1342 type: String,
1343 }
1344 },
1345 },
1346 access: {
1347 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1348 },
1349)]
1350/// Get the entries of the given path of the catalog
bf78f708 1351pub fn catalog(
5b1cfa01 1352 store: String,
988d575d 1353 backup_type: BackupType,
5b1cfa01
DC
1354 backup_id: String,
1355 backup_time: i64,
1356 filepath: String,
5b1cfa01 1357 rpcenv: &mut dyn RpcEnvironment,
227501c0 1358) -> Result<Vec<ArchiveEntry>, Error> {
e9d2fc93 1359 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
5b1cfa01 1360
e6dc35ac 1361 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
5b1cfa01 1362
6b0c6492 1363 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
5b1cfa01 1364
dc7a5b34
TL
1365 check_priv_or_backup_owner(
1366 &datastore,
db87d93e 1367 backup_dir.as_ref(),
dc7a5b34
TL
1368 &auth_id,
1369 PRIV_DATASTORE_READ,
1370 )?;
5b1cfa01 1371
9238cdf5
FG
1372 let file_name = CATALOG_NAME;
1373
2d55beec 1374 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1375 for file in files {
1376 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1377 bail!("cannot decode '{}' - is encrypted", file_name);
1378 }
1379 }
1380
5b1cfa01
DC
1381 let mut path = datastore.base_path();
1382 path.push(backup_dir.relative_path());
9238cdf5 1383 path.push(file_name);
5b1cfa01
DC
1384
1385 let index = DynamicIndexReader::open(&path)
1386 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1387
2d55beec 1388 let (csum, size) = index.compute_csum();
9a37bd6c 1389 manifest.verify_file(file_name, &csum, size)?;
2d55beec 1390
14f6c9cb 1391 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
5b1cfa01
DC
1392 let reader = BufferedDynamicReader::new(index, chunk_reader);
1393
1394 let mut catalog_reader = CatalogReader::new(reader);
5b1cfa01 1395
5279ee74 1396 let path = if filepath != "root" && filepath != "/" {
227501c0
DC
1397 base64::decode(filepath)?
1398 } else {
1399 vec![b'/']
1400 };
5b1cfa01 1401
86582454 1402 catalog_reader.list_dir_contents(&path)
5b1cfa01
DC
1403}
1404
d33d8f4e
DC
1405#[sortable]
1406pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1407 &ApiHandler::AsyncHttp(&pxar_file_download),
1408 &ObjectSchema::new(
1ffe0301 1409 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
d33d8f4e
DC
1410 &sorted!([
1411 ("store", false, &DATASTORE_SCHEMA),
1412 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1413 ("backup-id", false, &BACKUP_ID_SCHEMA),
1414 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1415 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
984ddb2f 1416 ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
d33d8f4e
DC
1417 ]),
1418 )
1419).access(None, &Permission::Privilege(
1420 &["datastore", "{store}"],
1421 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1422 true)
1423);
1424
bf78f708 1425pub fn pxar_file_download(
d33d8f4e
DC
1426 _parts: Parts,
1427 _req_body: Body,
1428 param: Value,
1429 _info: &ApiMethod,
1430 rpcenv: Box<dyn RpcEnvironment>,
1431) -> ApiResponseFuture {
d33d8f4e 1432 async move {
3c8c2827 1433 let store = required_string_param(&param, "store")?;
e9d2fc93 1434 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
d33d8f4e 1435
e6dc35ac 1436 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
d33d8f4e 1437
3c8c2827 1438 let filepath = required_string_param(&param, "filepath")?.to_owned();
d33d8f4e 1439
988d575d 1440 let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
3c8c2827
WB
1441 let backup_id = required_string_param(&param, "backup-id")?;
1442 let backup_time = required_integer_param(&param, "backup-time")?;
d33d8f4e 1443
984ddb2f
DC
1444 let tar = param["tar"].as_bool().unwrap_or(false);
1445
6b0c6492 1446 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
d33d8f4e 1447
dc7a5b34
TL
1448 check_priv_or_backup_owner(
1449 &datastore,
db87d93e 1450 backup_dir.as_ref(),
dc7a5b34
TL
1451 &auth_id,
1452 PRIV_DATASTORE_READ,
1453 )?;
d33d8f4e 1454
d33d8f4e 1455 let mut components = base64::decode(&filepath)?;
3984a5fd 1456 if !components.is_empty() && components[0] == b'/' {
d33d8f4e
DC
1457 components.remove(0);
1458 }
1459
d8d8af98 1460 let mut split = components.splitn(2, |c| *c == b'/');
9238cdf5 1461 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
0dfce17a 1462 let file_path = split.next().unwrap_or(b"/");
2d55beec 1463 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1464 for file in files {
1465 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1466 bail!("cannot decode '{}' - is encrypted", pxar_name);
1467 }
1468 }
d33d8f4e 1469
9238cdf5
FG
1470 let mut path = datastore.base_path();
1471 path.push(backup_dir.relative_path());
1472 path.push(pxar_name);
d33d8f4e
DC
1473
1474 let index = DynamicIndexReader::open(&path)
1475 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1476
2d55beec 1477 let (csum, size) = index.compute_csum();
9a37bd6c 1478 manifest.verify_file(pxar_name, &csum, size)?;
2d55beec 1479
14f6c9cb 1480 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
d33d8f4e
DC
1481 let reader = BufferedDynamicReader::new(index, chunk_reader);
1482 let archive_size = reader.archive_size();
1483 let reader = LocalDynamicReadAt::new(reader);
1484
1485 let decoder = Accessor::new(reader, archive_size).await?;
1486 let root = decoder.open_root().await?;
2e219481 1487 let path = OsStr::from_bytes(file_path).to_os_string();
d33d8f4e 1488 let file = root
dc7a5b34
TL
1489 .lookup(&path)
1490 .await?
2e219481 1491 .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
d33d8f4e 1492
804f6143
DC
1493 let body = match file.kind() {
1494 EntryKind::File { .. } => Body::wrap_stream(
1495 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1496 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1497 err
1498 }),
1499 ),
1500 EntryKind::Hardlink(_) => Body::wrap_stream(
1501 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1502 .map_err(move |err| {
dc7a5b34 1503 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
804f6143
DC
1504 err
1505 }),
1506 ),
1507 EntryKind::Directory => {
984ddb2f 1508 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
804f6143 1509 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
984ddb2f 1510 if tar {
dc7a5b34
TL
1511 proxmox_rest_server::spawn_internal_task(create_tar(
1512 channelwriter,
1513 decoder,
1514 path.clone(),
1515 false,
1516 ));
984ddb2f
DC
1517 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1518 Body::wrap_stream(zstdstream.map_err(move |err| {
1519 eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
1520 err
1521 }))
1522 } else {
dc7a5b34
TL
1523 proxmox_rest_server::spawn_internal_task(create_zip(
1524 channelwriter,
1525 decoder,
1526 path.clone(),
1527 false,
1528 ));
984ddb2f
DC
1529 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
1530 eprintln!("error during streaming of zip '{:?}' - {}", path, err);
1531 err
1532 }))
1533 }
804f6143
DC
1534 }
1535 other => bail!("cannot download file of type {:?}", other),
1536 };
d33d8f4e
DC
1537
1538 // fixme: set other headers ?
1539 Ok(Response::builder()
dc7a5b34
TL
1540 .status(StatusCode::OK)
1541 .header(header::CONTENT_TYPE, "application/octet-stream")
1542 .body(body)
1543 .unwrap())
1544 }
1545 .boxed()
d33d8f4e
DC
1546}
1547
1a0d3d11
DM
1548#[api(
1549 input: {
1550 properties: {
1551 store: {
1552 schema: DATASTORE_SCHEMA,
1553 },
1554 timeframe: {
c68fa58a 1555 type: RRDTimeFrame,
1a0d3d11
DM
1556 },
1557 cf: {
1558 type: RRDMode,
1559 },
1560 },
1561 },
1562 access: {
1563 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1564 },
1565)]
1566/// Read datastore stats
bf78f708 1567pub fn get_rrd_stats(
1a0d3d11 1568 store: String,
c68fa58a 1569 timeframe: RRDTimeFrame,
1a0d3d11
DM
1570 cf: RRDMode,
1571 _param: Value,
1572) -> Result<Value, Error> {
e9d2fc93 1573 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1574 let disk_manager = crate::tools::disks::DiskManage::new();
1575
1576 let mut rrd_fields = vec![
dc7a5b34
TL
1577 "total",
1578 "used",
1579 "read_ios",
1580 "read_bytes",
1581 "write_ios",
1582 "write_bytes",
f27b6086
DC
1583 ];
1584
1585 // we do not have io_ticks for zpools, so don't include them
1586 match disk_manager.find_mounted_device(&datastore.base_path()) {
dc7a5b34 1587 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
f27b6086
DC
1588 _ => rrd_fields.push("io_ticks"),
1589 };
1590
dc7a5b34 1591 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1a0d3d11
DM
1592}
1593
5fd823c3
HL
1594#[api(
1595 input: {
1596 properties: {
1597 store: {
1598 schema: DATASTORE_SCHEMA,
1599 },
1600 },
1601 },
1602 access: {
1603 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1604 },
1605)]
1606/// Read datastore stats
dc7a5b34 1607pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
5fd823c3
HL
1608 let active_operations = task_tracking::get_active_operations(&store)?;
1609 Ok(json!({
1610 "read": active_operations.read,
1611 "write": active_operations.write,
1612 }))
1613}
1614
d6688884
SR
1615#[api(
1616 input: {
1617 properties: {
988d575d
WB
1618 store: { schema: DATASTORE_SCHEMA },
1619 "backup-type": { type: BackupType },
1620 "backup-id": { schema: BACKUP_ID_SCHEMA },
d6688884
SR
1621 },
1622 },
1623 access: {
1624 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1625 },
1626)]
1627/// Get "notes" for a backup group
1628pub fn get_group_notes(
1629 store: String,
988d575d 1630 backup_type: BackupType,
d6688884
SR
1631 backup_id: String,
1632 rpcenv: &mut dyn RpcEnvironment,
1633) -> Result<String, Error> {
e9d2fc93 1634 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
d6688884
SR
1635
1636 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
db87d93e 1637 let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
d6688884
SR
1638
1639 check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
1640
1641 let note_path = get_group_note_path(&datastore, &backup_group);
1642 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1643}
1644
1645#[api(
1646 input: {
1647 properties: {
988d575d
WB
1648 store: { schema: DATASTORE_SCHEMA },
1649 "backup-type": { type: BackupType },
1650 "backup-id": { schema: BACKUP_ID_SCHEMA },
d6688884
SR
1651 notes: {
1652 description: "A multiline text.",
1653 },
1654 },
1655 },
1656 access: {
1657 permission: &Permission::Privilege(&["datastore", "{store}"],
1658 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1659 true),
1660 },
1661)]
1662/// Set "notes" for a backup group
1663pub fn set_group_notes(
1664 store: String,
988d575d 1665 backup_type: BackupType,
d6688884
SR
1666 backup_id: String,
1667 notes: String,
1668 rpcenv: &mut dyn RpcEnvironment,
1669) -> Result<(), Error> {
e9d2fc93 1670 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
d6688884
SR
1671
1672 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
db87d93e 1673 let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
d6688884
SR
1674
1675 check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
1676
1677 let note_path = get_group_note_path(&datastore, &backup_group);
e0a19d33 1678 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
d6688884
SR
1679
1680 Ok(())
1681}
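// Hedged usage sketch (parameter values are examples; the path prefix is an assumption):
// group notes are read and written through the "group-notes" subdir registered in
// DATASTORE_INFO_SUBDIRS below, e.g.
//
//   PUT .../admin/datastore/<store>/group-notes
//       { "backup-type": "vm", "backup-id": "100", "notes": "nightly backups of VM 100" }
//
// after which `replace_file` above persists the text in the group's notes file.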
1682
912b3f5b
DM
1683#[api(
1684 input: {
1685 properties: {
988d575d
WB
1686 store: { schema: DATASTORE_SCHEMA },
1687 "backup-type": { type: BackupType },
1688 "backup-id": { schema: BACKUP_ID_SCHEMA },
1689 "backup-time": { schema: BACKUP_TIME_SCHEMA },
912b3f5b
DM
1690 },
1691 },
1692 access: {
1401f4be 1693 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
912b3f5b
DM
1694 },
1695)]
1696/// Get "notes" for a specific backup
bf78f708 1697pub fn get_notes(
912b3f5b 1698 store: String,
988d575d 1699 backup_type: BackupType,
912b3f5b
DM
1700 backup_id: String,
1701 backup_time: i64,
1702 rpcenv: &mut dyn RpcEnvironment,
1703) -> Result<String, Error> {
e9d2fc93 1704 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
912b3f5b 1705
e6dc35ac 1706 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
6b0c6492 1707 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
912b3f5b 1708
dc7a5b34
TL
1709 check_priv_or_backup_owner(
1710 &datastore,
db87d93e 1711 backup_dir.as_ref(),
dc7a5b34
TL
1712 &auth_id,
1713 PRIV_DATASTORE_AUDIT,
1714 )?;
912b3f5b 1715
883aa6d5 1716 let (manifest, _) = datastore.load_manifest(&backup_dir)?;
912b3f5b 1717
dc7a5b34 1718 let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
912b3f5b
DM
1719
1720 Ok(String::from(notes))
1721}
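// Snapshot notes are not kept in a separate file; they live in the unprotected part of
// the snapshot manifest. Abridged example of what `load_manifest` returns once a note
// has been set (all other manifest fields omitted):
//
//   { ..., "unprotected": { "notes": "pre-upgrade state", ... } }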
1722
1723#[api(
1724 input: {
1725 properties: {
988d575d
WB
1726 store: { schema: DATASTORE_SCHEMA },
1727 "backup-type": { type: BackupType },
1728 "backup-id": { schema: BACKUP_ID_SCHEMA },
1729 "backup-time": { schema: BACKUP_TIME_SCHEMA },
912b3f5b
DM
1730 notes: {
1731 description: "A multiline text.",
1732 },
1733 },
1734 },
1735 access: {
b728a69e
FG
1736 permission: &Permission::Privilege(&["datastore", "{store}"],
1737 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1738 true),
912b3f5b
DM
1739 },
1740)]
1741/// Set "notes" for a specific backup
bf78f708 1742pub fn set_notes(
912b3f5b 1743 store: String,
988d575d 1744 backup_type: BackupType,
912b3f5b
DM
1745 backup_id: String,
1746 backup_time: i64,
1747 notes: String,
1748 rpcenv: &mut dyn RpcEnvironment,
1749) -> Result<(), Error> {
e9d2fc93 1750 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
912b3f5b 1751
e6dc35ac 1752 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
6b0c6492 1753 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
912b3f5b 1754
dc7a5b34
TL
1755 check_priv_or_backup_owner(
1756 &datastore,
db87d93e 1757 backup_dir.as_ref(),
dc7a5b34
TL
1758 &auth_id,
1759 PRIV_DATASTORE_MODIFY,
1760 )?;
912b3f5b 1761
dc7a5b34
TL
1762 datastore
1763 .update_manifest(&backup_dir, |manifest| {
1764 manifest.unprotected["notes"] = notes.into();
1765 })
1766 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
912b3f5b
DM
1767
1768 Ok(())
1769}
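// Hedged example (values and path prefix are illustrative): the "notes" subdir registered
// below takes the same parameters as this function, e.g.
//
//   PUT .../admin/datastore/<store>/notes
//       { "backup-type": "ct", "backup-id": "105", "backup-time": 1648000000,
//         "notes": "verified restore test" }
//
// The closure passed to `update_manifest` only touches `manifest.unprotected`, so the
// protected part of the manifest is left unchanged.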
1770
8292d3d2
DC
1771#[api(
1772 input: {
1773 properties: {
988d575d
WB
1774 store: { schema: DATASTORE_SCHEMA },
1775 "backup-type": { type: BackupType },
1776 "backup-id": { schema: BACKUP_ID_SCHEMA },
1777 "backup-time": { schema: BACKUP_TIME_SCHEMA },
8292d3d2
DC
1778 },
1779 },
1780 access: {
1781 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1782 },
1783)]
1784/// Query protection for a specific backup
1785pub fn get_protection(
1786 store: String,
988d575d 1787 backup_type: BackupType,
8292d3d2
DC
1788 backup_id: String,
1789 backup_time: i64,
1790 rpcenv: &mut dyn RpcEnvironment,
1791) -> Result<bool, Error> {
e9d2fc93 1792 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
8292d3d2
DC
1793
1794 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
6b0c6492 1795 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
8292d3d2 1796
dc7a5b34
TL
1797 check_priv_or_backup_owner(
1798 &datastore,
db87d93e 1799 backup_dir.as_ref(),
dc7a5b34
TL
1800 &auth_id,
1801 PRIV_DATASTORE_AUDIT,
1802 )?;
8292d3d2 1803
6da20161 1804 Ok(backup_dir.is_protected())
8292d3d2
DC
1805}
1806
1807#[api(
1808 input: {
1809 properties: {
988d575d
WB
1810 store: { schema: DATASTORE_SCHEMA },
1811 "backup-type": { type: BackupType },
1812 "backup-id": { schema: BACKUP_ID_SCHEMA },
1813 "backup-time": { schema: BACKUP_TIME_SCHEMA },
8292d3d2
DC
1814 protected: {
1815 description: "Enable/disable protection.",
1816 },
1817 },
1818 },
1819 access: {
1820 permission: &Permission::Privilege(&["datastore", "{store}"],
1821 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1822 true),
1823 },
1824)]
1825/// Enable or disable protection for a specific backup
1826pub fn set_protection(
1827 store: String,
988d575d 1828 backup_type: BackupType,
8292d3d2
DC
1829 backup_id: String,
1830 backup_time: i64,
1831 protected: bool,
1832 rpcenv: &mut dyn RpcEnvironment,
1833) -> Result<(), Error> {
e9d2fc93 1834 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
8292d3d2
DC
1835
1836 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
6b0c6492 1837 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
8292d3d2 1838
dc7a5b34
TL
1839 check_priv_or_backup_owner(
1840 &datastore,
db87d93e 1841 backup_dir.as_ref(),
dc7a5b34
TL
1842 &auth_id,
1843 PRIV_DATASTORE_MODIFY,
1844 )?;
8292d3d2
DC
1845
1846 datastore.update_protection(&backup_dir, protected)
1847}
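// Hedged example (values and path prefix are illustrative): the protection flag is read
// via `get_protection` above and toggled through the "protected" subdir registered below:
//
//   PUT .../admin/datastore/<store>/protected
//       { "backup-type": "host", "backup-id": "backupserver",
//         "backup-time": 1648000000, "protected": true }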
1848
72be0eb1 1849#[api(
4940012d 1850 input: {
72be0eb1 1851 properties: {
988d575d
WB
1852 store: { schema: DATASTORE_SCHEMA },
1853 "backup-type": { type: BackupType },
1854 "backup-id": { schema: BACKUP_ID_SCHEMA },
72be0eb1 1855 "new-owner": {
e6dc35ac 1856 type: Authid,
72be0eb1
DW
1857 },
1858 },
4940012d
FG
1859 },
1860 access: {
bff85572
FG
1861 permission: &Permission::Anybody,
 1862 description: "Datastore.Modify on the whole datastore, or Datastore.Backup when changing ownership between a user and one of that user's own API tokens on backups owned by that user"
4940012d 1863 },
72be0eb1
DW
1864)]
1865/// Change owner of a backup group
bf78f708 1866pub fn set_backup_owner(
72be0eb1 1867 store: String,
988d575d 1868 backup_type: BackupType,
72be0eb1 1869 backup_id: String,
e6dc35ac 1870 new_owner: Authid,
bff85572 1871 rpcenv: &mut dyn RpcEnvironment,
72be0eb1 1872) -> Result<(), Error> {
e9d2fc93 1873 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
72be0eb1 1874
6b0c6492 1875 let backup_group = datastore.backup_group_from_parts(backup_type, backup_id);
72be0eb1 1876
bff85572
FG
1877 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1878
72be0eb1
DW
1879 let user_info = CachedUserInfo::new()?;
1880
bff85572
FG
1881 let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
1882
1883 let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
1884 // High-privilege user/token
1885 true
1886 } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
db87d93e 1887 let owner = datastore.get_owner(backup_group.as_ref())?;
bff85572
FG
1888
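        // Summary of the unprivileged (Datastore.Backup only) cases decided below:
        //   token -> token : allowed if both tokens belong to the calling user
        //   token -> user  : allowed if the caller owns the token and is the new owner
        //   user  -> token : allowed if the caller is the current owner and owns the new token
        //   user  -> user  : never allowed without Datastore.Modify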
1889 match (owner.is_token(), new_owner.is_token()) {
1890 (true, true) => {
1891 // API token to API token, owned by same user
1892 let owner = owner.user();
1893 let new_owner = new_owner.user();
1894 owner == new_owner && Authid::from(owner.clone()) == auth_id
dc7a5b34 1895 }
bff85572
FG
1896 (true, false) => {
1897 // API token to API token owner
dc7a5b34
TL
1898 Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
1899 }
bff85572
FG
1900 (false, true) => {
1901 // API token owner to API token
dc7a5b34
TL
1902 owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
1903 }
bff85572
FG
1904 (false, false) => {
1905 // User to User, not allowed for unprivileged users
1906 false
dc7a5b34 1907 }
bff85572
FG
1908 }
1909 } else {
1910 false
1911 };
1912
1913 if !allowed {
dc7a5b34
TL
1914 return Err(http_err!(
1915 UNAUTHORIZED,
1916 "{} does not have permission to change owner of backup group '{}' to {}",
1917 auth_id,
1918 backup_group,
1919 new_owner,
bff85572
FG
1920 ));
1921 }
1922
e6dc35ac 1923 if !user_info.is_active_auth_id(&new_owner) {
dc7a5b34
TL
1924 bail!(
1925 "{} '{}' is inactive or non-existent",
1926 if new_owner.is_token() {
1927 "API token".to_string()
1928 } else {
1929 "user".to_string()
1930 },
1931 new_owner
1932 );
72be0eb1
DW
1933 }
1934
db87d93e 1935 datastore.set_owner(backup_group.as_ref(), &new_owner, true)?;
72be0eb1
DW
1936
1937 Ok(())
1938}
1939
552c2259 1940#[sortable]
255f378a 1941const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
5fd823c3
HL
1942 (
1943 "active-operations",
dc7a5b34 1944 &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
5b1cfa01 1945 ),
dc7a5b34 1946 ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
72be0eb1
DW
1947 (
1948 "change-owner",
dc7a5b34 1949 &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
72be0eb1 1950 ),
255f378a
DM
1951 (
1952 "download",
dc7a5b34 1953 &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
255f378a 1954 ),
6ef9bb59
DC
1955 (
1956 "download-decoded",
dc7a5b34 1957 &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
255f378a 1958 ),
dc7a5b34 1959 ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
255f378a
DM
1960 (
1961 "gc",
1962 &Router::new()
1963 .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
dc7a5b34 1964 .post(&API_METHOD_START_GARBAGE_COLLECTION),
255f378a 1965 ),
d6688884
SR
1966 (
1967 "group-notes",
1968 &Router::new()
1969 .get(&API_METHOD_GET_GROUP_NOTES)
dc7a5b34 1970 .put(&API_METHOD_SET_GROUP_NOTES),
d6688884 1971 ),
255f378a
DM
1972 (
1973 "groups",
1974 &Router::new()
b31c8019 1975 .get(&API_METHOD_LIST_GROUPS)
dc7a5b34 1976 .delete(&API_METHOD_DELETE_GROUP),
255f378a 1977 ),
912b3f5b
DM
1978 (
1979 "notes",
1980 &Router::new()
1981 .get(&API_METHOD_GET_NOTES)
dc7a5b34 1982 .put(&API_METHOD_SET_NOTES),
912b3f5b 1983 ),
8292d3d2
DC
1984 (
1985 "protected",
1986 &Router::new()
1987 .get(&API_METHOD_GET_PROTECTION)
dc7a5b34 1988 .put(&API_METHOD_SET_PROTECTION),
255f378a 1989 ),
dc7a5b34 1990 ("prune", &Router::new().post(&API_METHOD_PRUNE)),
9805207a
DC
1991 (
1992 "prune-datastore",
dc7a5b34 1993 &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
9805207a 1994 ),
d33d8f4e
DC
1995 (
1996 "pxar-file-download",
dc7a5b34 1997 &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
1a0d3d11 1998 ),
dc7a5b34 1999 ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
255f378a
DM
2000 (
2001 "snapshots",
2002 &Router::new()
fc189b19 2003 .get(&API_METHOD_LIST_SNAPSHOTS)
dc7a5b34 2004 .delete(&API_METHOD_DELETE_SNAPSHOT),
255f378a 2005 ),
dc7a5b34 2006 ("status", &Router::new().get(&API_METHOD_STATUS)),
255f378a
DM
2007 (
2008 "upload-backup-log",
dc7a5b34 2009 &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
c2009e53 2010 ),
dc7a5b34 2011 ("verify", &Router::new().post(&API_METHOD_VERIFY)),
255f378a
DM
2012];
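// For orientation (the "/api2/json" prefix and mount point are assumptions based on this
// file's location under api2/admin): every entry above becomes a sub-path of a concrete
// datastore, for example
//
//   /api2/json/admin/datastore/{store}/snapshots
//   /api2/json/admin/datastore/{store}/rrd
//   /api2/json/admin/datastore/{store}/gc
//
// with the top-level ROUTER below matching the "{store}" component via `match_all`.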
2013
ad51d02a 2014const DATASTORE_INFO_ROUTER: Router = Router::new()
255f378a
DM
2015 .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
2016 .subdirs(DATASTORE_INFO_SUBDIRS);
2017
255f378a 2018pub const ROUTER: Router = Router::new()
bb34b589 2019 .get(&API_METHOD_GET_DATASTORE_LIST)
255f378a 2020 .match_all("store", &DATASTORE_INFO_ROUTER);