git.proxmox.com Git - proxmox-backup.git/blame - src/api2/admin/datastore.rs
bf78f708
DM
1//! Datastore Management
2
0d08fcee 3use std::collections::HashSet;
d33d8f4e
DC
4use std::ffi::OsStr;
5use std::os::unix::ffi::OsStrExt;
d6688884 6use std::path::PathBuf;
6da20161 7use std::sync::Arc;
cad540e9 8
6ef9bb59 9use anyhow::{bail, format_err, Error};
9e47c0a5 10use futures::*;
cad540e9
WB
11use hyper::http::request::Parts;
12use hyper::{header, Body, Response, StatusCode};
15e9b4ed 13use serde_json::{json, Value};
7c667013 14use tokio_stream::wrappers::ReceiverStream;
15e9b4ed 15
dc7a5b34
TL
16use proxmox_async::blocking::WrappedReaderStream;
17use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
984ddb2f 18use proxmox_compression::zstd::ZstdEncoder;
6ef1b649 19use proxmox_router::{
dc7a5b34
TL
20 http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
21 Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
6ef1b649
WB
22};
23use proxmox_schema::*;
dc7a5b34
TL
24use proxmox_sys::fs::{
25 file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
26};
27use proxmox_sys::sortable;
d5790a9f 28use proxmox_sys::{task_log, task_warn};
e18a6c9e 29
2e219481 30use pxar::accessor::aio::Accessor;
d33d8f4e
DC
31use pxar::EntryKind;
32
dc7a5b34 33use pbs_api_types::{
988d575d 34 Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
dc7a5b34
TL
35 GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
36 SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
37 BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
38 PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
39 PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
b2065dc7 40};
984ddb2f 41use pbs_client::pxar::{create_tar, create_zip};
dc7a5b34 42use pbs_config::CachedUserInfo;
b2065dc7
WB
43use pbs_datastore::backup_info::BackupInfo;
44use pbs_datastore::cached_chunk_reader::CachedChunkReader;
013b1e8b 45use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
b2065dc7
WB
46use pbs_datastore::data_blob::DataBlob;
47use pbs_datastore::data_blob_reader::DataBlobReader;
48use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
dc7a5b34 49use pbs_datastore::fixed_index::FixedIndexReader;
b2065dc7
WB
50use pbs_datastore::index::IndexFile;
51use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
89725197 52use pbs_datastore::prune::compute_prune_info;
dc7a5b34
TL
53use pbs_datastore::{
54 check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
55 StoreProgress, CATALOG_NAME,
56};
3c8c2827 57use pbs_tools::json::{required_integer_param, required_string_param};
dc7a5b34 58use proxmox_rest_server::{formatter, WorkerTask};
2b7f8dd5 59
431cc7b1 60use crate::api2::node::rrd::create_value_from_rrd;
dc7a5b34 61use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter};
54552dda 62
b9700a9f 63use crate::server::jobstate::Job;
804f6143 64
d6688884
SR
65const GROUP_NOTES_FILE_NAME: &str = "notes";
66
db87d93e 67fn get_group_note_path(store: &DataStore, group: &pbs_api_types::BackupGroup) -> PathBuf {
d6688884 68 let mut note_path = store.base_path();
db87d93e 69 note_path.push(group.to_string());
d6688884
SR
70 note_path.push(GROUP_NOTES_FILE_NAME);
71 note_path
72}
73
bff85572 74fn check_priv_or_backup_owner(
e7cb4dc5 75 store: &DataStore,
db87d93e 76 group: &pbs_api_types::BackupGroup,
e6dc35ac 77 auth_id: &Authid,
bff85572
FG
78 required_privs: u64,
79) -> Result<(), Error> {
80 let user_info = CachedUserInfo::new()?;
9a37bd6c 81 let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);
bff85572
FG
82
83 if privs & required_privs == 0 {
84 let owner = store.get_owner(group)?;
85 check_backup_owner(&owner, auth_id)?;
86 }
87 Ok(())
88}
89
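// Illustrative sketch (not from the upstream source): every handler below follows the
// same pattern -- pass the privilege bits that bypass the ownership check, and fall back
// to comparing the backup owner otherwise. Assuming `datastore`, `group` and `rpcenv`
// are in scope as in the handlers of this module:
//
//     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
//     check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_READ)?;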
e7cb4dc5
WB
90fn read_backup_index(
91 store: &DataStore,
92 backup_dir: &BackupDir,
93) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
ff86ef00 94 let (manifest, index_size) = store.load_manifest(backup_dir)?;
8c70e3eb 95
09b1f7b2
DM
96 let mut result = Vec::new();
97 for item in manifest.files() {
98 result.push(BackupContent {
99 filename: item.filename.clone(),
f28d9088 100 crypt_mode: Some(item.crypt_mode),
09b1f7b2
DM
101 size: Some(item.size),
102 });
8c70e3eb
DM
103 }
104
09b1f7b2 105 result.push(BackupContent {
96d65fbc 106 filename: MANIFEST_BLOB_NAME.to_string(),
882c0823
FG
107 crypt_mode: match manifest.signature {
108 Some(_) => Some(CryptMode::SignOnly),
109 None => Some(CryptMode::None),
110 },
09b1f7b2
DM
111 size: Some(index_size),
112 });
4f1e40a2 113
70030b43 114 Ok((manifest, result))
8c70e3eb
DM
115}
116
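// Illustrative sketch (not from the upstream source): the helper returns the parsed
// manifest together with one BackupContent entry per referenced file, appending the
// manifest blob itself last. A typical caller, assuming `datastore` and `backup_dir`:
//
//     let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
//     let total: u64 = files.iter().map(|f| f.size.unwrap_or(0)).sum();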
1c090810
DC
117fn get_all_snapshot_files(
118 store: &DataStore,
119 info: &BackupInfo,
70030b43 120) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
9a37bd6c 121 let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;
1c090810
DC
122
123 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
124 acc.insert(item.filename.clone());
125 acc
126 });
127
128 for file in &info.files {
dc7a5b34
TL
129 if file_set.contains(file) {
130 continue;
131 }
f28d9088
WB
132 files.push(BackupContent {
133 filename: file.to_string(),
134 size: None,
135 crypt_mode: None,
136 });
1c090810
DC
137 }
138
70030b43 139 Ok((manifest, files))
1c090810
DC
140}
141
b31c8019
DM
142#[api(
143 input: {
144 properties: {
145 store: {
146 schema: DATASTORE_SCHEMA,
147 },
148 },
149 },
7b570c17 150 returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
bb34b589 151 access: {
54552dda
DM
152 permission: &Permission::Privilege(
153 &["datastore", "{store}"],
154 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
155 true),
bb34b589 156 },
b31c8019
DM
157)]
158/// List backup groups.
b2362a12 159pub fn list_groups(
b31c8019 160 store: String,
54552dda 161 rpcenv: &mut dyn RpcEnvironment,
b31c8019 162) -> Result<Vec<GroupListItem>, Error> {
e6dc35ac 163 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 164 let user_info = CachedUserInfo::new()?;
e6dc35ac 165 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
54552dda 166
e9d2fc93 167 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
0d08fcee
FG
168 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
169
249dde8b
TL
170 datastore
171 .iter_backup_groups()?
172 .try_fold(Vec::new(), |mut group_info, group| {
173 let group = group?;
db87d93e 174 let owner = match datastore.get_owner(group.as_ref()) {
249dde8b
TL
175 Ok(auth_id) => auth_id,
176 Err(err) => {
177 let id = &store;
178 eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
179 return Ok(group_info);
dc7a5b34 180 }
249dde8b
TL
181 };
182 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
183 return Ok(group_info);
184 }
0d08fcee 185
6da20161 186 let snapshots = match group.list_backups() {
249dde8b
TL
187 Ok(snapshots) => snapshots,
188 Err(_) => return Ok(group_info),
189 };
0d08fcee 190
249dde8b
TL
191 let backup_count: u64 = snapshots.len() as u64;
192 if backup_count == 0 {
193 return Ok(group_info);
194 }
0d08fcee 195
249dde8b
TL
196 let last_backup = snapshots
197 .iter()
198 .fold(&snapshots[0], |a, b| {
199 if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
200 a
201 } else {
202 b
203 }
204 })
205 .to_owned();
206
db87d93e 207 let note_path = get_group_note_path(&datastore, group.as_ref());
249dde8b
TL
208 let comment = file_read_firstline(&note_path).ok();
209
210 group_info.push(GroupListItem {
988d575d 211 backup: group.into(),
249dde8b
TL
212 last_backup: last_backup.backup_dir.backup_time(),
213 owner: Some(owner),
214 backup_count,
215 files: last_backup.files,
216 comment,
0d08fcee
FG
217 });
218
249dde8b
TL
219 Ok(group_info)
220 })
812c6f87 221}
8f579717 222
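// Illustrative sketch (not from the upstream source), assuming the usual router layout
// that mounts this module under /api2/json/admin/datastore/{store}; the group listing
// can then be queried with something like:
//
//     curl -s -H "Authorization: PBSAPIToken=user@pbs!token:SECRET" \
//         "https://pbs.example.com:8007/api2/json/admin/datastore/store1/groups"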
f32791b4
DC
223#[api(
224 input: {
225 properties: {
988d575d
WB
226 store: { schema: DATASTORE_SCHEMA },
227 "backup-type": { type: BackupType },
228 "backup-id": { schema: BACKUP_ID_SCHEMA },
f32791b4
DC
229 },
230 },
231 access: {
232 permission: &Permission::Privilege(
233 &["datastore", "{store}"],
234 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
235 true),
236 },
237)]
238/// Delete backup group including all snapshots.
239pub fn delete_group(
240 store: String,
988d575d 241 backup_type: BackupType,
f32791b4
DC
242 backup_id: String,
243 _info: &ApiMethod,
244 rpcenv: &mut dyn RpcEnvironment,
245) -> Result<Value, Error> {
f32791b4
DC
246 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
247
db87d93e 248 let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
e9d2fc93 249 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
f32791b4
DC
250
251 check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
252
5cc7d891 253 if !datastore.remove_backup_group(&group)? {
171a00ca 254 bail!("group only partially deleted due to protected snapshots");
5cc7d891 255 }
f32791b4
DC
256
257 Ok(Value::Null)
258}
259
09b1f7b2
DM
260#[api(
261 input: {
262 properties: {
988d575d
WB
263 store: { schema: DATASTORE_SCHEMA },
264 "backup-type": { type: BackupType },
265 "backup-id": { schema: BACKUP_ID_SCHEMA },
266 "backup-time": { schema: BACKUP_TIME_SCHEMA },
09b1f7b2
DM
267 },
268 },
7b570c17 269 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
bb34b589 270 access: {
54552dda
DM
271 permission: &Permission::Privilege(
272 &["datastore", "{store}"],
273 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
274 true),
bb34b589 275 },
09b1f7b2
DM
276)]
277/// List snapshot files.
ea5f547f 278pub fn list_snapshot_files(
09b1f7b2 279 store: String,
988d575d 280 backup_type: BackupType,
09b1f7b2
DM
281 backup_id: String,
282 backup_time: i64,
01a13423 283 _info: &ApiMethod,
54552dda 284 rpcenv: &mut dyn RpcEnvironment,
09b1f7b2 285) -> Result<Vec<BackupContent>, Error> {
e6dc35ac 286 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
e9d2fc93 287 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
54552dda 288
6b0c6492 289 let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
01a13423 290
dc7a5b34
TL
291 check_priv_or_backup_owner(
292 &datastore,
db87d93e 293 snapshot.as_ref(),
dc7a5b34
TL
294 &auth_id,
295 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
296 )?;
54552dda 297
6da20161 298 let info = BackupInfo::new(snapshot)?;
01a13423 299
70030b43
DM
300 let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
301
302 Ok(files)
01a13423
DM
303}
304
68a6a0ee
DM
305#[api(
306 input: {
307 properties: {
988d575d
WB
308 store: { schema: DATASTORE_SCHEMA },
309 "backup-type": { type: BackupType },
310 "backup-id": { schema: BACKUP_ID_SCHEMA },
311 "backup-time": { schema: BACKUP_TIME_SCHEMA },
68a6a0ee
DM
312 },
313 },
bb34b589 314 access: {
54552dda
DM
315 permission: &Permission::Privilege(
316 &["datastore", "{store}"],
317 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
318 true),
bb34b589 319 },
68a6a0ee
DM
320)]
321/// Delete backup snapshot.
bf78f708 322pub fn delete_snapshot(
68a6a0ee 323 store: String,
988d575d 324 backup_type: BackupType,
68a6a0ee
DM
325 backup_id: String,
326 backup_time: i64,
6f62c924 327 _info: &ApiMethod,
54552dda 328 rpcenv: &mut dyn RpcEnvironment,
6f62c924 329) -> Result<Value, Error> {
e6dc35ac 330 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 331
e9d2fc93 332 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
6b0c6492 333 let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
6f62c924 334
dc7a5b34
TL
335 check_priv_or_backup_owner(
336 &datastore,
db87d93e 337 snapshot.as_ref(),
dc7a5b34
TL
338 &auth_id,
339 PRIV_DATASTORE_MODIFY,
340 )?;
54552dda 341
db87d93e 342 datastore.remove_backup_dir(snapshot.as_ref(), false)?;
6f62c924
DM
343
344 Ok(Value::Null)
345}
346
fc189b19 347#[api(
b7c3eaa9 348 streaming: true,
fc189b19
DM
349 input: {
350 properties: {
988d575d 351 store: { schema: DATASTORE_SCHEMA },
fc189b19
DM
352 "backup-type": {
353 optional: true,
988d575d 354 type: BackupType,
fc189b19
DM
355 },
356 "backup-id": {
357 optional: true,
358 schema: BACKUP_ID_SCHEMA,
359 },
360 },
361 },
7b570c17 362 returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
bb34b589 363 access: {
54552dda
DM
364 permission: &Permission::Privilege(
365 &["datastore", "{store}"],
366 PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
367 true),
bb34b589 368 },
fc189b19
DM
369)]
370/// List backup snapshots.
dc7a5b34 371pub fn list_snapshots(
54552dda 372 store: String,
988d575d 373 backup_type: Option<BackupType>,
54552dda
DM
374 backup_id: Option<String>,
375 _param: Value,
184f17af 376 _info: &ApiMethod,
54552dda 377 rpcenv: &mut dyn RpcEnvironment,
fc189b19 378) -> Result<Vec<SnapshotListItem>, Error> {
e6dc35ac 379 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 380 let user_info = CachedUserInfo::new()?;
e6dc35ac 381 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
184f17af 382
0d08fcee
FG
383 let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
384
e9d2fc93 385 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
184f17af 386
249dde8b
TL
387 // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
388 // backup group and provide an error free (Err -> None) accessor
0d08fcee 389 let groups = match (backup_type, backup_id) {
db87d93e 390 (Some(backup_type), Some(backup_id)) => {
6b0c6492 391 vec![datastore.backup_group_from_parts(backup_type, backup_id)]
db87d93e 392 }
7d9cb8c4 393 (Some(backup_type), None) => datastore
249dde8b 394 .iter_backup_groups_ok()?
dc7a5b34
TL
395 .filter(|group| group.backup_type() == backup_type)
396 .collect(),
7d9cb8c4 397 (None, Some(backup_id)) => datastore
249dde8b 398 .iter_backup_groups_ok()?
dc7a5b34
TL
399 .filter(|group| group.backup_id() == backup_id)
400 .collect(),
7d9cb8c4 401 _ => datastore.list_backup_groups()?,
0d08fcee 402 };
54552dda 403
0d08fcee 404 let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
988d575d
WB
405 let backup = pbs_api_types::BackupDir {
406 group: group.into(),
407 time: info.backup_dir.backup_time(),
408 };
6da20161 409 let protected = info.backup_dir.is_protected();
1c090810 410
79c53595 411 match get_all_snapshot_files(&datastore, &info) {
70030b43 412 Ok((manifest, files)) => {
70030b43
DM
413 // extract the first line from notes
414 let comment: Option<String> = manifest.unprotected["notes"]
415 .as_str()
416 .and_then(|notes| notes.lines().next())
417 .map(String::from);
418
035c40e6
FG
419 let fingerprint = match manifest.fingerprint() {
420 Ok(fp) => fp,
421 Err(err) => {
422 eprintln!("error parsing fingerprint: '{}'", err);
423 None
dc7a5b34 424 }
035c40e6
FG
425 };
426
79c53595 427 let verification = manifest.unprotected["verify_state"].clone();
dc7a5b34
TL
428 let verification: Option<SnapshotVerifyState> =
429 match serde_json::from_value(verification) {
430 Ok(verify) => verify,
431 Err(err) => {
432 eprintln!("error parsing verification state: '{}'", err);
433 None
434 }
435 };
3b2046d2 436
0d08fcee
FG
437 let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
438
79c53595 439 SnapshotListItem {
988d575d 440 backup,
79c53595
FG
441 comment,
442 verification,
035c40e6 443 fingerprint,
79c53595
FG
444 files,
445 size,
446 owner,
02db7267 447 protected,
79c53595 448 }
dc7a5b34 449 }
1c090810
DC
450 Err(err) => {
451 eprintln!("error during snapshot file listing: '{}'", err);
79c53595 452 let files = info
dc7a5b34
TL
453 .files
454 .into_iter()
455 .map(|filename| BackupContent {
456 filename,
457 size: None,
458 crypt_mode: None,
459 })
460 .collect();
79c53595
FG
461
462 SnapshotListItem {
988d575d 463 backup,
79c53595
FG
464 comment: None,
465 verification: None,
035c40e6 466 fingerprint: None,
79c53595
FG
467 files,
468 size: None,
469 owner,
02db7267 470 protected,
79c53595 471 }
dc7a5b34 472 }
0d08fcee
FG
473 }
474 };
184f17af 475
dc7a5b34 476 groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
db87d93e 477 let owner = match datastore.get_owner(group.as_ref()) {
dc7a5b34
TL
478 Ok(auth_id) => auth_id,
479 Err(err) => {
480 eprintln!(
481 "Failed to get owner of group '{}/{}' - {}",
482 &store, group, err
483 );
0d08fcee
FG
484 return Ok(snapshots);
485 }
dc7a5b34 486 };
0d08fcee 487
dc7a5b34
TL
488 if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
489 return Ok(snapshots);
490 }
0d08fcee 491
6da20161 492 let group_backups = group.list_backups()?;
0d08fcee 493
dc7a5b34
TL
494 snapshots.extend(
495 group_backups
496 .into_iter()
497 .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
498 );
499
500 Ok(snapshots)
501 })
184f17af
DM
502}
503
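// Illustrative sketch (not from the upstream source): the two optional parameters act as
// filters on the group selection done above, roughly:
//
//     (Some(ty), Some(id)) => exactly that group
//     (Some(ty), None)     => all groups of that backup type
//     (None,     Some(id)) => all groups with that backup id
//     (None,     None)     => every group in the datastore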
6da20161
WB
504fn get_snapshots_count(
505 store: &Arc<DataStore>,
506 filter_owner: Option<&Authid>,
507) -> Result<Counts, Error> {
7d9cb8c4 508 store
249dde8b 509 .iter_backup_groups_ok()?
fdfcb74d 510 .filter(|group| {
db87d93e 511 let owner = match store.get_owner(group.as_ref()) {
fdfcb74d
FG
512 Ok(owner) => owner,
513 Err(err) => {
72f81545
TL
514 let id = store.name();
515 eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
fdfcb74d 516 return false;
dc7a5b34 517 }
fdfcb74d 518 };
14e08625 519
fdfcb74d
FG
520 match filter_owner {
521 Some(filter) => check_backup_owner(&owner, filter).is_ok(),
522 None => true,
523 }
524 })
525 .try_fold(Counts::default(), |mut counts, group| {
6da20161 526 let snapshot_count = group.list_backups()?.len() as u64;
fdfcb74d 527
72f81545 528 // only include groups with snapshots, counting/displaying empty groups can confuse
b44483a8
DM
529 if snapshot_count > 0 {
530 let type_count = match group.backup_type() {
988d575d
WB
531 BackupType::Ct => counts.ct.get_or_insert(Default::default()),
532 BackupType::Vm => counts.vm.get_or_insert(Default::default()),
533 BackupType::Host => counts.host.get_or_insert(Default::default()),
b44483a8 534 };
14e08625 535
b44483a8
DM
536 type_count.groups += 1;
537 type_count.snapshots += snapshot_count;
538 }
16f9f244 539
fdfcb74d
FG
540 Ok(counts)
541 })
16f9f244
DC
542}
543
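// Illustrative sketch (not from the upstream source): Counts keeps one optional bucket
// per backup type, and a bucket stays None until a non-empty group of that type is seen:
//
//     // assuming `datastore: Arc<DataStore>`
//     let counts = get_snapshots_count(&datastore, None)?;
//     let vm_groups = counts.vm.map(|c| c.groups).unwrap_or(0);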
1dc117bb
DM
544#[api(
545 input: {
546 properties: {
547 store: {
548 schema: DATASTORE_SCHEMA,
549 },
98afc7b1
FG
550 verbose: {
551 type: bool,
552 default: false,
553 optional: true,
554 description: "Include additional information like snapshot counts and GC status.",
555 },
1dc117bb 556 },
98afc7b1 557
1dc117bb
DM
558 },
559 returns: {
14e08625 560 type: DataStoreStatus,
1dc117bb 561 },
bb34b589 562 access: {
54552dda 563 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
bb34b589 564 },
1dc117bb
DM
565)]
566/// Get datastore status.
ea5f547f 567pub fn status(
1dc117bb 568 store: String,
98afc7b1 569 verbose: bool,
0eecf38f 570 _info: &ApiMethod,
fdfcb74d 571 rpcenv: &mut dyn RpcEnvironment,
14e08625 572) -> Result<DataStoreStatus, Error> {
e9d2fc93 573 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
14e08625 574 let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
fdfcb74d
FG
575 let (counts, gc_status) = if verbose {
576 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
577 let user_info = CachedUserInfo::new()?;
578
579 let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
580 let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
581 None
582 } else {
583 Some(&auth_id)
584 };
585
586 let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
587 let gc_status = Some(datastore.last_gc_status());
588
589 (counts, gc_status)
590 } else {
591 (None, None)
98afc7b1 592 };
16f9f244 593
14e08625
DC
594 Ok(DataStoreStatus {
595 total: storage.total,
596 used: storage.used,
597 avail: storage.avail,
598 gc_status,
599 counts,
600 })
0eecf38f
DM
601}
602
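// Illustrative note (not from the upstream source): `verbose` only controls the two
// optional fields. With verbose=false the handler returns just total/used/avail; with
// verbose=true it additionally resolves per-type snapshot counts (owner-filtered unless
// the caller has PRIV_DATASTORE_AUDIT) and the last garbage-collection status.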
c2009e53
DM
603#[api(
604 input: {
605 properties: {
606 store: {
607 schema: DATASTORE_SCHEMA,
608 },
609 "backup-type": {
988d575d 610 type: BackupType,
c2009e53
DM
611 optional: true,
612 },
613 "backup-id": {
614 schema: BACKUP_ID_SCHEMA,
615 optional: true,
616 },
dcbf29e7
HL
617 "ignore-verified": {
618 schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
619 optional: true,
620 },
621 "outdated-after": {
622 schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
623 optional: true,
624 },
c2009e53
DM
625 "backup-time": {
626 schema: BACKUP_TIME_SCHEMA,
627 optional: true,
628 },
629 },
630 },
631 returns: {
632 schema: UPID_SCHEMA,
633 },
634 access: {
09f6a240 635 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
c2009e53
DM
636 },
637)]
638/// Verify backups.
639///
640/// This function can verify a single backup snapshot, all backups of a backup group,
641/// or all backups in the datastore.
642pub fn verify(
643 store: String,
988d575d 644 backup_type: Option<BackupType>,
c2009e53
DM
645 backup_id: Option<String>,
646 backup_time: Option<i64>,
dcbf29e7
HL
647 ignore_verified: Option<bool>,
648 outdated_after: Option<i64>,
c2009e53
DM
649 rpcenv: &mut dyn RpcEnvironment,
650) -> Result<Value, Error> {
e9d2fc93 651 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
dcbf29e7 652 let ignore_verified = ignore_verified.unwrap_or(true);
c2009e53 653
09f6a240 654 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
8ea00f6e 655 let worker_id;
c2009e53
DM
656
657 let mut backup_dir = None;
658 let mut backup_group = None;
133042b5 659 let mut worker_type = "verify";
c2009e53
DM
660
661 match (backup_type, backup_id, backup_time) {
662 (Some(backup_type), Some(backup_id), Some(backup_time)) => {
dc7a5b34
TL
663 worker_id = format!(
664 "{}:{}/{}/{:08X}",
665 store, backup_type, backup_id, backup_time
666 );
db87d93e 667 let dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
09f6a240 668
db87d93e 669 check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?;
09f6a240 670
c2009e53 671 backup_dir = Some(dir);
133042b5 672 worker_type = "verify_snapshot";
c2009e53
DM
673 }
674 (Some(backup_type), Some(backup_id), None) => {
4ebda996 675 worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
db87d93e 676 let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
09f6a240
FG
677
678 check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
679
6b0c6492 680 backup_group = Some(datastore.backup_group(group));
133042b5 681 worker_type = "verify_group";
c2009e53
DM
682 }
683 (None, None, None) => {
8ea00f6e 684 worker_id = store.clone();
c2009e53 685 }
5a718dce 686 _ => bail!("parameters do not specify a backup group or snapshot"),
c2009e53
DM
687 }
688
39735609 689 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
c2009e53
DM
690
691 let upid_str = WorkerTask::new_thread(
133042b5 692 worker_type,
44288184 693 Some(worker_id),
049a22a3 694 auth_id.to_string(),
e7cb4dc5
WB
695 to_stdout,
696 move |worker| {
9c26a3d6 697 let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
adfdc369 698 let failed_dirs = if let Some(backup_dir) = backup_dir {
adfdc369 699 let mut res = Vec::new();
f6b1d1cc 700 if !verify_backup_dir(
9c26a3d6 701 &verify_worker,
f6b1d1cc 702 &backup_dir,
f6b1d1cc 703 worker.upid().clone(),
dc7a5b34 704 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
f6b1d1cc 705 )? {
adfdc369
DC
706 res.push(backup_dir.to_string());
707 }
708 res
c2009e53 709 } else if let Some(backup_group) = backup_group {
7e25b9aa 710 let failed_dirs = verify_backup_group(
9c26a3d6 711 &verify_worker,
63d9aca9 712 &backup_group,
7e25b9aa 713 &mut StoreProgress::new(1),
f6b1d1cc 714 worker.upid(),
dc7a5b34 715 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
63d9aca9
DM
716 )?;
717 failed_dirs
c2009e53 718 } else {
dc7a5b34 719 let privs = CachedUserInfo::new()?.lookup_privs(&auth_id, &["datastore", &store]);
09f6a240
FG
720
721 let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
722 Some(auth_id)
723 } else {
724 None
725 };
726
dcbf29e7
HL
727 verify_all_backups(
728 &verify_worker,
729 worker.upid(),
730 owner,
dc7a5b34 731 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
dcbf29e7 732 )?
c2009e53 733 };
3984a5fd 734 if !failed_dirs.is_empty() {
1ec0d70d 735 task_log!(worker, "Failed to verify the following snapshots/groups:");
adfdc369 736 for dir in failed_dirs {
1ec0d70d 737 task_log!(worker, "\t{}", dir);
adfdc369 738 }
1ffe0301 739 bail!("verification failed - please check the log for details");
c2009e53
DM
740 }
741 Ok(())
e7cb4dc5
WB
742 },
743 )?;
c2009e53
DM
744
745 Ok(json!(upid_str))
746}
747
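// Illustrative sketch (not from the upstream source): the (backup-type, backup-id,
// backup-time) triple selects the verification scope and the worker type used above:
//
//     (Some(ty), Some(id), Some(time)) => "verify_snapshot"  (one snapshot)
//     (Some(ty), Some(id), None)       => "verify_group"     (whole group)
//     (None,     None,     None)       => "verify"           (whole datastore)
//
// Any other combination is rejected with an error.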
0a240aaa
DC
748#[api(
749 input: {
750 properties: {
988d575d
WB
751 "backup-id": { schema: BACKUP_ID_SCHEMA },
752 "backup-type": { type: BackupType },
0a240aaa
DC
753 "dry-run": {
754 optional: true,
755 type: bool,
756 default: false,
757 description: "Just show what prune would do, but do not delete anything.",
758 },
759 "prune-options": {
760 type: PruneOptions,
761 flatten: true,
762 },
763 store: {
764 schema: DATASTORE_SCHEMA,
765 },
766 },
767 },
7b570c17 768 returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
0a240aaa
DC
769 access: {
770 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
771 },
772)]
9805207a 773/// Prune a group on the datastore
bf78f708 774pub fn prune(
0a240aaa 775 backup_id: String,
988d575d 776 backup_type: BackupType,
0a240aaa
DC
777 dry_run: bool,
778 prune_options: PruneOptions,
779 store: String,
780 _param: Value,
54552dda 781 rpcenv: &mut dyn RpcEnvironment,
83b7db02 782) -> Result<Value, Error> {
e6dc35ac 783 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 784
e9d2fc93 785 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
54552dda 786
6b0c6492 787 let group = datastore.backup_group_from_parts(backup_type, &backup_id);
db87d93e
WB
788
789 check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?;
83b7db02 790
988d575d 791 let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id);
503995c7 792
dda70154
DM
793 let mut prune_result = Vec::new();
794
6da20161 795 let list = group.list_backups()?;
dda70154
DM
796
797 let mut prune_info = compute_prune_info(list, &prune_options)?;
798
799 prune_info.reverse(); // delete older snapshots first
800
89725197 801 let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);
dda70154
DM
802
803 if dry_run {
02db7267
DC
804 for (info, mark) in prune_info {
805 let keep = keep_all || mark.keep();
dda70154 806
dda70154 807 prune_result.push(json!({
db87d93e
WB
808 "backup-type": info.backup_dir.backup_type(),
809 "backup-id": info.backup_dir.backup_id(),
810 "backup-time": info.backup_dir.backup_time(),
dda70154 811 "keep": keep,
02db7267 812 "protected": mark.protected(),
dda70154
DM
813 }));
814 }
815 return Ok(json!(prune_result));
816 }
817
163e9bbe 818 // We use a WorkerTask just to have a task log, but run synchronously
049a22a3 819 let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
dda70154 820
f1539300 821 if keep_all {
1ec0d70d 822 task_log!(worker, "No prune selection - keeping all files.");
f1539300 823 } else {
dc7a5b34
TL
824 task_log!(
825 worker,
826 "retention options: {}",
827 pbs_datastore::prune::cli_options_string(&prune_options)
828 );
829 task_log!(
830 worker,
831 "Starting prune on store \"{}\" group \"{}/{}\"",
832 store,
833 backup_type,
834 backup_id
835 );
f1539300 836 }
3b03abfe 837
02db7267
DC
838 for (info, mark) in prune_info {
839 let keep = keep_all || mark.keep();
dda70154 840
f1539300
SR
841 let backup_time = info.backup_dir.backup_time();
842 let timestamp = info.backup_dir.backup_time_string();
db87d93e
WB
843 let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();
844
845 let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);
f1539300 846
1ec0d70d 847 task_log!(worker, "{}", msg);
f1539300
SR
848
849 prune_result.push(json!({
db87d93e
WB
850 "backup-type": group.ty,
851 "backup-id": group.id,
f1539300
SR
852 "backup-time": backup_time,
853 "keep": keep,
02db7267 854 "protected": mark.protected(),
f1539300
SR
855 }));
856
857 if !(dry_run || keep) {
db87d93e 858 if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) {
1ec0d70d
DM
859 task_warn!(
860 worker,
861 "failed to remove dir {:?}: {}",
862 info.backup_dir.relative_path(),
863 err,
f1539300 864 );
8f0b4c1f 865 }
8f579717 866 }
f1539300 867 }
dd8e744f 868
f1539300 869 worker.log_result(&Ok(()));
83b7db02 870
dda70154 871 Ok(json!(prune_result))
83b7db02
DM
872}
873
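// Illustrative sketch (not from the upstream source): both the dry run and the real run
// report one JSON object per snapshot of the group, shaped roughly like:
//
//     { "backup-type": "vm", "backup-id": "100", "backup-time": 1650000000,
//       "keep": true, "protected": false }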
9805207a
DC
874#[api(
875 input: {
876 properties: {
877 "dry-run": {
878 optional: true,
879 type: bool,
880 default: false,
881 description: "Just show what prune would do, but do not delete anything.",
882 },
883 "prune-options": {
884 type: PruneOptions,
885 flatten: true,
886 },
887 store: {
888 schema: DATASTORE_SCHEMA,
889 },
890 },
891 },
892 returns: {
893 schema: UPID_SCHEMA,
894 },
895 access: {
896 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
897 },
898)]
899/// Prune the datastore
900pub fn prune_datastore(
901 dry_run: bool,
902 prune_options: PruneOptions,
903 store: String,
904 _param: Value,
905 rpcenv: &mut dyn RpcEnvironment,
906) -> Result<String, Error> {
9805207a
DC
907 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
908
e9d2fc93 909 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
9805207a 910
bfa942c0
DC
911 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
912
9805207a
DC
913 let upid_str = WorkerTask::new_thread(
914 "prune",
915 Some(store.clone()),
049a22a3 916 auth_id.to_string(),
bfa942c0 917 to_stdout,
dc7a5b34
TL
918 move |worker| {
919 crate::server::prune_datastore(
920 worker,
921 auth_id,
922 prune_options,
923 &store,
924 datastore,
925 dry_run,
926 )
927 },
9805207a
DC
928 )?;
929
930 Ok(upid_str)
931}
932
dfc58d47
DM
933#[api(
934 input: {
935 properties: {
936 store: {
937 schema: DATASTORE_SCHEMA,
938 },
939 },
940 },
941 returns: {
942 schema: UPID_SCHEMA,
943 },
bb34b589 944 access: {
54552dda 945 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
bb34b589 946 },
dfc58d47
DM
947)]
948/// Start garbage collection.
bf78f708 949pub fn start_garbage_collection(
dfc58d47 950 store: String,
6049b71f 951 _info: &ApiMethod,
dd5495d6 952 rpcenv: &mut dyn RpcEnvironment,
6049b71f 953) -> Result<Value, Error> {
e9d2fc93 954 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
e6dc35ac 955 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
15e9b4ed 956
dc7a5b34 957 let job = Job::new("garbage_collection", &store)
4fdf5ddf 958 .map_err(|_| format_err!("garbage collection already running"))?;
15e9b4ed 959
39735609 960 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
15e9b4ed 961
dc7a5b34
TL
962 let upid_str =
963 crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
964 .map_err(|err| {
965 format_err!(
966 "unable to start garbage collection job on datastore {} - {}",
967 store,
968 err
969 )
970 })?;
0f778e06
DM
971
972 Ok(json!(upid_str))
15e9b4ed
DM
973}
974
a92830dc
DM
975#[api(
976 input: {
977 properties: {
978 store: {
979 schema: DATASTORE_SCHEMA,
980 },
981 },
982 },
983 returns: {
984 type: GarbageCollectionStatus,
bb34b589
DM
985 },
986 access: {
987 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
988 },
a92830dc
DM
989)]
990/// Garbage collection status.
5eeea607 991pub fn garbage_collection_status(
a92830dc 992 store: String,
6049b71f 993 _info: &ApiMethod,
dd5495d6 994 _rpcenv: &mut dyn RpcEnvironment,
a92830dc 995) -> Result<GarbageCollectionStatus, Error> {
e9d2fc93 996 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f2b99c34 997
f2b99c34 998 let status = datastore.last_gc_status();
691c89a0 999
a92830dc 1000 Ok(status)
691c89a0
DM
1001}
1002
bb34b589 1003#[api(
30fb6025
DM
1004 returns: {
1005 description: "List the accessible datastores.",
1006 type: Array,
9b93c620 1007 items: { type: DataStoreListItem },
30fb6025 1008 },
bb34b589 1009 access: {
54552dda 1010 permission: &Permission::Anybody,
bb34b589
DM
1011 },
1012)]
1013/// Datastore list
bf78f708 1014pub fn get_datastore_list(
6049b71f
DM
1015 _param: Value,
1016 _info: &ApiMethod,
54552dda 1017 rpcenv: &mut dyn RpcEnvironment,
455e5f71 1018) -> Result<Vec<DataStoreListItem>, Error> {
e7d4be9d 1019 let (config, _digest) = pbs_config::datastore::config()?;
15e9b4ed 1020
e6dc35ac 1021 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda
DM
1022 let user_info = CachedUserInfo::new()?;
1023
30fb6025 1024 let mut list = Vec::new();
54552dda 1025
30fb6025 1026 for (store, (_, data)) in &config.sections {
9a37bd6c 1027 let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
dc7a5b34 1028 let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
30fb6025 1029 if allowed {
dc7a5b34
TL
1030 list.push(DataStoreListItem {
1031 store: store.clone(),
1032 comment: data["comment"].as_str().map(String::from),
e022d13c 1033 maintenance: data["maintenance-mode"].as_str().map(String::from),
dc7a5b34 1034 });
30fb6025 1035 }
54552dda
DM
1036 }
1037
44288184 1038 Ok(list)
15e9b4ed
DM
1039}
1040
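// Illustrative note (not from the upstream source): access is Permission::Anybody, but
// the loop above skips every datastore for which the caller holds neither
// PRIV_DATASTORE_AUDIT nor PRIV_DATASTORE_BACKUP, so the listing reveals only stores the
// user may actually touch.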
0ab08ac9
DM
1041#[sortable]
1042pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
1043 &ApiHandler::AsyncHttp(&download_file),
1044 &ObjectSchema::new(
1045 "Download single raw file from backup snapshot.",
1046 &sorted!([
66c49c21 1047 ("store", false, &DATASTORE_SCHEMA),
0ab08ac9 1048 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
dc7a5b34 1049 ("backup-id", false, &BACKUP_ID_SCHEMA),
0ab08ac9 1050 ("backup-time", false, &BACKUP_TIME_SCHEMA),
4191018c 1051 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
0ab08ac9 1052 ]),
dc7a5b34
TL
1053 ),
1054)
1055.access(
1056 None,
1057 &Permission::Privilege(
1058 &["datastore", "{store}"],
1059 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1060 true,
1061 ),
54552dda 1062);
691c89a0 1063
bf78f708 1064pub fn download_file(
9e47c0a5
DM
1065 _parts: Parts,
1066 _req_body: Body,
1067 param: Value,
255f378a 1068 _info: &ApiMethod,
54552dda 1069 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 1070) -> ApiResponseFuture {
ad51d02a 1071 async move {
3c8c2827 1072 let store = required_string_param(&param, "store")?;
e9d2fc93 1073 let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
f14a8c9a 1074
e6dc35ac 1075 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
54552dda 1076
3c8c2827 1077 let file_name = required_string_param(&param, "file-name")?.to_owned();
9e47c0a5 1078
988d575d 1079 let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
db87d93e 1080 let backup_id = required_string_param(&param, "backup-id")?.to_owned();
3c8c2827 1081 let backup_time = required_integer_param(&param, "backup-time")?;
9e47c0a5 1082
db87d93e 1083 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
54552dda 1084
dc7a5b34
TL
1085 check_priv_or_backup_owner(
1086 &datastore,
db87d93e 1087 backup_dir.as_ref(),
dc7a5b34
TL
1088 &auth_id,
1089 PRIV_DATASTORE_READ,
1090 )?;
54552dda 1091
dc7a5b34
TL
1092 println!(
1093 "Download {} from {} ({}/{})",
1094 file_name, store, backup_dir, file_name
1095 );
9e47c0a5 1096
ad51d02a
DM
1097 let mut path = datastore.base_path();
1098 path.push(backup_dir.relative_path());
1099 path.push(&file_name);
1100
ba694720 1101 let file = tokio::fs::File::open(&path)
8aa67ee7
WB
1102 .await
1103 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
ad51d02a 1104
dc7a5b34
TL
1105 let payload =
1106 tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
1107 .map_ok(|bytes| bytes.freeze())
1108 .map_err(move |err| {
1109 eprintln!("error during streaming of '{:?}' - {}", &path, err);
1110 err
1111 });
ad51d02a 1112 let body = Body::wrap_stream(payload);
9e47c0a5 1113
ad51d02a
DM
1114 // fixme: set other headers ?
1115 Ok(Response::builder()
dc7a5b34
TL
1116 .status(StatusCode::OK)
1117 .header(header::CONTENT_TYPE, "application/octet-stream")
1118 .body(body)
1119 .unwrap())
1120 }
1121 .boxed()
9e47c0a5
DM
1122}
1123
6ef9bb59
DC
1124#[sortable]
1125pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
1126 &ApiHandler::AsyncHttp(&download_file_decoded),
1127 &ObjectSchema::new(
1128 "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
1129 &sorted!([
1130 ("store", false, &DATASTORE_SCHEMA),
1131 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
dc7a5b34 1132 ("backup-id", false, &BACKUP_ID_SCHEMA),
6ef9bb59
DC
1133 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1134 ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
1135 ]),
dc7a5b34
TL
1136 ),
1137)
1138.access(
1139 None,
1140 &Permission::Privilege(
1141 &["datastore", "{store}"],
1142 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1143 true,
1144 ),
6ef9bb59
DC
1145);
1146
bf78f708 1147pub fn download_file_decoded(
6ef9bb59
DC
1148 _parts: Parts,
1149 _req_body: Body,
1150 param: Value,
1151 _info: &ApiMethod,
1152 rpcenv: Box<dyn RpcEnvironment>,
1153) -> ApiResponseFuture {
6ef9bb59 1154 async move {
3c8c2827 1155 let store = required_string_param(&param, "store")?;
e9d2fc93 1156 let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
6ef9bb59 1157
e6dc35ac 1158 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
6ef9bb59 1159
3c8c2827 1160 let file_name = required_string_param(&param, "file-name")?.to_owned();
6ef9bb59 1161
988d575d 1162 let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
db87d93e 1163 let backup_id = required_string_param(&param, "backup-id")?.to_owned();
3c8c2827 1164 let backup_time = required_integer_param(&param, "backup-time")?;
6ef9bb59 1165
db87d93e 1166 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
6ef9bb59 1167
dc7a5b34
TL
1168 check_priv_or_backup_owner(
1169 &datastore,
db87d93e 1170 backup_dir.as_ref(),
dc7a5b34
TL
1171 &auth_id,
1172 PRIV_DATASTORE_READ,
1173 )?;
6ef9bb59 1174
2d55beec 1175 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
6ef9bb59 1176 for file in files {
f28d9088 1177 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
6ef9bb59
DC
1178 bail!("cannot decode '{}' - is encrypted", file_name);
1179 }
1180 }
1181
dc7a5b34
TL
1182 println!(
1183 "Download {} from {} ({}/{})",
1184 file_name, store, backup_dir, file_name
1185 );
6ef9bb59
DC
1186
1187 let mut path = datastore.base_path();
1188 path.push(backup_dir.relative_path());
1189 path.push(&file_name);
1190
1191 let extension = file_name.rsplitn(2, '.').next().unwrap();
1192
1193 let body = match extension {
1194 "didx" => {
dc7a5b34
TL
1195 let index = DynamicIndexReader::open(&path).map_err(|err| {
1196 format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
1197 })?;
2d55beec
FG
1198 let (csum, size) = index.compute_csum();
1199 manifest.verify_file(&file_name, &csum, size)?;
6ef9bb59 1200
14f6c9cb 1201 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1202 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1203 Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
1204 eprintln!("error during streaming of '{:?}' - {}", path, err);
1205 err
1206 }))
1207 }
6ef9bb59 1208 "fidx" => {
dc7a5b34
TL
1209 let index = FixedIndexReader::open(&path).map_err(|err| {
1210 format_err!("unable to read fixed index '{:?}' - {}", &path, err)
1211 })?;
6ef9bb59 1212
2d55beec
FG
1213 let (csum, size) = index.compute_csum();
1214 manifest.verify_file(&file_name, &csum, size)?;
1215
14f6c9cb 1216 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
1ef6e8b6 1217 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
dc7a5b34
TL
1218 Body::wrap_stream(
1219 AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
1220 move |err| {
1221 eprintln!("error during streaming of '{:?}' - {}", path, err);
1222 err
1223 },
1224 ),
1225 )
1226 }
6ef9bb59
DC
1227 "blob" => {
1228 let file = std::fs::File::open(&path)
8aa67ee7 1229 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
6ef9bb59 1230
2d55beec
FG
1231 // FIXME: load full blob to verify index checksum?
1232
6ef9bb59 1233 Body::wrap_stream(
dc7a5b34
TL
1234 WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
1235 move |err| {
6ef9bb59
DC
1236 eprintln!("error during streaming of '{:?}' - {}", path, err);
1237 err
dc7a5b34
TL
1238 },
1239 ),
6ef9bb59 1240 )
dc7a5b34 1241 }
6ef9bb59
DC
1242 extension => {
1243 bail!("cannot download '{}' files", extension);
dc7a5b34 1244 }
6ef9bb59
DC
1245 };
1246
1247 // fixme: set other headers ?
1248 Ok(Response::builder()
dc7a5b34
TL
1249 .status(StatusCode::OK)
1250 .header(header::CONTENT_TYPE, "application/octet-stream")
1251 .body(body)
1252 .unwrap())
1253 }
1254 .boxed()
6ef9bb59
DC
1255}
1256
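// Illustrative note (not from the upstream source): the reader is chosen from the file
// extension -- "didx" streams through a CachedChunkReader over the dynamic index, "fidx"
// does the same over the fixed index with a 4 MiB buffer, "blob" is streamed raw via
// DataBlobReader, and anything else is rejected.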
552c2259 1257#[sortable]
0ab08ac9
DM
1258pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
1259 &ApiHandler::AsyncHttp(&upload_backup_log),
255f378a 1260 &ObjectSchema::new(
54552dda 1261 "Upload the client backup log file into a backup snapshot ('client.log.blob').",
552c2259 1262 &sorted!([
66c49c21 1263 ("store", false, &DATASTORE_SCHEMA),
255f378a 1264 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
0ab08ac9 1265 ("backup-id", false, &BACKUP_ID_SCHEMA),
255f378a 1266 ("backup-time", false, &BACKUP_TIME_SCHEMA),
552c2259 1267 ]),
dc7a5b34
TL
1268 ),
1269)
1270.access(
54552dda 1271 Some("Only the backup creator/owner is allowed to do this."),
dc7a5b34 1272 &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false),
54552dda 1273);
9e47c0a5 1274
bf78f708 1275pub fn upload_backup_log(
07ee2235
DM
1276 _parts: Parts,
1277 req_body: Body,
1278 param: Value,
255f378a 1279 _info: &ApiMethod,
54552dda 1280 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 1281) -> ApiResponseFuture {
ad51d02a 1282 async move {
3c8c2827 1283 let store = required_string_param(&param, "store")?;
e9d2fc93 1284 let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?;
07ee2235 1285
dc7a5b34 1286 let file_name = CLIENT_LOG_BLOB_NAME;
07ee2235 1287
988d575d 1288 let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
3c8c2827
WB
1289 let backup_id = required_string_param(&param, "backup-id")?;
1290 let backup_time = required_integer_param(&param, "backup-time")?;
07ee2235 1291
db87d93e 1292 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
07ee2235 1293
e6dc35ac 1294 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
db87d93e 1295 let owner = datastore.get_owner(backup_dir.as_ref())?;
bff85572 1296 check_backup_owner(&owner, &auth_id)?;
54552dda 1297
ad51d02a
DM
1298 let mut path = datastore.base_path();
1299 path.push(backup_dir.relative_path());
1300 path.push(&file_name);
07ee2235 1301
ad51d02a
DM
1302 if path.exists() {
1303 bail!("backup already contains a log.");
1304 }
e128d4e8 1305
dc7a5b34
TL
1306 println!(
1307 "Upload backup log to {}/{}/{}/{}/{}",
1308 store,
1309 backup_type,
1310 backup_id,
1311 backup_dir.backup_time_string(),
1312 file_name
1313 );
ad51d02a
DM
1314
1315 let data = req_body
1316 .map_err(Error::from)
1317 .try_fold(Vec::new(), |mut acc, chunk| {
1318 acc.extend_from_slice(&*chunk);
1319 future::ok::<_, Error>(acc)
1320 })
1321 .await?;
1322
39f18b30
DM
1323 // always verify blob/CRC at server side
1324 let blob = DataBlob::load_from_reader(&mut &data[..])?;
1325
e0a19d33 1326 replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
ad51d02a
DM
1327
1328 // fixme: use correct formatter
53daae8e 1329 Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
dc7a5b34
TL
1330 }
1331 .boxed()
07ee2235
DM
1332}
1333
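// Illustrative note (not from the upstream source): unlike most handlers this one does
// not go through check_priv_or_backup_owner(); only the snapshot owner may attach a
// client log, and the upload is rejected once the snapshot already contains one.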
5b1cfa01
DC
1334#[api(
1335 input: {
1336 properties: {
988d575d
WB
1337 store: { schema: DATASTORE_SCHEMA },
1338 "backup-type": { type: BackupType },
1339 "backup-id": { schema: BACKUP_ID_SCHEMA },
1340 "backup-time": { schema: BACKUP_TIME_SCHEMA },
5b1cfa01
DC
1341 "filepath": {
1342 description: "Base64 encoded path.",
1343 type: String,
1344 }
1345 },
1346 },
1347 access: {
1348 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
1349 },
1350)]
1351/// Get the entries of the given path of the catalog
bf78f708 1352pub fn catalog(
5b1cfa01 1353 store: String,
988d575d 1354 backup_type: BackupType,
5b1cfa01
DC
1355 backup_id: String,
1356 backup_time: i64,
1357 filepath: String,
5b1cfa01 1358 rpcenv: &mut dyn RpcEnvironment,
227501c0 1359) -> Result<Vec<ArchiveEntry>, Error> {
e9d2fc93 1360 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
5b1cfa01 1361
e6dc35ac 1362 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
5b1cfa01 1363
6b0c6492 1364 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
5b1cfa01 1365
dc7a5b34
TL
1366 check_priv_or_backup_owner(
1367 &datastore,
db87d93e 1368 backup_dir.as_ref(),
dc7a5b34
TL
1369 &auth_id,
1370 PRIV_DATASTORE_READ,
1371 )?;
5b1cfa01 1372
9238cdf5
FG
1373 let file_name = CATALOG_NAME;
1374
2d55beec 1375 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1376 for file in files {
1377 if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1378 bail!("cannot decode '{}' - is encrypted", file_name);
1379 }
1380 }
1381
5b1cfa01
DC
1382 let mut path = datastore.base_path();
1383 path.push(backup_dir.relative_path());
9238cdf5 1384 path.push(file_name);
5b1cfa01
DC
1385
1386 let index = DynamicIndexReader::open(&path)
1387 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1388
2d55beec 1389 let (csum, size) = index.compute_csum();
9a37bd6c 1390 manifest.verify_file(file_name, &csum, size)?;
2d55beec 1391
14f6c9cb 1392 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
5b1cfa01
DC
1393 let reader = BufferedDynamicReader::new(index, chunk_reader);
1394
1395 let mut catalog_reader = CatalogReader::new(reader);
5b1cfa01 1396
5279ee74 1397 let path = if filepath != "root" && filepath != "/" {
227501c0
DC
1398 base64::decode(filepath)?
1399 } else {
1400 vec![b'/']
1401 };
5b1cfa01 1402
86582454 1403 catalog_reader.list_dir_contents(&path)
5b1cfa01
DC
1404}
1405
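// Illustrative note (not from the upstream source): `filepath` is the base64-encoded
// path inside the catalog, with the literal values "root" and "/" selecting the catalog
// root; everything else is decoded and handed to CatalogReader::list_dir_contents().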
d33d8f4e
DC
1406#[sortable]
1407pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
1408 &ApiHandler::AsyncHttp(&pxar_file_download),
1409 &ObjectSchema::new(
1ffe0301 1410 "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
d33d8f4e
DC
1411 &sorted!([
1412 ("store", false, &DATASTORE_SCHEMA),
1413 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
1414 ("backup-id", false, &BACKUP_ID_SCHEMA),
1415 ("backup-time", false, &BACKUP_TIME_SCHEMA),
1416 ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
984ddb2f 1417 ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
d33d8f4e
DC
1418 ]),
1419 )
1420).access(None, &Permission::Privilege(
1421 &["datastore", "{store}"],
1422 PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
1423 true)
1424);
1425
bf78f708 1426pub fn pxar_file_download(
d33d8f4e
DC
1427 _parts: Parts,
1428 _req_body: Body,
1429 param: Value,
1430 _info: &ApiMethod,
1431 rpcenv: Box<dyn RpcEnvironment>,
1432) -> ApiResponseFuture {
d33d8f4e 1433 async move {
3c8c2827 1434 let store = required_string_param(&param, "store")?;
e9d2fc93 1435 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
d33d8f4e 1436
e6dc35ac 1437 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
d33d8f4e 1438
3c8c2827 1439 let filepath = required_string_param(&param, "filepath")?.to_owned();
d33d8f4e 1440
988d575d 1441 let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
3c8c2827
WB
1442 let backup_id = required_string_param(&param, "backup-id")?;
1443 let backup_time = required_integer_param(&param, "backup-time")?;
d33d8f4e 1444
984ddb2f
DC
1445 let tar = param["tar"].as_bool().unwrap_or(false);
1446
6b0c6492 1447 let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
d33d8f4e 1448
dc7a5b34
TL
1449 check_priv_or_backup_owner(
1450 &datastore,
db87d93e 1451 backup_dir.as_ref(),
dc7a5b34
TL
1452 &auth_id,
1453 PRIV_DATASTORE_READ,
1454 )?;
d33d8f4e 1455
d33d8f4e 1456 let mut components = base64::decode(&filepath)?;
3984a5fd 1457 if !components.is_empty() && components[0] == b'/' {
d33d8f4e
DC
1458 components.remove(0);
1459 }
1460
d8d8af98 1461 let mut split = components.splitn(2, |c| *c == b'/');
9238cdf5 1462 let pxar_name = std::str::from_utf8(split.next().unwrap())?;
0dfce17a 1463 let file_path = split.next().unwrap_or(b"/");
2d55beec 1464 let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
9238cdf5
FG
1465 for file in files {
1466 if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
1467 bail!("cannot decode '{}' - is encrypted", pxar_name);
1468 }
1469 }
d33d8f4e 1470
9238cdf5
FG
1471 let mut path = datastore.base_path();
1472 path.push(backup_dir.relative_path());
1473 path.push(pxar_name);
d33d8f4e
DC
1474
1475 let index = DynamicIndexReader::open(&path)
1476 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
1477
2d55beec 1478 let (csum, size) = index.compute_csum();
9a37bd6c 1479 manifest.verify_file(pxar_name, &csum, size)?;
2d55beec 1480
14f6c9cb 1481 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
d33d8f4e
DC
1482 let reader = BufferedDynamicReader::new(index, chunk_reader);
1483 let archive_size = reader.archive_size();
1484 let reader = LocalDynamicReadAt::new(reader);
1485
1486 let decoder = Accessor::new(reader, archive_size).await?;
1487 let root = decoder.open_root().await?;
2e219481 1488 let path = OsStr::from_bytes(file_path).to_os_string();
d33d8f4e 1489 let file = root
dc7a5b34
TL
1490 .lookup(&path)
1491 .await?
2e219481 1492 .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
d33d8f4e 1493
804f6143
DC
1494 let body = match file.kind() {
1495 EntryKind::File { .. } => Body::wrap_stream(
1496 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1497 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1498 err
1499 }),
1500 ),
1501 EntryKind::Hardlink(_) => Body::wrap_stream(
1502 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1503 .map_err(move |err| {
dc7a5b34 1504 eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
804f6143
DC
1505 err
1506 }),
1507 ),
1508 EntryKind::Directory => {
984ddb2f 1509 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
804f6143 1510 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
984ddb2f 1511 if tar {
dc7a5b34
TL
1512 proxmox_rest_server::spawn_internal_task(create_tar(
1513 channelwriter,
1514 decoder,
1515 path.clone(),
1516 false,
1517 ));
984ddb2f
DC
1518 let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
1519 Body::wrap_stream(zstdstream.map_err(move |err| {
1520 eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
1521 err
1522 }))
1523 } else {
dc7a5b34
TL
1524 proxmox_rest_server::spawn_internal_task(create_zip(
1525 channelwriter,
1526 decoder,
1527 path.clone(),
1528 false,
1529 ));
984ddb2f
DC
1530 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
1531 eprintln!("error during streaming of zip '{:?}' - {}", path, err);
1532 err
1533 }))
1534 }
804f6143
DC
1535 }
1536 other => bail!("cannot download file of type {:?}", other),
1537 };
d33d8f4e
DC
1538
1539 // fixme: set other headers ?
1540 Ok(Response::builder()
dc7a5b34
TL
1541 .status(StatusCode::OK)
1542 .header(header::CONTENT_TYPE, "application/octet-stream")
1543 .body(body)
1544 .unwrap())
1545 }
1546 .boxed()
d33d8f4e
DC
1547}
1548
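// Illustrative note (not from the upstream source): the base64 `filepath` is split at the
// first '/' into the pxar archive name and the path inside it. Regular files and
// hardlinks are streamed directly, while directories are packed on the fly into a zip
// archive by default or, with tar=true, into a zstd-compressed tar.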
1a0d3d11
DM
1549#[api(
1550 input: {
1551 properties: {
1552 store: {
1553 schema: DATASTORE_SCHEMA,
1554 },
1555 timeframe: {
c68fa58a 1556 type: RRDTimeFrame,
1a0d3d11
DM
1557 },
1558 cf: {
1559 type: RRDMode,
1560 },
1561 },
1562 },
1563 access: {
1564 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1565 },
1566)]
1567/// Read datastore stats
bf78f708 1568pub fn get_rrd_stats(
1a0d3d11 1569 store: String,
c68fa58a 1570 timeframe: RRDTimeFrame,
1a0d3d11
DM
1571 cf: RRDMode,
1572 _param: Value,
1573) -> Result<Value, Error> {
e9d2fc93 1574 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1575 let disk_manager = crate::tools::disks::DiskManage::new();
1576
1577 let mut rrd_fields = vec![
dc7a5b34
TL
1578 "total",
1579 "used",
1580 "read_ios",
1581 "read_bytes",
1582 "write_ios",
1583 "write_bytes",
f27b6086
DC
1584 ];
1585
1586 // we do not have io_ticks for zpools, so don't include them
1587 match disk_manager.find_mounted_device(&datastore.base_path()) {
dc7a5b34 1588 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
f27b6086
DC
1589 _ => rrd_fields.push("io_ticks"),
1590 };
1591
dc7a5b34 1592 create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
1a0d3d11
DM
1593}
1594
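// Illustrative note (not from the upstream source): the RRD series name is derived from
// the store ("datastore/{store}"), and io_ticks is left out for ZFS-backed datastores
// because zpools do not provide that counter.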
5fd823c3
HL
1595#[api(
1596 input: {
1597 properties: {
1598 store: {
1599 schema: DATASTORE_SCHEMA,
1600 },
1601 },
1602 },
1603 access: {
1604 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1605 },
1606)]
1607/// Read the number of active read and write operations on the datastore
dc7a5b34 1608pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
5fd823c3
HL
1609 let active_operations = task_tracking::get_active_operations(&store)?;
1610 Ok(json!({
1611 "read": active_operations.read,
1612 "write": active_operations.write,
1613 }))
1614}
1615
d6688884
SR
1616#[api(
1617 input: {
1618 properties: {
988d575d
WB
1619 store: { schema: DATASTORE_SCHEMA },
1620 "backup-type": { type: BackupType },
1621 "backup-id": { schema: BACKUP_ID_SCHEMA },
d6688884
SR
1622 },
1623 },
1624 access: {
1625 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1626 },
1627)]
1628/// Get "notes" for a backup group
1629pub fn get_group_notes(
1630 store: String,
988d575d 1631 backup_type: BackupType,
d6688884
SR
1632 backup_id: String,
1633 rpcenv: &mut dyn RpcEnvironment,
1634) -> Result<String, Error> {
e9d2fc93 1635 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
d6688884
SR
1636
1637 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
db87d93e 1638 let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
d6688884
SR
1639
1640 check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
1641
1642 let note_path = get_group_note_path(&datastore, &backup_group);
1643 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1644}
1645
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;

    Ok(())
}

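// Note on storage, as implemented above: group notes are a separate plain-text
// file addressed by `get_group_note_path`, so reading or writing them never
// touches a manifest, and a missing file is returned as an empty string rather
// than an error.
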
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            "backup-time": { schema: BACKUP_TIME_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_AUDIT,
    )?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"].as_str().unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            "backup-time": { schema: BACKUP_TIME_SCHEMA },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore
        .update_manifest(&backup_dir, |manifest| {
            manifest.unprotected["notes"] = notes.into();
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}

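// In contrast to group notes, per-snapshot notes live in the "unprotected"
// part of the backup manifest (see `manifest.unprotected["notes"]` above),
// which lets `update_manifest` change them after the fact without touching
// the signed portion of the manifest.
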
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            "backup-time": { schema: BACKUP_TIME_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_AUDIT,
    )?;

    Ok(backup_dir.is_protected())
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            "backup-time": { schema: BACKUP_TIME_SCHEMA },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// Enable or disable protection for a specific backup
pub fn set_protection(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    backup_time: i64,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore.update_protection(&backup_dir, protected)
}

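// The flag toggled above is persisted per snapshot by `update_protection`.
// Its purpose is to exclude a snapshot from pruning and manual removal until
// protection is disabled again; the on-disk representation is an
// implementation detail of `update_protection` itself.
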
#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_group = datastore.backup_group_from_parts(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(backup_group.as_ref())?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            }
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
            }
            (false, true) => {
                // API token owner to API token
                owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
            }
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            }
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(
            UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!(
            "{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner
        );
    }

    datastore.set_owner(backup_group.as_ref(), &new_owner, true)?;

    Ok(())
}

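// Summary of the ownership-change rules implemented above for callers that
// only have Datastore.Backup (with Datastore.Modify any change is allowed):
//
//   current owner | new owner | allowed when
//   --------------+-----------+--------------------------------------------
//   API token     | API token | both tokens belong to the calling user
//   API token     | user      | the token belongs to the caller and the new
//                 |           | owner is the caller themselves
//   user          | API token | the caller is the current owner and the new
//                 |           | token belongs to the caller
//   user          | user      | never (requires Datastore.Modify)
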
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];
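// Routing sketch: `match_all("store", ...)` below binds the {store} path
// component, and every entry of the (alphabetically ordered) map above
// becomes a sub-path of it, so the handlers in this file end up under URLs
// such as (assuming the usual /api2/json prefix of the REST server):
//
//   /api2/json/admin/datastore/<store>/active-operations
//   /api2/json/admin/datastore/<store>/group-notes
//   /api2/json/admin/datastore/<store>/protected
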
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);