]>
Commit | Line | Data |
---|---|---|
0d08fcee | 1 | use std::collections::HashSet; |
d33d8f4e DC |
2 | use std::ffi::OsStr; |
3 | use std::os::unix::ffi::OsStrExt; | |
6b809ff5 | 4 | use std::sync::{Arc, Mutex}; |
53a561a2 | 5 | use std::path::{Path, PathBuf}; |
804f6143 | 6 | use std::pin::Pin; |
cad540e9 | 7 | |
6ef9bb59 | 8 | use anyhow::{bail, format_err, Error}; |
9e47c0a5 | 9 | use futures::*; |
cad540e9 WB |
10 | use hyper::http::request::Parts; |
11 | use hyper::{header, Body, Response, StatusCode}; | |
15e9b4ed DM |
12 | use serde_json::{json, Value}; |
13 | ||
bb34b589 DM |
14 | use proxmox::api::{ |
15 | api, ApiResponseFuture, ApiHandler, ApiMethod, Router, | |
e7cb4dc5 WB |
16 | RpcEnvironment, RpcEnvironmentType, Permission |
17 | }; | |
cad540e9 WB |
18 | use proxmox::api::router::SubdirMap; |
19 | use proxmox::api::schema::*; | |
60f9a6ea | 20 | use proxmox::tools::fs::{replace_file, CreateOptions}; |
9ea4bce4 | 21 | use proxmox::{http_err, identity, list_subdirs_api_method, sortable}; |
e18a6c9e | 22 | |
804f6143 | 23 | use pxar::accessor::aio::{Accessor, FileContents, FileEntry}; |
d33d8f4e DC |
24 | use pxar::EntryKind; |
25 | ||
cad540e9 | 26 | use crate::api2::types::*; |
431cc7b1 | 27 | use crate::api2::node::rrd::create_value_from_rrd; |
e5064ba6 | 28 | use crate::backup::*; |
cad540e9 | 29 | use crate::config::datastore; |
54552dda DM |
30 | use crate::config::cached_user_info::CachedUserInfo; |
31 | ||
4fdf5ddf | 32 | use crate::server::{jobstate::Job, WorkerTask}; |
804f6143 DC |
33 | use crate::tools::{ |
34 | self, | |
35 | zip::{ZipEncoder, ZipEntry}, | |
36 | AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream, | |
37 | }; | |
38 | ||
d00e1a21 DM |
39 | use crate::config::acl::{ |
40 | PRIV_DATASTORE_AUDIT, | |
54552dda | 41 | PRIV_DATASTORE_MODIFY, |
d00e1a21 DM |
42 | PRIV_DATASTORE_READ, |
43 | PRIV_DATASTORE_PRUNE, | |
54552dda | 44 | PRIV_DATASTORE_BACKUP, |
09f6a240 | 45 | PRIV_DATASTORE_VERIFY, |
d00e1a21 | 46 | }; |
1629d2ad | 47 | |
bff85572 | 48 | fn check_priv_or_backup_owner( |
e7cb4dc5 WB |
49 | store: &DataStore, |
50 | group: &BackupGroup, | |
e6dc35ac | 51 | auth_id: &Authid, |
bff85572 FG |
52 | required_privs: u64, |
53 | ) -> Result<(), Error> { | |
54 | let user_info = CachedUserInfo::new()?; | |
55 | let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]); | |
56 | ||
57 | if privs & required_privs == 0 { | |
58 | let owner = store.get_owner(group)?; | |
59 | check_backup_owner(&owner, auth_id)?; | |
60 | } | |
61 | Ok(()) | |
62 | } | |
63 | ||
64 | fn check_backup_owner( | |
65 | owner: &Authid, | |
66 | auth_id: &Authid, | |
e7cb4dc5 | 67 | ) -> Result<(), Error> { |
bff85572 FG |
68 | let correct_owner = owner == auth_id |
69 | || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id); | |
70 | if !correct_owner { | |
e6dc35ac | 71 | bail!("backup owner check failed ({} != {})", auth_id, owner); |
54552dda DM |
72 | } |
73 | Ok(()) | |
74 | } | |
75 | ||
e7cb4dc5 WB |
76 | fn read_backup_index( |
77 | store: &DataStore, | |
78 | backup_dir: &BackupDir, | |
79 | ) -> Result<(BackupManifest, Vec<BackupContent>), Error> { | |
8c70e3eb | 80 | |
ff86ef00 | 81 | let (manifest, index_size) = store.load_manifest(backup_dir)?; |
8c70e3eb | 82 | |
09b1f7b2 DM |
83 | let mut result = Vec::new(); |
84 | for item in manifest.files() { | |
85 | result.push(BackupContent { | |
86 | filename: item.filename.clone(), | |
f28d9088 | 87 | crypt_mode: Some(item.crypt_mode), |
09b1f7b2 DM |
88 | size: Some(item.size), |
89 | }); | |
8c70e3eb DM |
90 | } |
91 | ||
09b1f7b2 | 92 | result.push(BackupContent { |
96d65fbc | 93 | filename: MANIFEST_BLOB_NAME.to_string(), |
882c0823 FG |
94 | crypt_mode: match manifest.signature { |
95 | Some(_) => Some(CryptMode::SignOnly), | |
96 | None => Some(CryptMode::None), | |
97 | }, | |
09b1f7b2 DM |
98 | size: Some(index_size), |
99 | }); | |
4f1e40a2 | 100 | |
70030b43 | 101 | Ok((manifest, result)) |
8c70e3eb DM |
102 | } |
103 | ||
1c090810 DC |
104 | fn get_all_snapshot_files( |
105 | store: &DataStore, | |
106 | info: &BackupInfo, | |
70030b43 DM |
107 | ) -> Result<(BackupManifest, Vec<BackupContent>), Error> { |
108 | ||
109 | let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?; | |
1c090810 DC |
110 | |
111 | let file_set = files.iter().fold(HashSet::new(), |mut acc, item| { | |
112 | acc.insert(item.filename.clone()); | |
113 | acc | |
114 | }); | |
115 | ||
116 | for file in &info.files { | |
117 | if file_set.contains(file) { continue; } | |
f28d9088 WB |
118 | files.push(BackupContent { |
119 | filename: file.to_string(), | |
120 | size: None, | |
121 | crypt_mode: None, | |
122 | }); | |
1c090810 DC |
123 | } |
124 | ||
70030b43 | 125 | Ok((manifest, files)) |
1c090810 DC |
126 | } |
127 | ||
b31c8019 DM |
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;
    // With AUDIT privilege all groups are listed; otherwise only owned ones.
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;

    // Accumulate visible groups; `return group_info` inside the closure
    // acts as a per-group `continue` (the group is simply skipped).
    let group_info = backup_groups
        .into_iter()
        .fold(Vec::new(), |mut group_info, group| {
            // Unreadable owner: log and skip, do not fail the whole listing.
            let owner = match datastore.get_owner(&group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return group_info;
                },
            };
            // Non-auditors only see groups they (or their token's user) own.
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return group_info;
            }

            let snapshots = match group.list_backups(&datastore.base_path()) {
                Ok(snapshots) => snapshots,
                Err(_) => {
                    return group_info;
                },
            };

            let backup_count: u64 = snapshots.len() as u64;
            // Empty groups are not reported.
            if backup_count == 0 {
                return group_info;
            }

            // Pick the newest *finished* snapshot; falls back to the first
            // entry when none is finished.
            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |last, curr| {
                    if curr.is_finished()
                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
                        curr
                    } else {
                        last
                    }
                })
                .to_owned();

            group_info.push(GroupListItem {
                backup_type: group.backup_type().to_string(),
                backup_id: group.backup_id().to_string(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
            });

            group_info
        });

    Ok(group_info)
}
8f579717 | 220 | |
09b1f7b2 DM |
221 | #[api( |
222 | input: { | |
223 | properties: { | |
224 | store: { | |
225 | schema: DATASTORE_SCHEMA, | |
226 | }, | |
227 | "backup-type": { | |
228 | schema: BACKUP_TYPE_SCHEMA, | |
229 | }, | |
230 | "backup-id": { | |
231 | schema: BACKUP_ID_SCHEMA, | |
232 | }, | |
233 | "backup-time": { | |
234 | schema: BACKUP_TIME_SCHEMA, | |
235 | }, | |
236 | }, | |
237 | }, | |
238 | returns: { | |
239 | type: Array, | |
240 | description: "Returns the list of archive files inside a backup snapshots.", | |
241 | items: { | |
242 | type: BackupContent, | |
243 | } | |
244 | }, | |
bb34b589 | 245 | access: { |
54552dda DM |
246 | permission: &Permission::Privilege( |
247 | &["datastore", "{store}"], | |
248 | PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, | |
249 | true), | |
bb34b589 | 250 | }, |
09b1f7b2 DM |
251 | )] |
252 | /// List snapshot files. | |
ea5f547f | 253 | pub fn list_snapshot_files( |
09b1f7b2 DM |
254 | store: String, |
255 | backup_type: String, | |
256 | backup_id: String, | |
257 | backup_time: i64, | |
01a13423 | 258 | _info: &ApiMethod, |
54552dda | 259 | rpcenv: &mut dyn RpcEnvironment, |
09b1f7b2 | 260 | ) -> Result<Vec<BackupContent>, Error> { |
01a13423 | 261 | |
e6dc35ac | 262 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
09b1f7b2 | 263 | let datastore = DataStore::lookup_datastore(&store)?; |
54552dda | 264 | |
e0e5b442 | 265 | let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?; |
01a13423 | 266 | |
bff85572 | 267 | check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?; |
54552dda | 268 | |
d7c24397 | 269 | let info = BackupInfo::new(&datastore.base_path(), snapshot)?; |
01a13423 | 270 | |
70030b43 DM |
271 | let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?; |
272 | ||
273 | Ok(files) | |
01a13423 DM |
274 | } |
275 | ||
68a6a0ee DM |
276 | #[api( |
277 | input: { | |
278 | properties: { | |
279 | store: { | |
280 | schema: DATASTORE_SCHEMA, | |
281 | }, | |
282 | "backup-type": { | |
283 | schema: BACKUP_TYPE_SCHEMA, | |
284 | }, | |
285 | "backup-id": { | |
286 | schema: BACKUP_ID_SCHEMA, | |
287 | }, | |
288 | "backup-time": { | |
289 | schema: BACKUP_TIME_SCHEMA, | |
290 | }, | |
291 | }, | |
292 | }, | |
bb34b589 | 293 | access: { |
54552dda DM |
294 | permission: &Permission::Privilege( |
295 | &["datastore", "{store}"], | |
296 | PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE, | |
297 | true), | |
bb34b589 | 298 | }, |
68a6a0ee DM |
299 | )] |
300 | /// Delete backup snapshot. | |
301 | fn delete_snapshot( | |
302 | store: String, | |
303 | backup_type: String, | |
304 | backup_id: String, | |
305 | backup_time: i64, | |
6f62c924 | 306 | _info: &ApiMethod, |
54552dda | 307 | rpcenv: &mut dyn RpcEnvironment, |
6f62c924 DM |
308 | ) -> Result<Value, Error> { |
309 | ||
e6dc35ac | 310 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
54552dda | 311 | |
e0e5b442 | 312 | let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?; |
68a6a0ee | 313 | let datastore = DataStore::lookup_datastore(&store)?; |
6f62c924 | 314 | |
bff85572 | 315 | check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?; |
54552dda | 316 | |
c9756b40 | 317 | datastore.remove_backup_dir(&snapshot, false)?; |
6f62c924 DM |
318 | |
319 | Ok(Value::Null) | |
320 | } | |
321 | ||
fc189b19 DM |
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots (
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    // With AUDIT privilege all snapshots are visible; otherwise only owned ones.
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    // Narrow the group list according to the optional type/id filters.
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            let mut groups = Vec::with_capacity(1);
            groups.push(BackupGroup::new(backup_type, backup_id));
            groups
        },
        (Some(backup_type), None) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_type() == backup_type)
                .collect()
        },
        (None, Some(backup_id)) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_id() == backup_id)
                .collect()
        },
        _ => BackupInfo::list_backup_groups(&base_path)?,
    };

    // Convert one BackupInfo into an API result item. If the manifest or
    // file list cannot be read, a reduced entry (no comment, verification
    // state or sizes) is produced instead of failing the whole listing.
    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                // Unparsable verification state is logged and reported as None.
                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state : '{}'", err);
                        None
                    }
                };

                // Total size; entries without a known size count as 0.
                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    files,
                    size,
                    owner,
                }
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                // Fall back to the raw on-disk file names.
                let files = info
                    .files
                    .into_iter()
                    .map(|x| BackupContent {
                        filename: x.to_string(),
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    files,
                    size: None,
                    owner,
                }
            },
        }
    };

    groups
        .iter()
        .try_fold(Vec::new(), |mut snapshots, group| {
            // Unreadable owner: log and skip the group, keep listing the rest.
            let owner = match datastore.get_owner(group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return Ok(snapshots);
                },
            };

            // Non-auditors only see snapshots of groups they own.
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(snapshots);
            }

            let group_backups = group.list_backups(&datastore.base_path())?;

            snapshots.extend(
                group_backups
                    .into_iter()
                    .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
            );

            Ok(snapshots)
        })
}
483 | ||
/// Count backup groups and snapshots per backup type ("ct"/"vm"/"host"/other).
///
/// When `filter_owner` is `Some`, only groups owned by (or accessible to,
/// per `check_backup_owner`) that authid are counted; groups whose owner
/// cannot be read are logged and skipped.
fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let groups = BackupInfo::list_backup_groups(&base_path)?;

    groups.iter()
        .filter(|group| {
            let owner = match store.get_owner(&group) {
                Ok(owner) => owner,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              store.name(),
                              group,
                              err);
                    return false;
                },
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&base_path)?.len() as u64;

            // Lazily create the per-type counter on first use.
            let type_count = match group.backup_type() {
                "ct" => counts.ct.get_or_insert(Default::default()),
                "vm" => counts.vm.get_or_insert(Default::default()),
                "host" => counts.host.get_or_insert(Default::default()),
                _ => counts.other.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;

            Ok(counts)
        })
}
522 | ||
1dc117bb DM |
523 | #[api( |
524 | input: { | |
525 | properties: { | |
526 | store: { | |
527 | schema: DATASTORE_SCHEMA, | |
528 | }, | |
98afc7b1 FG |
529 | verbose: { |
530 | type: bool, | |
531 | default: false, | |
532 | optional: true, | |
533 | description: "Include additional information like snapshot counts and GC status.", | |
534 | }, | |
1dc117bb | 535 | }, |
98afc7b1 | 536 | |
1dc117bb DM |
537 | }, |
538 | returns: { | |
14e08625 | 539 | type: DataStoreStatus, |
1dc117bb | 540 | }, |
bb34b589 | 541 | access: { |
54552dda | 542 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true), |
bb34b589 | 543 | }, |
1dc117bb DM |
544 | )] |
545 | /// Get datastore status. | |
ea5f547f | 546 | pub fn status( |
1dc117bb | 547 | store: String, |
98afc7b1 | 548 | verbose: bool, |
0eecf38f | 549 | _info: &ApiMethod, |
fdfcb74d | 550 | rpcenv: &mut dyn RpcEnvironment, |
14e08625 | 551 | ) -> Result<DataStoreStatus, Error> { |
1dc117bb | 552 | let datastore = DataStore::lookup_datastore(&store)?; |
14e08625 | 553 | let storage = crate::tools::disks::disk_usage(&datastore.base_path())?; |
fdfcb74d FG |
554 | let (counts, gc_status) = if verbose { |
555 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
556 | let user_info = CachedUserInfo::new()?; | |
557 | ||
558 | let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); | |
559 | let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 { | |
560 | None | |
561 | } else { | |
562 | Some(&auth_id) | |
563 | }; | |
564 | ||
565 | let counts = Some(get_snapshots_count(&datastore, filter_owner)?); | |
566 | let gc_status = Some(datastore.last_gc_status()); | |
567 | ||
568 | (counts, gc_status) | |
569 | } else { | |
570 | (None, None) | |
98afc7b1 | 571 | }; |
16f9f244 | 572 | |
14e08625 DC |
573 | Ok(DataStoreStatus { |
574 | total: storage.total, | |
575 | used: storage.used, | |
576 | avail: storage.avail, | |
577 | gc_status, | |
578 | counts, | |
579 | }) | |
0eecf38f DM |
580 | } |
581 | ||
c2009e53 DM |
582 | #[api( |
583 | input: { | |
584 | properties: { | |
585 | store: { | |
586 | schema: DATASTORE_SCHEMA, | |
587 | }, | |
588 | "backup-type": { | |
589 | schema: BACKUP_TYPE_SCHEMA, | |
590 | optional: true, | |
591 | }, | |
592 | "backup-id": { | |
593 | schema: BACKUP_ID_SCHEMA, | |
594 | optional: true, | |
595 | }, | |
596 | "backup-time": { | |
597 | schema: BACKUP_TIME_SCHEMA, | |
598 | optional: true, | |
599 | }, | |
600 | }, | |
601 | }, | |
602 | returns: { | |
603 | schema: UPID_SCHEMA, | |
604 | }, | |
605 | access: { | |
09f6a240 | 606 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true), |
c2009e53 DM |
607 | }, |
608 | )] | |
609 | /// Verify backups. | |
610 | /// | |
611 | /// This function can verify a single backup snapshot, all backup from a backup group, | |
612 | /// or all backups in the datastore. | |
613 | pub fn verify( | |
614 | store: String, | |
615 | backup_type: Option<String>, | |
616 | backup_id: Option<String>, | |
617 | backup_time: Option<i64>, | |
618 | rpcenv: &mut dyn RpcEnvironment, | |
619 | ) -> Result<Value, Error> { | |
620 | let datastore = DataStore::lookup_datastore(&store)?; | |
621 | ||
09f6a240 | 622 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
8ea00f6e | 623 | let worker_id; |
c2009e53 DM |
624 | |
625 | let mut backup_dir = None; | |
626 | let mut backup_group = None; | |
133042b5 | 627 | let mut worker_type = "verify"; |
c2009e53 DM |
628 | |
629 | match (backup_type, backup_id, backup_time) { | |
630 | (Some(backup_type), Some(backup_id), Some(backup_time)) => { | |
4ebda996 | 631 | worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time); |
e0e5b442 | 632 | let dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
09f6a240 FG |
633 | |
634 | check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?; | |
635 | ||
c2009e53 | 636 | backup_dir = Some(dir); |
133042b5 | 637 | worker_type = "verify_snapshot"; |
c2009e53 DM |
638 | } |
639 | (Some(backup_type), Some(backup_id), None) => { | |
4ebda996 | 640 | worker_id = format!("{}:{}/{}", store, backup_type, backup_id); |
c2009e53 | 641 | let group = BackupGroup::new(backup_type, backup_id); |
09f6a240 FG |
642 | |
643 | check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?; | |
644 | ||
c2009e53 | 645 | backup_group = Some(group); |
133042b5 | 646 | worker_type = "verify_group"; |
c2009e53 DM |
647 | } |
648 | (None, None, None) => { | |
8ea00f6e | 649 | worker_id = store.clone(); |
c2009e53 | 650 | } |
5a718dce | 651 | _ => bail!("parameters do not specify a backup group or snapshot"), |
c2009e53 DM |
652 | } |
653 | ||
c2009e53 DM |
654 | let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; |
655 | ||
656 | let upid_str = WorkerTask::new_thread( | |
133042b5 | 657 | worker_type, |
e7cb4dc5 | 658 | Some(worker_id.clone()), |
09f6a240 | 659 | auth_id.clone(), |
e7cb4dc5 WB |
660 | to_stdout, |
661 | move |worker| { | |
4f09d310 DM |
662 | let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16))); |
663 | let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64))); | |
664 | ||
adfdc369 | 665 | let failed_dirs = if let Some(backup_dir) = backup_dir { |
adfdc369 | 666 | let mut res = Vec::new(); |
f6b1d1cc WB |
667 | if !verify_backup_dir( |
668 | datastore, | |
669 | &backup_dir, | |
670 | verified_chunks, | |
671 | corrupt_chunks, | |
672 | worker.clone(), | |
673 | worker.upid().clone(), | |
d771a608 | 674 | None, |
f6b1d1cc | 675 | )? { |
adfdc369 DC |
676 | res.push(backup_dir.to_string()); |
677 | } | |
678 | res | |
c2009e53 | 679 | } else if let Some(backup_group) = backup_group { |
63d9aca9 DM |
680 | let (_count, failed_dirs) = verify_backup_group( |
681 | datastore, | |
682 | &backup_group, | |
683 | verified_chunks, | |
684 | corrupt_chunks, | |
685 | None, | |
686 | worker.clone(), | |
f6b1d1cc | 687 | worker.upid(), |
d771a608 | 688 | None, |
63d9aca9 DM |
689 | )?; |
690 | failed_dirs | |
c2009e53 | 691 | } else { |
09f6a240 FG |
692 | let privs = CachedUserInfo::new()? |
693 | .lookup_privs(&auth_id, &["datastore", &store]); | |
694 | ||
695 | let owner = if privs & PRIV_DATASTORE_VERIFY == 0 { | |
696 | Some(auth_id) | |
697 | } else { | |
698 | None | |
699 | }; | |
700 | ||
701 | verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)? | |
c2009e53 | 702 | }; |
adfdc369 | 703 | if failed_dirs.len() > 0 { |
a4fa3fc2 | 704 | worker.log("Failed to verify the following snapshots/groups:"); |
adfdc369 DC |
705 | for dir in failed_dirs { |
706 | worker.log(format!("\t{}", dir)); | |
707 | } | |
1ffe0301 | 708 | bail!("verification failed - please check the log for details"); |
c2009e53 DM |
709 | } |
710 | Ok(()) | |
e7cb4dc5 WB |
711 | }, |
712 | )?; | |
c2009e53 DM |
713 | |
714 | Ok(json!(upid_str)) | |
715 | } | |
716 | ||
255f378a DM |
#[macro_export]
// Builds the common `keep-*` prune parameter list for ObjectSchema
// definitions. Entries from `$list1` are placed before the keep-*
// parameters, entries from `$list2` after them; each tuple is
// (name, optional, schema). NOTE(review): the macro name contains a typo
// ("prameters"), but it is `#[macro_export]`ed so renaming would break
// external users.
macro_rules! add_common_prune_prameters {
    // Single-list form: delegates to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
759 | ||
db1e061d DM |
// Return schema of the `prune` API call: an array of PruneListItem entries.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
764 | ||
0ab08ac9 DM |
// Hand-written ApiMethod for `prune` (instead of #[api]) so the shared
// keep-* parameters can be spliced in via add_common_prune_prameters!.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
    );
255f378a | 786 | |
83b7db02 DM |
787 | fn prune( |
788 | param: Value, | |
789 | _info: &ApiMethod, | |
54552dda | 790 | rpcenv: &mut dyn RpcEnvironment, |
83b7db02 DM |
791 | ) -> Result<Value, Error> { |
792 | ||
54552dda | 793 | let store = tools::required_string_param(¶m, "store")?; |
9fdc3ef4 DM |
794 | let backup_type = tools::required_string_param(¶m, "backup-type")?; |
795 | let backup_id = tools::required_string_param(¶m, "backup-id")?; | |
796 | ||
e6dc35ac | 797 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
54552dda | 798 | |
3b03abfe DM |
799 | let dry_run = param["dry-run"].as_bool().unwrap_or(false); |
800 | ||
9fdc3ef4 DM |
801 | let group = BackupGroup::new(backup_type, backup_id); |
802 | ||
54552dda DM |
803 | let datastore = DataStore::lookup_datastore(&store)?; |
804 | ||
bff85572 | 805 | check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?; |
83b7db02 | 806 | |
9e3f0088 DM |
807 | let prune_options = PruneOptions { |
808 | keep_last: param["keep-last"].as_u64(), | |
102d8d41 | 809 | keep_hourly: param["keep-hourly"].as_u64(), |
9e3f0088 DM |
810 | keep_daily: param["keep-daily"].as_u64(), |
811 | keep_weekly: param["keep-weekly"].as_u64(), | |
812 | keep_monthly: param["keep-monthly"].as_u64(), | |
813 | keep_yearly: param["keep-yearly"].as_u64(), | |
814 | }; | |
8f579717 | 815 | |
4ebda996 | 816 | let worker_id = format!("{}:{}/{}", store, backup_type, backup_id); |
503995c7 | 817 | |
dda70154 DM |
818 | let mut prune_result = Vec::new(); |
819 | ||
820 | let list = group.list_backups(&datastore.base_path())?; | |
821 | ||
822 | let mut prune_info = compute_prune_info(list, &prune_options)?; | |
823 | ||
824 | prune_info.reverse(); // delete older snapshots first | |
825 | ||
826 | let keep_all = !prune_options.keeps_something(); | |
827 | ||
828 | if dry_run { | |
829 | for (info, mut keep) in prune_info { | |
830 | if keep_all { keep = true; } | |
831 | ||
832 | let backup_time = info.backup_dir.backup_time(); | |
833 | let group = info.backup_dir.group(); | |
834 | ||
835 | prune_result.push(json!({ | |
836 | "backup-type": group.backup_type(), | |
837 | "backup-id": group.backup_id(), | |
6a7be83e | 838 | "backup-time": backup_time, |
dda70154 DM |
839 | "keep": keep, |
840 | })); | |
841 | } | |
842 | return Ok(json!(prune_result)); | |
843 | } | |
844 | ||
845 | ||
163e9bbe | 846 | // We use a WorkerTask just to have a task log, but run synchrounously |
e6dc35ac | 847 | let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?; |
dda70154 | 848 | |
f1539300 SR |
849 | if keep_all { |
850 | worker.log("No prune selection - keeping all files."); | |
851 | } else { | |
852 | worker.log(format!("retention options: {}", prune_options.cli_options_string())); | |
853 | worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"", | |
854 | store, backup_type, backup_id)); | |
855 | } | |
3b03abfe | 856 | |
f1539300 SR |
857 | for (info, mut keep) in prune_info { |
858 | if keep_all { keep = true; } | |
dda70154 | 859 | |
f1539300 SR |
860 | let backup_time = info.backup_dir.backup_time(); |
861 | let timestamp = info.backup_dir.backup_time_string(); | |
862 | let group = info.backup_dir.group(); | |
3b03abfe | 863 | |
3b03abfe | 864 | |
f1539300 SR |
865 | let msg = format!( |
866 | "{}/{}/{} {}", | |
867 | group.backup_type(), | |
868 | group.backup_id(), | |
869 | timestamp, | |
870 | if keep { "keep" } else { "remove" }, | |
871 | ); | |
872 | ||
873 | worker.log(msg); | |
874 | ||
875 | prune_result.push(json!({ | |
876 | "backup-type": group.backup_type(), | |
877 | "backup-id": group.backup_id(), | |
878 | "backup-time": backup_time, | |
879 | "keep": keep, | |
880 | })); | |
881 | ||
882 | if !(dry_run || keep) { | |
883 | if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) { | |
884 | worker.warn( | |
885 | format!( | |
886 | "failed to remove dir {:?}: {}", | |
887 | info.backup_dir.relative_path(), err | |
888 | ) | |
889 | ); | |
8f0b4c1f | 890 | } |
8f579717 | 891 | } |
f1539300 | 892 | } |
dd8e744f | 893 | |
f1539300 | 894 | worker.log_result(&Ok(())); |
83b7db02 | 895 | |
dda70154 | 896 | Ok(json!(prune_result)) |
83b7db02 DM |
897 | } |
898 | ||
dfc58d47 DM |
899 | #[api( |
900 | input: { | |
901 | properties: { | |
902 | store: { | |
903 | schema: DATASTORE_SCHEMA, | |
904 | }, | |
905 | }, | |
906 | }, | |
907 | returns: { | |
908 | schema: UPID_SCHEMA, | |
909 | }, | |
bb34b589 | 910 | access: { |
54552dda | 911 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false), |
bb34b589 | 912 | }, |
dfc58d47 DM |
913 | )] |
914 | /// Start garbage collection. | |
6049b71f | 915 | fn start_garbage_collection( |
dfc58d47 | 916 | store: String, |
6049b71f | 917 | _info: &ApiMethod, |
dd5495d6 | 918 | rpcenv: &mut dyn RpcEnvironment, |
6049b71f | 919 | ) -> Result<Value, Error> { |
15e9b4ed | 920 | |
3e6a7dee | 921 | let datastore = DataStore::lookup_datastore(&store)?; |
e6dc35ac | 922 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
15e9b4ed | 923 | |
4fdf5ddf DC |
924 | let job = Job::new("garbage_collection", &store) |
925 | .map_err(|_| format_err!("garbage collection already running"))?; | |
15e9b4ed | 926 | |
0f778e06 | 927 | let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; |
15e9b4ed | 928 | |
4fdf5ddf DC |
929 | let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout) |
930 | .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?; | |
0f778e06 DM |
931 | |
932 | Ok(json!(upid_str)) | |
15e9b4ed DM |
933 | } |
934 | ||
a92830dc DM |
935 | #[api( |
936 | input: { | |
937 | properties: { | |
938 | store: { | |
939 | schema: DATASTORE_SCHEMA, | |
940 | }, | |
941 | }, | |
942 | }, | |
943 | returns: { | |
944 | type: GarbageCollectionStatus, | |
bb34b589 DM |
945 | }, |
946 | access: { | |
947 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), | |
948 | }, | |
a92830dc DM |
949 | )] |
950 | /// Garbage collection status. | |
5eeea607 | 951 | pub fn garbage_collection_status( |
a92830dc | 952 | store: String, |
6049b71f | 953 | _info: &ApiMethod, |
dd5495d6 | 954 | _rpcenv: &mut dyn RpcEnvironment, |
a92830dc | 955 | ) -> Result<GarbageCollectionStatus, Error> { |
691c89a0 | 956 | |
f2b99c34 DM |
957 | let datastore = DataStore::lookup_datastore(&store)?; |
958 | ||
f2b99c34 | 959 | let status = datastore.last_gc_status(); |
691c89a0 | 960 | |
a92830dc | 961 | Ok(status) |
691c89a0 DM |
962 | } |
963 | ||
bb34b589 | 964 | #[api( |
30fb6025 DM |
965 | returns: { |
966 | description: "List the accessible datastores.", | |
967 | type: Array, | |
968 | items: { | |
969 | description: "Datastore name and description.", | |
455e5f71 | 970 | type: DataStoreListItem, |
30fb6025 DM |
971 | }, |
972 | }, | |
bb34b589 | 973 | access: { |
54552dda | 974 | permission: &Permission::Anybody, |
bb34b589 DM |
975 | }, |
976 | )] | |
977 | /// Datastore list | |
6049b71f DM |
978 | fn get_datastore_list( |
979 | _param: Value, | |
980 | _info: &ApiMethod, | |
54552dda | 981 | rpcenv: &mut dyn RpcEnvironment, |
455e5f71 | 982 | ) -> Result<Vec<DataStoreListItem>, Error> { |
15e9b4ed | 983 | |
d0187a51 | 984 | let (config, _digest) = datastore::config()?; |
15e9b4ed | 985 | |
e6dc35ac | 986 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
54552dda DM |
987 | let user_info = CachedUserInfo::new()?; |
988 | ||
30fb6025 | 989 | let mut list = Vec::new(); |
54552dda | 990 | |
30fb6025 | 991 | for (store, (_, data)) in &config.sections { |
e6dc35ac | 992 | let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); |
54552dda | 993 | let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0; |
30fb6025 | 994 | if allowed { |
455e5f71 FG |
995 | list.push( |
996 | DataStoreListItem { | |
997 | store: store.clone(), | |
998 | comment: data["comment"].as_str().map(String::from), | |
999 | } | |
1000 | ); | |
30fb6025 | 1001 | } |
54552dda DM |
1002 | } |
1003 | ||
30fb6025 | 1004 | Ok(list.into()) |
15e9b4ed DM |
1005 | } |
1006 | ||
0ab08ac9 DM |
1007 | #[sortable] |
1008 | pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new( | |
1009 | &ApiHandler::AsyncHttp(&download_file), | |
1010 | &ObjectSchema::new( | |
1011 | "Download single raw file from backup snapshot.", | |
1012 | &sorted!([ | |
66c49c21 | 1013 | ("store", false, &DATASTORE_SCHEMA), |
0ab08ac9 DM |
1014 | ("backup-type", false, &BACKUP_TYPE_SCHEMA), |
1015 | ("backup-id", false, &BACKUP_ID_SCHEMA), | |
1016 | ("backup-time", false, &BACKUP_TIME_SCHEMA), | |
4191018c | 1017 | ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA), |
0ab08ac9 DM |
1018 | ]), |
1019 | ) | |
54552dda DM |
1020 | ).access(None, &Permission::Privilege( |
1021 | &["datastore", "{store}"], | |
1022 | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, | |
1023 | true) | |
1024 | ); | |
691c89a0 | 1025 | |
9e47c0a5 DM |
/// Raw file download handler: streams one stored file (index or blob,
/// exactly as it lies on disk) from a backup snapshot over HTTP.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Reader privilege OR ownership of the backup group is sufficient.
        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        // <base>/<type>/<id>/<time>/<file-name>
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // Stream the file in chunks; errors are logged server-side and
        // forwarded so the HTTP stream aborts.
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1076 | ||
6ef9bb59 DC |
#[sortable]
/// API metadata for [`download_file_decoded`]: like the raw download,
/// but the handler decodes indexes/blobs before streaming; refuses
/// encrypted files (the server cannot decrypt them).
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
// `true`: partial match allowed — ownership checks happen in the handler.
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1095 | ||
/// Decoded download handler: streams the *contents* of a snapshot file.
/// Dispatches on the file extension — dynamic index (`didx`), fixed
/// index (`fidx`) or raw blob (`blob`) — verifying the manifest checksum
/// of indexes before streaming. Bails out for encrypted files.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // Encrypted files cannot be decoded server-side — reject early.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // rsplitn(2, '.').next() yields the text after the last dot.
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // Verify the index against the manifest before serving it.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // Fixed chunks are large — use a 4 MiB stream buffer.
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1192 | ||
#[sortable]
/// API metadata for [`upload_backup_log`]: lets the backup client push
/// its log ('client.log.blob') into an existing snapshot after the fact.
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    // Backup privilege required; the handler additionally enforces ownership.
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
9e47c0a5 | 1209 | |
07ee2235 DM |
/// Handler storing the request body as the snapshot's client log blob.
/// Only the owner of the backup group may upload, and only once —
/// an existing log is never overwritten.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        // Fixed target name: 'client.log.blob'.
        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Strict ownership check — privileges alone are not enough here.
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // A log may only be uploaded once per snapshot.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        // Collect the whole request body into memory.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        // Atomic write via replace_file (write to temp file + rename).
        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1262 | ||
5b1cfa01 DC |
1263 | #[api( |
1264 | input: { | |
1265 | properties: { | |
1266 | store: { | |
1267 | schema: DATASTORE_SCHEMA, | |
1268 | }, | |
1269 | "backup-type": { | |
1270 | schema: BACKUP_TYPE_SCHEMA, | |
1271 | }, | |
1272 | "backup-id": { | |
1273 | schema: BACKUP_ID_SCHEMA, | |
1274 | }, | |
1275 | "backup-time": { | |
1276 | schema: BACKUP_TIME_SCHEMA, | |
1277 | }, | |
1278 | "filepath": { | |
1279 | description: "Base64 encoded path.", | |
1280 | type: String, | |
1281 | } | |
1282 | }, | |
1283 | }, | |
1284 | access: { | |
1285 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), | |
1286 | }, | |
1287 | )] | |
1288 | /// Get the entries of the given path of the catalog | |
1289 | fn catalog( | |
1290 | store: String, | |
1291 | backup_type: String, | |
1292 | backup_id: String, | |
1293 | backup_time: i64, | |
1294 | filepath: String, | |
1295 | _param: Value, | |
1296 | _info: &ApiMethod, | |
1297 | rpcenv: &mut dyn RpcEnvironment, | |
1298 | ) -> Result<Value, Error> { | |
1299 | let datastore = DataStore::lookup_datastore(&store)?; | |
1300 | ||
e6dc35ac | 1301 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
5b1cfa01 | 1302 | |
e0e5b442 | 1303 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
5b1cfa01 | 1304 | |
bff85572 | 1305 | check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?; |
5b1cfa01 | 1306 | |
9238cdf5 FG |
1307 | let file_name = CATALOG_NAME; |
1308 | ||
2d55beec | 1309 | let (manifest, files) = read_backup_index(&datastore, &backup_dir)?; |
9238cdf5 FG |
1310 | for file in files { |
1311 | if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { | |
1312 | bail!("cannot decode '{}' - is encrypted", file_name); | |
1313 | } | |
1314 | } | |
1315 | ||
5b1cfa01 DC |
1316 | let mut path = datastore.base_path(); |
1317 | path.push(backup_dir.relative_path()); | |
9238cdf5 | 1318 | path.push(file_name); |
5b1cfa01 DC |
1319 | |
1320 | let index = DynamicIndexReader::open(&path) | |
1321 | .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; | |
1322 | ||
2d55beec FG |
1323 | let (csum, size) = index.compute_csum(); |
1324 | manifest.verify_file(&file_name, &csum, size)?; | |
1325 | ||
14f6c9cb | 1326 | let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); |
5b1cfa01 DC |
1327 | let reader = BufferedDynamicReader::new(index, chunk_reader); |
1328 | ||
1329 | let mut catalog_reader = CatalogReader::new(reader); | |
1330 | let mut current = catalog_reader.root()?; | |
1331 | let mut components = vec![]; | |
1332 | ||
1333 | ||
1334 | if filepath != "root" { | |
1335 | components = base64::decode(filepath)?; | |
1336 | if components.len() > 0 && components[0] == '/' as u8 { | |
1337 | components.remove(0); | |
1338 | } | |
1339 | for component in components.split(|c| *c == '/' as u8) { | |
1340 | if let Some(entry) = catalog_reader.lookup(¤t, component)? { | |
1341 | current = entry; | |
1342 | } else { | |
1343 | bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components)); | |
1344 | } | |
1345 | } | |
1346 | } | |
1347 | ||
1348 | let mut res = Vec::new(); | |
1349 | ||
1350 | for direntry in catalog_reader.read_dir(¤t)? { | |
1351 | let mut components = components.clone(); | |
1352 | components.push('/' as u8); | |
1353 | components.extend(&direntry.name); | |
1354 | let path = base64::encode(components); | |
1355 | let text = String::from_utf8_lossy(&direntry.name); | |
1356 | let mut entry = json!({ | |
1357 | "filepath": path, | |
1358 | "text": text, | |
1359 | "type": CatalogEntryType::from(&direntry.attr).to_string(), | |
1360 | "leaf": true, | |
1361 | }); | |
1362 | match direntry.attr { | |
1363 | DirEntryAttribute::Directory { start: _ } => { | |
1364 | entry["leaf"] = false.into(); | |
1365 | }, | |
1366 | DirEntryAttribute::File { size, mtime } => { | |
1367 | entry["size"] = size.into(); | |
1368 | entry["mtime"] = mtime.into(); | |
1369 | }, | |
1370 | _ => {}, | |
1371 | } | |
1372 | res.push(entry); | |
1373 | } | |
1374 | ||
1375 | Ok(res.into()) | |
1376 | } | |
1377 | ||
53a561a2 WB |
/// Recursively add a pxar entry (and, for directories, everything below
/// it) to a ZIP stream. Returns a boxed future because async recursion
/// needs an indirection (`Pin<Box<dyn Future …>>`).
///
/// `prefix` is stripped from every entry path so the ZIP contains paths
/// relative to the downloaded directory.
fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                // Resolve the hardlink target and store its contents as a
                // regular file entry.
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                // Directory entry itself (no contents), then recurse into
                // each child.
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}
1437 | ||
d33d8f4e DC |
#[sortable]
/// API metadata for [`pxar_file_download`]: extracts a single file (or a
/// directory, streamed as a ZIP) out of an unencrypted pxar archive
/// inside a snapshot.
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
// `true`: partial match allowed — ownership checks happen in the handler.
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1456 | ||
/// Handler extracting an entry from a pxar archive of a snapshot.
/// `filepath` is base64: "<archive-name.pxar.didx>/<path/inside/archive>".
/// Regular files and hardlink targets are streamed directly; a directory
/// is zipped on the fly in a background task and streamed via a channel.
fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // Split "<pxar-archive-name>/<path-inside-archive>".
        let mut components = base64::decode(&filepath)?;
        if components.len() > 0 && components[0] == '/' as u8 {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == '/' as u8);
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
        // Encrypted archives cannot be decoded server-side.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        // Verify the archive index against the manifest before reading.
        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        // Random-access pxar accessor over the chunked archive.
        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                // Strip everything above the requested directory so ZIP
                // paths are relative to it.
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard last
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                // Zip in a background task; the HTTP response streams from
                // the channel receiver as the encoder produces data.
                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creating of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error during finishing of zip: {}", err))
                });

                Body::wrap_stream(receiver.map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1574 | ||
1a0d3d11 DM |
1575 | #[api( |
1576 | input: { | |
1577 | properties: { | |
1578 | store: { | |
1579 | schema: DATASTORE_SCHEMA, | |
1580 | }, | |
1581 | timeframe: { | |
1582 | type: RRDTimeFrameResolution, | |
1583 | }, | |
1584 | cf: { | |
1585 | type: RRDMode, | |
1586 | }, | |
1587 | }, | |
1588 | }, | |
1589 | access: { | |
1590 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true), | |
1591 | }, | |
1592 | )] | |
1593 | /// Read datastore stats | |
1594 | fn get_rrd_stats( | |
1595 | store: String, | |
1596 | timeframe: RRDTimeFrameResolution, | |
1597 | cf: RRDMode, | |
1598 | _param: Value, | |
1599 | ) -> Result<Value, Error> { | |
1600 | ||
431cc7b1 DC |
1601 | create_value_from_rrd( |
1602 | &format!("datastore/{}", store), | |
1a0d3d11 DM |
1603 | &[ |
1604 | "total", "used", | |
c94e1f65 DM |
1605 | "read_ios", "read_bytes", |
1606 | "write_ios", "write_bytes", | |
1607 | "io_ticks", | |
1a0d3d11 DM |
1608 | ], |
1609 | timeframe, | |
1610 | cf, | |
1611 | ) | |
1612 | } | |
1613 | ||
912b3f5b DM |
1614 | #[api( |
1615 | input: { | |
1616 | properties: { | |
1617 | store: { | |
1618 | schema: DATASTORE_SCHEMA, | |
1619 | }, | |
1620 | "backup-type": { | |
1621 | schema: BACKUP_TYPE_SCHEMA, | |
1622 | }, | |
1623 | "backup-id": { | |
1624 | schema: BACKUP_ID_SCHEMA, | |
1625 | }, | |
1626 | "backup-time": { | |
1627 | schema: BACKUP_TIME_SCHEMA, | |
1628 | }, | |
1629 | }, | |
1630 | }, | |
1631 | access: { | |
1401f4be | 1632 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true), |
912b3f5b DM |
1633 | }, |
1634 | )] | |
1635 | /// Get "notes" for a specific backup | |
1636 | fn get_notes( | |
1637 | store: String, | |
1638 | backup_type: String, | |
1639 | backup_id: String, | |
1640 | backup_time: i64, | |
1641 | rpcenv: &mut dyn RpcEnvironment, | |
1642 | ) -> Result<String, Error> { | |
1643 | let datastore = DataStore::lookup_datastore(&store)?; | |
1644 | ||
e6dc35ac | 1645 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
e0e5b442 | 1646 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
912b3f5b | 1647 | |
1401f4be | 1648 | check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?; |
912b3f5b | 1649 | |
883aa6d5 | 1650 | let (manifest, _) = datastore.load_manifest(&backup_dir)?; |
912b3f5b | 1651 | |
883aa6d5 | 1652 | let notes = manifest.unprotected["notes"] |
912b3f5b DM |
1653 | .as_str() |
1654 | .unwrap_or(""); | |
1655 | ||
1656 | Ok(String::from(notes)) | |
1657 | } | |
1658 | ||
1659 | #[api( | |
1660 | input: { | |
1661 | properties: { | |
1662 | store: { | |
1663 | schema: DATASTORE_SCHEMA, | |
1664 | }, | |
1665 | "backup-type": { | |
1666 | schema: BACKUP_TYPE_SCHEMA, | |
1667 | }, | |
1668 | "backup-id": { | |
1669 | schema: BACKUP_ID_SCHEMA, | |
1670 | }, | |
1671 | "backup-time": { | |
1672 | schema: BACKUP_TIME_SCHEMA, | |
1673 | }, | |
1674 | notes: { | |
1675 | description: "A multiline text.", | |
1676 | }, | |
1677 | }, | |
1678 | }, | |
1679 | access: { | |
b728a69e FG |
1680 | permission: &Permission::Privilege(&["datastore", "{store}"], |
1681 | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP, | |
1682 | true), | |
912b3f5b DM |
1683 | }, |
1684 | )] | |
1685 | /// Set "notes" for a specific backup | |
1686 | fn set_notes( | |
1687 | store: String, | |
1688 | backup_type: String, | |
1689 | backup_id: String, | |
1690 | backup_time: i64, | |
1691 | notes: String, | |
1692 | rpcenv: &mut dyn RpcEnvironment, | |
1693 | ) -> Result<(), Error> { | |
1694 | let datastore = DataStore::lookup_datastore(&store)?; | |
1695 | ||
e6dc35ac | 1696 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
e0e5b442 | 1697 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
912b3f5b | 1698 | |
b728a69e | 1699 | check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?; |
912b3f5b | 1700 | |
1a374fcf SR |
1701 | datastore.update_manifest(&backup_dir,|manifest| { |
1702 | manifest.unprotected["notes"] = notes.into(); | |
1703 | }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?; | |
912b3f5b DM |
1704 | |
1705 | Ok(()) | |
1706 | } | |
1707 | ||
72be0eb1 | 1708 | #[api( |
4940012d | 1709 | input: { |
72be0eb1 DW |
1710 | properties: { |
1711 | store: { | |
1712 | schema: DATASTORE_SCHEMA, | |
1713 | }, | |
1714 | "backup-type": { | |
1715 | schema: BACKUP_TYPE_SCHEMA, | |
1716 | }, | |
1717 | "backup-id": { | |
1718 | schema: BACKUP_ID_SCHEMA, | |
1719 | }, | |
1720 | "new-owner": { | |
e6dc35ac | 1721 | type: Authid, |
72be0eb1 DW |
1722 | }, |
1723 | }, | |
4940012d FG |
1724 | }, |
1725 | access: { | |
bff85572 FG |
1726 | permission: &Permission::Anybody, |
1727 | description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup" | |
4940012d | 1728 | }, |
72be0eb1 DW |
1729 | )] |
1730 | /// Change owner of a backup group | |
1731 | fn set_backup_owner( | |
1732 | store: String, | |
1733 | backup_type: String, | |
1734 | backup_id: String, | |
e6dc35ac | 1735 | new_owner: Authid, |
bff85572 | 1736 | rpcenv: &mut dyn RpcEnvironment, |
72be0eb1 DW |
1737 | ) -> Result<(), Error> { |
1738 | ||
1739 | let datastore = DataStore::lookup_datastore(&store)?; | |
1740 | ||
1741 | let backup_group = BackupGroup::new(backup_type, backup_id); | |
1742 | ||
bff85572 FG |
1743 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
1744 | ||
72be0eb1 DW |
1745 | let user_info = CachedUserInfo::new()?; |
1746 | ||
bff85572 FG |
1747 | let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); |
1748 | ||
1749 | let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 { | |
1750 | // High-privilege user/token | |
1751 | true | |
1752 | } else if (privs & PRIV_DATASTORE_BACKUP) != 0 { | |
1753 | let owner = datastore.get_owner(&backup_group)?; | |
1754 | ||
1755 | match (owner.is_token(), new_owner.is_token()) { | |
1756 | (true, true) => { | |
1757 | // API token to API token, owned by same user | |
1758 | let owner = owner.user(); | |
1759 | let new_owner = new_owner.user(); | |
1760 | owner == new_owner && Authid::from(owner.clone()) == auth_id | |
1761 | }, | |
1762 | (true, false) => { | |
1763 | // API token to API token owner | |
1764 | Authid::from(owner.user().clone()) == auth_id | |
1765 | && new_owner == auth_id | |
1766 | }, | |
1767 | (false, true) => { | |
1768 | // API token owner to API token | |
1769 | owner == auth_id | |
1770 | && Authid::from(new_owner.user().clone()) == auth_id | |
1771 | }, | |
1772 | (false, false) => { | |
1773 | // User to User, not allowed for unprivileged users | |
1774 | false | |
1775 | }, | |
1776 | } | |
1777 | } else { | |
1778 | false | |
1779 | }; | |
1780 | ||
1781 | if !allowed { | |
1782 | return Err(http_err!(UNAUTHORIZED, | |
1783 | "{} does not have permission to change owner of backup group '{}' to {}", | |
1784 | auth_id, | |
1785 | backup_group, | |
1786 | new_owner, | |
1787 | )); | |
1788 | } | |
1789 | ||
e6dc35ac FG |
1790 | if !user_info.is_active_auth_id(&new_owner) { |
1791 | bail!("{} '{}' is inactive or non-existent", | |
1792 | if new_owner.is_token() { | |
1793 | "API token".to_string() | |
1794 | } else { | |
1795 | "user".to_string() | |
1796 | }, | |
1797 | new_owner); | |
72be0eb1 DW |
1798 | } |
1799 | ||
1800 | datastore.set_owner(&backup_group, &new_owner, true)?; | |
1801 | ||
1802 | Ok(()) | |
1803 | } | |
1804 | ||
// Per-datastore API sub-directories (`/admin/datastore/{store}/...`).
// NOTE(review): entries look alphabetically ordered and the const carries
// `#[sortable]` — presumably the subdir lookup relies on sorted order, so
// keep new entries sorted; TODO confirm against `list_subdirs_api_method!`.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        // garbage collection: GET = status, POST = start a run
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        // snapshot notes: GET = read, PUT = replace
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        // snapshots: GET = list, DELETE = remove one
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1886 | ||
// Router for a single datastore: GET on the directory itself lists the
// available sub-directories; everything else dispatches into the subdir map.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
1890 | ||
1891 | ||
// Top-level datastore router: GET lists all datastores; the `{store}`
// path component is captured and routed to the per-datastore router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);