// src/api2/admin/datastore.rs

use std::collections::{HashSet, HashMap};
use std::convert::TryFrom;

use chrono::{TimeZone, Local};
use failure::*;
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::api::api;
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox::try_block;
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

use crate::api2::types::*;
use crate::backup::*;
use crate::config::datastore;
use crate::server::WorkerTask;
use crate::tools;

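/// Read the `index.json.blob` manifest of a snapshot and return the file
/// entries it lists (plus an entry for the manifest blob itself) as
/// `BackupContent` items including their sizes.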
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {

    let mut path = store.base_path();
    path.push(backup_dir.relative_path());
    path.push("index.json.blob");

    let raw_data = file_get_contents(&path)?;
    let index_size = raw_data.len() as u64;
    let blob = DataBlob::from_raw(raw_data)?;

    let manifest = BackupManifest::try_from(blob)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: "index.json.blob".to_string(),
        size: Some(index_size),
    });

    Ok(result)
}

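/// Group a flat backup list by backup group (`<backup-type>/<backup-id>`),
/// keyed on the group's relative path.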
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert(vec![]);
        time_list.push(info);
    }

    group_hash
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
)]
/// List backup groups.
fn list_groups(
    store: String,
) -> Result<Vec<GroupListItem>, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = Vec::new();

    for (_group_id, mut list) in group_hash {

        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];
        let group = info.backup_dir.group();

        let result_item = GroupListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            last_backup: info.backup_dir.backup_time().timestamp(),
            backup_count: list.len() as u64,
            files: info.files.clone(),
        };
        groups.push(result_item);
    }

    Ok(groups)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of archive files inside a backup snapshot.",
        items: {
            type: BackupContent,
        }
    },
)]
/// List snapshot files.
fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;
    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let mut files = read_backup_index(&datastore, &snapshot)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in info.files {
        if file_set.contains(&file) { continue; }
        files.push(BackupContent { filename: file, size: None });
    }

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
)]
/// Delete backup snapshot.
fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let datastore = DataStore::lookup_datastore(&store)?;

    datastore.remove_backup_dir(&snapshot)?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
)]
/// List backup snapshots.
fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = param["backup-type"].as_str();
    let backup_id = param["backup-id"].as_str();

    let datastore = DataStore::lookup_datastore(store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let mut result_item = SnapshotListItem {
            backup_type: group.backup_type().to_string(),
            backup_id: group.backup_id().to_string(),
            backup_time: info.backup_dir.backup_time().timestamp(),
            files: info.files,
            size: None,
        };

        if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
            let mut backup_size = 0;
            for item in index.iter() {
                if let Some(item_size) = item.size {
                    backup_size += item_size;
                }
            }
            result_item.size = Some(backup_size);
        }

        snapshots.push(result_item);
    }

    Ok(snapshots)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
)]
/// Get datastore status.
fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    nix::errno::Errno::result(res)?;

    let bsize = stat.f_bsize as u64;

    Ok(StorageStatus {
        total: stat.f_blocks * bsize,
        used: (stat.f_blocks - stat.f_bfree) * bsize,
        avail: stat.f_bavail * bsize,
    })
}

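// Helper macro that splices the common `keep-*` retention parameters into an
// ObjectSchema property list. The optional first and second bracketed lists
// are emitted before and after the generated `(name, optional, schema)`
// tuples, respectively.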
#[macro_export]
macro_rules! add_common_prune_prameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &IntegerSchema::new("Number of daily backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-hourly",
                true,
                &IntegerSchema::new("Number of hourly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-last",
                true,
                &IntegerSchema::new("Number of backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-monthly",
                true,
                &IntegerSchema::new("Number of monthly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-weekly",
                true,
                &IntegerSchema::new("Number of weekly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-yearly",
                true,
                &IntegerSchema::new("Number of yearly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            $( $list2 )*
        ]
    }
}

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    )
);

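/// Prune a single backup group: compute which snapshots to keep from the
/// `keep-*` options and remove the rest (or only log what would happen when
/// `dry-run` is set). Runs inside a synchronous WorkerTask so the output is
/// captured in the task log; returns the worker UPID.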
fn prune(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(store)?;

    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
    let result = try_block! {
        if !prune_options.keeps_something() {
            worker.log("No prune selection - keeping all files.");
            return Ok(());
        } else {
            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
            if dry_run {
                worker.log(format!("Testing prune on store \"{}\" group \"{}/{}\"",
                    store, backup_type, backup_id));
            } else {
                worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                    store, backup_type, backup_id));
            }
        }

        let list = group.list_backups(&datastore.base_path())?;

        let mut prune_info = compute_prune_info(list, &prune_options)?;

        prune_info.reverse(); // delete older snapshots first

        for (info, keep) in prune_info {
            let backup_time = info.backup_dir.backup_time();
            let timestamp = BackupDir::backup_time_to_string(backup_time);
            let group = info.backup_dir.group();

            let msg = format!(
                "{}/{}/{} {}",
                group.backup_type(),
                group.backup_id(),
                timestamp,
                if keep { "keep" } else { "remove" },
            );

            worker.log(msg);

            if !(dry_run || keep) {
                datastore.remove_backup_dir(&info.backup_dir)?;
            }
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    }

    Ok(json!(worker.to_string())) // return the UPID
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Start garbage collection.
fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
        {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(worker)
        })?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    }
)]
/// Garbage collection status.
fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let status = datastore.last_gc_status();

    Ok(status)
}

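/// List the configured datastores (entries from the datastore config); used
/// as the directory index of this API module.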
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    Ok(config.convert_to_array("store", None))
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &StringSchema::new("Raw file name.")
                .format(&FILENAME_FORMAT)
                .schema()
            ),
        ]),
    )
);

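/// Async HTTP handler: streams a single file out of the snapshot directory as
/// an `application/octet-stream` response body.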
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;

        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
                 backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(path)
            .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
            .await?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file to a backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
);

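/// Async HTTP handler: reads the request body as a DataBlob, verifies its CRC
/// and stores it as `client.log.blob` inside the snapshot directory; refuses
/// to overwrite an existing log.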
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;

        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = "client.log.blob";

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        let blob = DataBlob::from_raw(data)?;
        // always verify CRC at server side
        blob.verify_crc()?;
        let raw_data = blob.raw_data();
        replace_file(&path, raw_data, CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}

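// Per-datastore sub-directory map: each entry maps a path segment below a
// concrete `{store}` to the router handling it.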
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
];

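// The per-datastore router is matched below the `store` path parameter, so
// (assuming this module is mounted at the usual admin/datastore location in
// the API tree) the subdirs above end up at paths like
// .../admin/datastore/{store}/snapshots.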
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_datastore_list),
            &ObjectSchema::new("Directory index.", &[])
        )
    )
    .match_all("store", &DATASTORE_INFO_ROUTER);