// src/api2/backup.rs
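//! Backup protocol API.
//!
//! The client reaches these endpoints by upgrading a plain HTTP connection
//! to the proxmox backup protocol and then speaking HTTP/2 over the upgraded
//! connection; `backup_api()` below defines the routes served there.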
use failure::*;
use lazy_static::lazy_static;

//use std::sync::Arc;

use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::{Body, Response, StatusCode};
use hyper::http::request::Parts;

use serde_json::{json, Value};

use crate::tools;
use crate::tools::wrapped_reader_stream::*;
use crate::api_schema::router::*;
use crate::api_schema::*;
use crate::server::{WorkerTask, H2Service};
use crate::backup::*;
use crate::api2::types::*;

mod environment;
use environment::*;

mod upload_chunk;
use upload_chunk::*;

pub fn router() -> Router {
    Router::new()
        .upgrade(api_method_upgrade_backup())
}

pub fn api_method_upgrade_backup() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        upgrade_to_backup_protocol,
        ObjectSchema::new(concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."))
            .required("store", StringSchema::new("Datastore name."))
            .required("backup-type", BACKUP_TYPE_SCHEMA.clone())
            .required("backup-id", BACKUP_ID_SCHEMA.clone())
            .required("backup-time", BACKUP_TIME_SCHEMA.clone())
            .optional("debug", BooleanSchema::new("Enable verbose debug logging."))
    )
}

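// Handle the initial upgrade request: validate the query parameters and the
// "Upgrade" header, make sure the new snapshot is newer than the last one in
// the group, create the backup directory, then spawn a worker task that
// serves the backup API (BACKUP_ROUTER) over HTTP/2 on the upgraded
// connection. The handler itself only returns the "101 Switching Protocols"
// response.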
fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let debug = param["debug"].as_bool().unwrap_or(false);

    let store = tools::required_string_param(&param, "store")?.to_owned();
    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    let protocols = parts
        .headers
        .get("UPGRADE")
        .ok_or_else(|| format_err!("missing Upgrade header"))?
        .to_str()?;

    if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
        bail!("invalid protocol name");
    }

    if parts.version >= http::version::Version::HTTP_2 {
        bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
    }

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    let username = rpcenv.get_user().unwrap();
    let env_type = rpcenv.env_type();

    let backup_group = BackupGroup::new(backup_type, backup_id);
    let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
    let backup_dir = BackupDir::new_with_group(backup_group, backup_time);

    if let Some(last) = &last_backup {
        if backup_dir.backup_time() <= last.backup_dir.backup_time() {
            bail!("backup timestamp is older than last backup.");
        }
    }

    let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
    if !is_new { bail!("backup directory already exists."); }

    WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
        let mut env = BackupEnvironment::new(
            env_type, username.clone(), worker.clone(), datastore, backup_dir);

        env.debug = debug;
        env.last_backup = last_backup;

        env.log(format!("starting new backup on datastore '{}': {:?}", store, path));

        let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_ROUTER, debug);

        let abort_future = worker.abort_future();

        let env2 = env.clone();
        let env3 = env.clone();

        let req_fut = req_body
            .on_upgrade()
            .map_err(Error::from)
            .and_then(move |conn| {
                env3.debug("protocol upgrade done");

                let mut http = hyper::server::conn::Http::new();
                http.http2_only(true);
                // increase window size: todo - find optimal size
                let window_size = 32*1024*1024; // max = (1 << 31) - 2
                http.http2_initial_stream_window_size(window_size);
                http.http2_initial_connection_window_size(window_size);

                http.serve_connection(conn, service)
                    .map_err(Error::from)
            });
        let abort_future = abort_future
            .map(|_| Err(format_err!("task aborted")));

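        // Drive the HTTP/2 connection and the worker's abort future in
        // parallel; whichever finishes first decides the outcome, so an
        // aborted task tears down the connection with an error.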
        use futures::future::Either;
        future::select(req_fut, abort_future)
            .map(|res| match res {
                Either::Left((Ok(res), _)) => Ok(res),
                Either::Left((Err(err), _)) => Err(err),
                Either::Right((Ok(res), _)) => Ok(res),
                Either::Right((Err(err), _)) => Err(err),
            })
            .and_then(move |_result| async move {
                env.ensure_finished()?;
                env.log("backup finished successfully");
                Ok(())
            })
            .then(move |result| async move {
                if let Err(err) = result {
                    match env2.ensure_finished() {
                        Ok(()) => {}, // ignore error after finish
                        _ => {
                            env2.log(format!("backup failed: {}", err));
                            env2.log("removing failed backup");
                            env2.remove_backup()?;
                            return Err(err);
                        }
                    }
                }
                Ok(())
            })
    })?;

    let response = Response::builder()
        .status(StatusCode::SWITCHING_PROTOCOLS)
        .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
        .body(Body::empty())?;

    Ok(Box::new(futures::future::ok(response)))
}

lazy_static!{
    static ref BACKUP_ROUTER: Router = backup_api();
}

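// Router for the backup API served over the upgraded HTTP/2 connection
// (blob/chunk uploads, index creation, append, close, finish, speedtest).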
pub fn backup_api() -> Router {
    Router::new()
        .subdir(
            "blob", Router::new()
                .upload(api_method_upload_blob())
        )
        .subdir(
            "dynamic_chunk", Router::new()
                .upload(api_method_upload_dynamic_chunk())
        )
        .subdir(
            "dynamic_index", Router::new()
                .download(api_method_dynamic_chunk_index())
                .post(api_method_create_dynamic_index())
                .put(api_method_dynamic_append())
        )
        .subdir(
            "dynamic_close", Router::new()
                .post(api_method_close_dynamic_index())
        )
        .subdir(
            "fixed_chunk", Router::new()
                .upload(api_method_upload_fixed_chunk())
        )
        .subdir(
            "fixed_index", Router::new()
                .download(api_method_fixed_chunk_index())
                .post(api_method_create_fixed_index())
                .put(api_method_fixed_append())
        )
        .subdir(
            "fixed_close", Router::new()
                .post(api_method_close_fixed_index())
        )
        .subdir(
            "finish", Router::new()
                .post(
                    ApiMethod::new(
                        finish_backup,
                        ObjectSchema::new("Mark backup as finished.")
                    )
                )
        )
        .subdir(
            "speedtest", Router::new()
                .upload(api_method_upload_speedtest())
        )
        .list_subdirs()
}

pub fn api_method_create_dynamic_index() -> ApiMethod {
    ApiMethod::new(
        create_dynamic_index,
        ObjectSchema::new("Create dynamic chunk index file.")
            .required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
    )
}

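// Create a new dynamic index writer ('.didx') inside the current backup
// directory and return its writer ID, which the client passes to the
// append and close calls.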
fn create_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = tools::required_string_param(&param, "archive-name")?.to_owned();

    let archive_name = name.clone();
    if !archive_name.ends_with(".didx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let index = env.datastore.create_dynamic_writer(&path)?;
    let wid = env.register_dynamic_writer(index, name)?;

    env.log(format!("created new dynamic index {} ({:?})", wid, path));

    Ok(json!(wid))
}

pub fn api_method_create_fixed_index() -> ApiMethod {
    ApiMethod::new(
        create_fixed_index,
        ObjectSchema::new("Create fixed chunk index file.")
            .required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
            .required("size", IntegerSchema::new("File size.")
                .minimum(1)
            )
    )
}

fn create_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    println!("PARAM: {:?}", param);

    let name = tools::required_string_param(&param, "archive-name")?.to_owned();
    let size = tools::required_integer_param(&param, "size")? as usize;

    let archive_name = name.clone();
    if !archive_name.ends_with(".fidx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let chunk_size = 4096*1024; // todo: ??

    let index = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
    let wid = env.register_fixed_writer(index, name, size, chunk_size as u32)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

    Ok(json!(wid))
}

pub fn api_method_dynamic_append() -> ApiMethod {
    ApiMethod::new(
        dynamic_append,
        ObjectSchema::new("Append chunk to dynamic index writer.")
            .required("wid", IntegerSchema::new("Dynamic writer ID.")
                .minimum(1)
                .maximum(256)
            )
            .required("digest-list", ArraySchema::new(
                "Chunk digest list.", CHUNK_DIGEST_SCHEMA.clone())
            )
            .required("offset-list", ArraySchema::new(
                "Chunk offset list.",
                IntegerSchema::new("Corresponding chunk offsets.")
                    .minimum(0)
                    .into())
            )
    )
}

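// Append previously uploaded chunks to a dynamic index writer. Each digest
// must already be known to the environment (uploaded or registered from the
// previous snapshot), and the digest and offset lists must have equal length.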
fn dynamic_append (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let digest_list = tools::required_array_param(&param, "digest-list")?;
    let offset_list = tools::required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("dynamic_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = proxmox::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

pub fn api_method_fixed_append() -> ApiMethod {
    ApiMethod::new(
        fixed_append,
        ObjectSchema::new("Append chunk to fixed index writer.")
            .required("wid", IntegerSchema::new("Fixed writer ID.")
                .minimum(1)
                .maximum(256)
            )
            .required("digest-list", ArraySchema::new(
                "Chunk digest list.", CHUNK_DIGEST_SCHEMA.clone())
            )
            .required("offset-list", ArraySchema::new(
                "Chunk offset list.",
                IntegerSchema::new("Corresponding chunk offsets.")
                    .minimum(0)
                    .into())
            )
    )
}

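// Same as dynamic_append, but for fixed index writers ('.fidx').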
fn fixed_append (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let digest_list = tools::required_array_param(&param, "digest-list")?;
    let offset_list = tools::required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("fixed_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = proxmox::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

pub fn api_method_close_dynamic_index() -> ApiMethod {
    ApiMethod::new(
        close_dynamic_index,
        ObjectSchema::new("Close dynamic index writer.")
            .required("wid", IntegerSchema::new("Dynamic writer ID.")
                .minimum(1)
                .maximum(256)
            )
            .required("chunk-count", IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                .minimum(1)
            )
            .required("size", IntegerSchema::new("File size. This is used to verify that the server got all data.")
                .minimum(1)
            )
            .required("csum", StringSchema::new("Digest list checksum."))
    )
}

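// Close a dynamic index writer. The client-supplied chunk count, total size
// and digest-list checksum let the server verify that it received everything
// before the index is committed; close_fixed_index below works the same way.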
fn close_dynamic_index (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let chunk_count = tools::required_integer_param(&param, "chunk-count")? as u64;
    let size = tools::required_integer_param(&param, "size")? as u64;
    let csum_str = tools::required_string_param(&param, "csum")?;
    let csum = proxmox::tools::hex_to_digest(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.dynamic_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}

pub fn api_method_close_fixed_index() -> ApiMethod {
    ApiMethod::new(
        close_fixed_index,
        ObjectSchema::new("Close fixed index writer.")
            .required("wid", IntegerSchema::new("Fixed writer ID.")
                .minimum(1)
                .maximum(256)
            )
            .required("chunk-count", IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                .minimum(1)
            )
            .required("size", IntegerSchema::new("File size. This is used to verify that the server got all data.")
                .minimum(1)
            )
            .required("csum", StringSchema::new("Digest list checksum."))
    )
}

fn close_fixed_index (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let chunk_count = tools::required_integer_param(&param, "chunk-count")? as u64;
    let size = tools::required_integer_param(&param, "size")? as u64;
    let csum_str = tools::required_string_param(&param, "csum")?;
    let csum = proxmox::tools::hex_to_digest(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.fixed_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}

fn finish_backup (
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}

pub fn api_method_dynamic_chunk_index() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        dynamic_chunk_index,
        ObjectSchema::new(r###"
Download the dynamic chunk index from the previous backup.
Simply returns an empty list if this is the first backup.
"###
        )
        .required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
    )
}

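// Stream the dynamic chunk index of the previous snapshot (if any) to the
// client and register those chunks as known on the server side, so the
// client can reference unchanged chunks without re-uploading them.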
fn dynamic_chunk_index(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();

    if !archive_name.ends_with(".didx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let empty_response = {
        Response::builder()
            .status(StatusCode::OK)
            .body(Body::empty())?
    };

    let last_backup = match &env.last_backup {
        Some(info) => info,
        None => return Ok(Box::new(future::ok(empty_response))),
    };

    let mut path = last_backup.backup_dir.relative_path();
    path.push(&archive_name);

    let index = match env.datastore.open_dynamic_reader(path) {
        Ok(index) => index,
        Err(_) => {
            env.log(format!("there is no last backup for archive '{}'", archive_name));
            return Ok(Box::new(future::ok(empty_response)));
        }
    };

    env.log(format!("download last backup index for archive '{}'", archive_name));

    let count = index.index_count();
    for pos in 0..count {
        let (start, end, digest) = index.chunk_info(pos)?;
        let size = (end - start) as u32;
        env.register_chunk(digest, size)?;
    }

    let reader = DigestListEncoder::new(Box::new(index));

    let stream = WrappedReaderStream::new(reader);

    // fixme: set size, content type?
    let response = http::Response::builder()
        .status(200)
        .body(Body::wrap_stream(stream))?;

    Ok(Box::new(future::ok(response)))
}

pub fn api_method_fixed_chunk_index() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        fixed_chunk_index,
        ObjectSchema::new(r###"
Download the fixed chunk index from the previous backup.
Simply returns an empty list if this is the first backup.
"###
        )
        .required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
    )
}

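// Fixed-index counterpart of dynamic_chunk_index: chunk sizes are derived
// from the index's fixed chunk_size (the last chunk may be smaller).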
fn fixed_chunk_index(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();

    if !archive_name.ends_with(".fidx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let empty_response = {
        Response::builder()
            .status(StatusCode::OK)
            .body(Body::empty())?
    };

    let last_backup = match &env.last_backup {
        Some(info) => info,
        None => return Ok(Box::new(future::ok(empty_response))),
    };

    let mut path = last_backup.backup_dir.relative_path();
    path.push(&archive_name);

    let index = match env.datastore.open_fixed_reader(path) {
        Ok(index) => index,
        Err(_) => {
            env.log(format!("there is no last backup for archive '{}'", archive_name));
            return Ok(Box::new(future::ok(empty_response)));
        }
    };

    env.log(format!("download last backup index for archive '{}'", archive_name));

    let count = index.index_count();
    let image_size = index.index_bytes();
    for pos in 0..count {
        let digest = index.index_digest(pos).unwrap();
        // Note: last chunk can be smaller
        let start = (pos*index.chunk_size) as u64;
        let mut end = start + index.chunk_size as u64;
        if end > image_size { end = image_size; }
        let size = (end - start) as u32;
        env.register_chunk(*digest, size)?;
    }

    let reader = DigestListEncoder::new(Box::new(index));

    let stream = WrappedReaderStream::new(reader);

    // fixme: set size, content type?
    let response = http::Response::builder()
        .status(200)
        .body(Body::wrap_stream(stream))?;

    Ok(Box::new(future::ok(response)))
}