]> git.proxmox.com Git - proxmox-backup.git/blame - src/api2/backup.rs
update backup api for incremental backup
[proxmox-backup.git] / src / api2 / backup.rs
CommitLineData
f7d4e4b5 1use anyhow::{bail, format_err, Error};
92ac375a 2use futures::*;
152764ec 3use hyper::header::{HeaderValue, UPGRADE};
152764ec 4use hyper::http::request::Parts;
cad540e9 5use hyper::{Body, Response, StatusCode};
f9578f3c 6use serde_json::{json, Value};
152764ec 7
9ea4bce4 8use proxmox::{sortable, identity, list_subdirs_api_method};
9f9f7eef 9use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
cad540e9
WB
10use proxmox::api::router::SubdirMap;
11use proxmox::api::schema::*;
552c2259 12
b957aa81 13use crate::tools;
42a87f7b 14use crate::server::{WorkerTask, H2Service};
21ee7912 15use crate::backup::*;
6762db70 16use crate::api2::types::*;
54552dda 17use crate::config::acl::PRIV_DATASTORE_BACKUP;
365f0f72 18use crate::config::cached_user_info::CachedUserInfo;
152764ec 19
d95ced64
DM
20mod environment;
21use environment::*;
22
21ee7912
DM
23mod upload_chunk;
24use upload_chunk::*;
25
255f378a
DM
/// Entry point of the backup API: only offers the HTTP connection
/// upgrade that switches the client to the backup protocol.
pub const ROUTER: Router = Router::new()
    .upgrade(&API_METHOD_UPGRADE_BACKUP);
28
// Method descriptor for the protocol upgrade call; the actual work is
// done by `upgrade_to_backup_protocol` below.
#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_backup_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            // optional: enables verbose logging in the spawned worker task
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
        ]),
    )
).access(
    // Note: parameter 'store' is no uri parameter, so we need to test inside function body
    Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
    &Permission::Anybody
);
152764ec 47
/// Handle the HTTP/1.x -> backup protocol upgrade.
///
/// Validates parameters and permissions, creates the (new) backup
/// snapshot directory, then spawns a worker task that serves the backup
/// API (`BACKUP_API_ROUTER`) over HTTP/2 on the upgraded connection.
/// On success a `101 Switching Protocols` response is returned.
fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

async move {
    let debug = param["debug"].as_bool().unwrap_or(false);

    let username = rpcenv.get_user().unwrap();

    let store = tools::required_string_param(&param, "store")?.to_owned();

    // 'store' is not an uri parameter, so the privilege check happens here
    let user_info = CachedUserInfo::new()?;
    user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    // the client must announce the expected protocol via the Upgrade header
    let protocols = parts
        .headers
        .get("UPGRADE")
        .ok_or_else(|| format_err!("missing Upgrade header"))?
        .to_str()?;

    if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
        bail!("invalid protocol name");
    }

    // the upgrade handshake only works on HTTP/1.x connections
    if parts.version >= http::version::Version::HTTP_2 {
        bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
    }

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    let env_type = rpcenv.env_type();

    let backup_group = BackupGroup::new(backup_type, backup_id);
    let owner = datastore.create_backup_group(&backup_group, &username)?;
    // permission check
    if owner != username { // only the owner is allowed to create additional snapshots
        bail!("backup owner check failed ({} != {})", username, owner);
    }

    let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
    let backup_dir = BackupDir::new_with_group(backup_group, backup_time);

    // new snapshots must be strictly newer than the previous one
    if let Some(last) = &last_backup {
        if backup_dir.backup_time() <= last.backup_dir.backup_time() {
            bail!("backup timestamp is older than last backup.");
        }
        // fixme: abort if last backup is still running - howto test?
        // Idea: write upid into a file inside snapshot dir. then test if
        // it is still running here.
    }

    let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
    if !is_new { bail!("backup directory already exists."); }

    WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
        let mut env = BackupEnvironment::new(
            env_type, username.clone(), worker.clone(), datastore, backup_dir);

        env.debug = debug;
        env.last_backup = last_backup;

        env.log(format!("starting new backup on datastore '{}': {:?}", store, path));

        let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

        let abort_future = worker.abort_future();

        let env2 = env.clone();

        // serve the backup API over HTTP/2 on the upgraded connection
        let mut req_fut = req_body
            .on_upgrade()
            .map_err(Error::from)
            .and_then(move |conn| {
                env2.debug("protocol upgrade done");

                let mut http = hyper::server::conn::Http::new();
                http.http2_only(true);
                // increase window size: todo - find optiomal size
                let window_size = 32*1024*1024; // max = (1 << 31) - 2
                http.http2_initial_stream_window_size(window_size);
                http.http2_initial_connection_window_size(window_size);

                http.serve_connection(conn, service)
                    .map_err(Error::from)
            });
        let mut abort_future = abort_future
            .map(|_| Err(format_err!("task aborted")));

        async move {
            // run until either the connection ends or the task is aborted
            let res = select!{
                req = req_fut => req,
                abrt = abort_future => abrt,
            };

            // combine connection outcome with the session's finish state;
            // sessions that never called 'finish' are removed again
            match (res, env.ensure_finished()) {
                (Ok(_), Ok(())) => {
                    env.log("backup finished successfully");
                    Ok(())
                },
                (Err(err), Ok(())) => {
                    // ignore errors after finish
                    env.log(format!("backup had errors but finished: {}", err));
                    Ok(())
                },
                (Ok(_), Err(err)) => {
                    env.log(format!("backup ended and finish failed: {}", err));
                    env.log("removing unfinished backup");
                    env.remove_backup()?;
                    Err(err)
                },
                (Err(err), Err(_)) => {
                    env.log(format!("backup failed: {}", err));
                    env.log("removing failed backup");
                    env.remove_backup()?;
                    Err(err)
                },
            }
        }
    })?;

    let response = Response::builder()
        .status(StatusCode::SWITCHING_PROTOCOLS)
        .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
        .body(Body::empty())?;

    Ok(response)
}.boxed()
}
92ac375a 186
255f378a
DM
// Sub-routes of the backup protocol API, served over the upgraded
// HTTP/2 connection. Entries are kept sorted by name.
pub const BACKUP_API_SUBDIRS: SubdirMap = &[
    (
        "blob", &Router::new()
            .upload(&API_METHOD_UPLOAD_BLOB)
    ),
    (
        "dynamic_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK)
    ),
    (
        "dynamic_close", &Router::new()
            .post(&API_METHOD_CLOSE_DYNAMIC_INDEX)
    ),
    (
        "dynamic_index", &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND)
    ),
    (
        "finish", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&finish_backup),
                    &ObjectSchema::new("Mark backup as finished.", &[])
                )
            )
    ),
    (
        "fixed_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_FIXED_CHUNK)
    ),
    (
        "fixed_close", &Router::new()
            .post(&API_METHOD_CLOSE_FIXED_INDEX)
    ),
    (
        "fixed_index", &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND)
    ),
    (
        "previous", &Router::new()
            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
    ),
    (
        "speedtest", &Router::new()
            .upload(&API_METHOD_UPLOAD_SPEEDTEST)
    ),
];

// Router used by the H2Service after the protocol upgrade; GET lists
// the available subdirectories.
pub const BACKUP_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
    .subdirs(BACKUP_API_SUBDIRS);
240
3d229a4a
DM
// Method descriptor: create a new dynamic chunk index (.didx) writer.
#[sortable]
pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
);
251
f9578f3c
DM
252fn create_dynamic_index(
253 param: Value,
3d229a4a 254 _info: &ApiMethod,
dd5495d6 255 rpcenv: &mut dyn RpcEnvironment,
f9578f3c
DM
256) -> Result<Value, Error> {
257
258 let env: &BackupEnvironment = rpcenv.as_ref();
f9578f3c 259
8bea85b4 260 let name = tools::required_string_param(&param, "archive-name")?.to_owned();
f9578f3c 261
4af0ee05 262 let archive_name = name.clone();
0997967d 263 if !archive_name.ends_with(".didx") {
a42fa400 264 bail!("wrong archive extension: '{}'", archive_name);
f9578f3c
DM
265 }
266
6b95c7df 267 let mut path = env.backup_dir.relative_path();
f9578f3c
DM
268 path.push(archive_name);
269
976595e1 270 let index = env.datastore.create_dynamic_writer(&path)?;
8bea85b4 271 let wid = env.register_dynamic_writer(index, name)?;
f9578f3c 272
bb105f9d 273 env.log(format!("created new dynamic index {} ({:?})", wid, path));
f9578f3c 274
bb105f9d 275 Ok(json!(wid))
f9578f3c
DM
276}
277
// Method descriptor: create a fixed chunk index (.fidx) writer.
// 'reuse-csum' switches the writer into incremental mode.
#[sortable]
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_fixed_index),
    &ObjectSchema::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
            ("size", false, &IntegerSchema::new("File size.")
                .minimum(1)
                .schema()
            ),
            ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
                csum and reuse index for incremental backup if it matches.").schema()),
        ]),
    )
);
a42fa400
DM
294
295fn create_fixed_index(
296 param: Value,
297 _info: &ApiMethod,
dd5495d6 298 rpcenv: &mut dyn RpcEnvironment,
a42fa400
DM
299) -> Result<Value, Error> {
300
301 let env: &BackupEnvironment = rpcenv.as_ref();
302
a42fa400
DM
303 let name = tools::required_string_param(&param, "archive-name")?.to_owned();
304 let size = tools::required_integer_param(&param, "size")? as usize;
facd9801 305 let reuse_csum = param["reuse-csum"].as_str();
a42fa400 306
4af0ee05 307 let archive_name = name.clone();
0997967d 308 if !archive_name.ends_with(".fidx") {
a42fa400 309 bail!("wrong archive extension: '{}'", archive_name);
a42fa400
DM
310 }
311
312 let mut path = env.backup_dir.relative_path();
facd9801 313 path.push(&archive_name);
a42fa400
DM
314
315 let chunk_size = 4096*1024; // todo: ??
316
facd9801
SR
317 // do incremental backup if csum is set
318 let mut reader = None;
319 let mut incremental = false;
320 if let Some(csum) = reuse_csum {
321 incremental = true;
322 let last_backup = match &env.last_backup {
323 Some(info) => info,
324 None => {
325 bail!("cannot reuse index - no previous backup exists");
326 }
327 };
328
329 let mut last_path = last_backup.backup_dir.relative_path();
330 last_path.push(&archive_name);
331
332 let index = match env.datastore.open_fixed_reader(last_path) {
333 Ok(index) => index,
334 Err(_) => {
335 bail!("cannot reuse index - no previous backup exists for archive");
336 }
337 };
338
339 let (old_csum, _) = index.compute_csum();
340 let old_csum = proxmox::tools::digest_to_hex(&old_csum);
341 if old_csum != csum {
342 bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
343 csum, old_csum);
344 }
345
346 reader = Some(index);
347 }
348
349 let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
350
351 if let Some(reader) = reader {
352 writer.clone_data_from(&reader)?;
353 }
354
355 let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;
a42fa400
DM
356
357 env.log(format!("created new fixed index {} ({:?})", wid, path));
358
359 Ok(json!(wid))
360}
361
// Method descriptor: append chunk digests/offsets to a dynamic index.
#[sortable]
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&dynamic_append),
    &ObjectSchema::new(
        "Append chunk to dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            ),
        ]),
    )
);
82ab7230
DM
394
395fn dynamic_append (
396 param: Value,
397 _info: &ApiMethod,
dd5495d6 398 rpcenv: &mut dyn RpcEnvironment,
82ab7230
DM
399) -> Result<Value, Error> {
400
401 let wid = tools::required_integer_param(&param, "wid")? as usize;
aa1b2e04 402 let digest_list = tools::required_array_param(&param, "digest-list")?;
417cb073 403 let offset_list = tools::required_array_param(&param, "offset-list")?;
aa1b2e04 404
417cb073
DM
405 if offset_list.len() != digest_list.len() {
406 bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
407 }
408
82ab7230
DM
409 let env: &BackupEnvironment = rpcenv.as_ref();
410
39e60bd6
DM
411 env.debug(format!("dynamic_append {} chunks", digest_list.len()));
412
417cb073 413 for (i, item) in digest_list.iter().enumerate() {
aa1b2e04 414 let digest_str = item.as_str().unwrap();
bffd40d6 415 let digest = proxmox::tools::hex_to_digest(digest_str)?;
417cb073 416 let offset = offset_list[i].as_u64().unwrap();
aa1b2e04 417 let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
39e60bd6 418
417cb073 419 env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;
82ab7230 420
add5861e 421 env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
aa1b2e04 422 }
82ab7230
DM
423
424 Ok(Value::Null)
425}
426
// Method descriptor: append chunk digests/offsets to a fixed index.
#[sortable]
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&fixed_append),
    &ObjectSchema::new(
        "Append chunk to fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            )
        ]),
    )
);
a42fa400
DM
459
460fn fixed_append (
461 param: Value,
462 _info: &ApiMethod,
dd5495d6 463 rpcenv: &mut dyn RpcEnvironment,
a42fa400
DM
464) -> Result<Value, Error> {
465
466 let wid = tools::required_integer_param(&param, "wid")? as usize;
467 let digest_list = tools::required_array_param(&param, "digest-list")?;
468 let offset_list = tools::required_array_param(&param, "offset-list")?;
469
a42fa400
DM
470 if offset_list.len() != digest_list.len() {
471 bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
472 }
473
474 let env: &BackupEnvironment = rpcenv.as_ref();
475
39e60bd6
DM
476 env.debug(format!("fixed_append {} chunks", digest_list.len()));
477
a42fa400
DM
478 for (i, item) in digest_list.iter().enumerate() {
479 let digest_str = item.as_str().unwrap();
bffd40d6 480 let digest = proxmox::tools::hex_to_digest(digest_str)?;
a42fa400
DM
481 let offset = offset_list[i].as_u64().unwrap();
482 let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
39e60bd6 483
a42fa400
DM
484 env.fixed_writer_append_chunk(wid, offset, size, &digest)?;
485
add5861e 486 env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
a42fa400
DM
487 }
488
489 Ok(Value::Null)
490}
491
// Method descriptor: close a dynamic index writer, passing the values
// the server uses to verify it received all chunks/data.
#[sortable]
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_dynamic_index),
    &ObjectSchema::new(
        "Close dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                    .minimum(1)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data.")
                    .minimum(1)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);
a2077252
DM
524
525fn close_dynamic_index (
526 param: Value,
527 _info: &ApiMethod,
dd5495d6 528 rpcenv: &mut dyn RpcEnvironment,
a2077252
DM
529) -> Result<Value, Error> {
530
531 let wid = tools::required_integer_param(&param, "wid")? as usize;
8bea85b4
DM
532 let chunk_count = tools::required_integer_param(&param, "chunk-count")? as u64;
533 let size = tools::required_integer_param(&param, "size")? as u64;
fb6026b6
DM
534 let csum_str = tools::required_string_param(&param, "csum")?;
535 let csum = proxmox::tools::hex_to_digest(csum_str)?;
a2077252
DM
536
537 let env: &BackupEnvironment = rpcenv.as_ref();
538
fb6026b6 539 env.dynamic_writer_close(wid, chunk_count, size, csum)?;
a2077252 540
add5861e 541 env.log(format!("successfully closed dynamic index {}", wid));
bb105f9d 542
a2077252
DM
543 Ok(Value::Null)
544}
545
// Method descriptor: close a fixed index writer. chunk-count/size may
// be 0 and are ignored for incremental backups (see schema text).
#[sortable]
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_fixed_index),
    &ObjectSchema::new(
        "Close fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);
a42fa400
DM
578
579fn close_fixed_index (
580 param: Value,
581 _info: &ApiMethod,
dd5495d6 582 rpcenv: &mut dyn RpcEnvironment,
a42fa400
DM
583) -> Result<Value, Error> {
584
585 let wid = tools::required_integer_param(&param, "wid")? as usize;
586 let chunk_count = tools::required_integer_param(&param, "chunk-count")? as u64;
587 let size = tools::required_integer_param(&param, "size")? as u64;
fb6026b6
DM
588 let csum_str = tools::required_string_param(&param, "csum")?;
589 let csum = proxmox::tools::hex_to_digest(csum_str)?;
a42fa400
DM
590
591 let env: &BackupEnvironment = rpcenv.as_ref();
592
fb6026b6 593 env.fixed_writer_close(wid, chunk_count, size, csum)?;
a42fa400 594
add5861e 595 env.log(format!("successfully closed fixed index {}", wid));
a42fa400
DM
596
597 Ok(Value::Null)
598}
a2077252 599
372724af
DM
600fn finish_backup (
601 _param: Value,
602 _info: &ApiMethod,
dd5495d6 603 rpcenv: &mut dyn RpcEnvironment,
372724af
DM
604) -> Result<Value, Error> {
605
606 let env: &BackupEnvironment = rpcenv.as_ref();
607
608 env.finish_backup()?;
add5861e 609 env.log("successfully finished backup");
372724af
DM
610
611 Ok(Value::Null)
612}
a2077252 613
// Method descriptor: download an archive from the previous snapshot
// (used by clients preparing an incremental backup).
#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
        ]),
    )
);
a42fa400 624
b957aa81 625fn download_previous(
d3611366
DM
626 _parts: Parts,
627 _req_body: Body,
628 param: Value,
255f378a 629 _info: &ApiMethod,
dd5495d6 630 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 631) -> ApiResponseFuture {
d3611366 632
ad51d02a
DM
633 async move {
634 let env: &BackupEnvironment = rpcenv.as_ref();
d3611366 635
ad51d02a 636 let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();
d3611366 637
ad51d02a
DM
638 let last_backup = match &env.last_backup {
639 Some(info) => info,
b957aa81 640 None => bail!("no previous backup"),
ad51d02a
DM
641 };
642
b957aa81 643 env.log(format!("download '{}' from previous backup.", archive_name));
a42fa400 644
b957aa81 645 let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
ad51d02a
DM
646 path.push(&archive_name);
647
b957aa81 648 crate::api2::helpers::create_download_response(path).await
ad51d02a 649 }.boxed()
a42fa400 650}