//! Backup protocol (HTTP2 upgrade)
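//!
//! A backup session starts as a regular HTTP/1.1 request that is upgraded
//! (via [`API_METHOD_UPGRADE_BACKUP`]) to the proxmox backup protocol. The
//! upgraded connection then carries HTTP/2 traffic, served by an
//! [`H2Service`] that routes to the endpoints in [`BACKUP_API_ROUTER`]
//! (blob/chunk upload, index create/append/close, `finish`).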

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Response, Request, StatusCode};
use serde_json::{json, Value};
use hex::FromHex;

use proxmox_sys::{sortable, identity};
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
    ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, SubdirMap, Permission,
};
use proxmox_schema::*;

use pbs_api_types::{
    Authid, VerifyState, SnapshotVerifyState,
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
    CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_BACKUP, BACKUP_ARCHIVE_NAME_SCHEMA,
};
use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use pbs_config::CachedUserInfo;
use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};
use proxmox_rest_server::{WorkerTask, H2Service};

mod environment;
use environment::*;

mod upload_chunk;
use upload_chunk::*;

pub const ROUTER: Router = Router::new()
    .upgrade(&API_METHOD_UPGRADE_BACKUP);

#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_backup_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
            ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
        ]),
    )
).access(
    // Note: parameter 'store' is not a URI parameter, so we need to check it inside the function body
    Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
    &Permission::Anybody
);

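// A rough sketch of the handshake this handler implements (illustrative
// only; the exact request path depends on how the REST server mounts ROUTER):
//
//   GET /api2/json/backup?store=...&backup-type=...&backup-id=...&backup-time=... HTTP/1.1
//   Upgrade: proxmox-backup-protocol-v1
//
//   HTTP/1.1 101 Switching Protocols
//   Upgrade: proxmox-backup-protocol-v1
//
// After the 101 response the same connection switches to HTTP/2, handled by
// the H2Service set up below.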
fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);
        let benchmark = param["benchmark"].as_bool().unwrap_or(false);

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let store = required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

        let datastore = DataStore::lookup_datastore(&store)?;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let protocols = parts
            .headers
            .get("UPGRADE")
            .ok_or_else(|| format_err!("missing Upgrade header"))?
            .to_str()?;

        if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
            bail!("invalid protocol name");
        }

        if parts.version >= http::version::Version::HTTP_2 {
            bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
        }

        let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

        let env_type = rpcenv.env_type();

        let backup_group = BackupGroup::new(backup_type, backup_id);

        let worker_type = if backup_type == "host" && backup_id == "benchmark" {
            if !benchmark {
                bail!("unable to run benchmark without the --benchmark flag");
            }
            "benchmark"
        } else {
            if benchmark {
                bail!("the --benchmark flag is only allowed on 'host/benchmark'");
            }
            "backup"
        };

        // lock backup group to only allow one backup per group at a time
        let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;

        // permission check
        let correct_owner = owner == auth_id
            || (owner.is_token()
                && Authid::from(owner.user().clone()) == auth_id);
        if !correct_owner && worker_type != "benchmark" {
            // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", auth_id, owner);
        }

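        // Pick the most recent snapshot in this group as the base for an
        // incremental backup, but only if it is not known to have failed
        // verification (a missing verify state is treated as valid).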
        let last_backup = {
            let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
            if let Some(info) = info {
                let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
                let verify = manifest.unprotected["verify_state"].clone();
                match serde_json::from_value::<SnapshotVerifyState>(verify) {
                    Ok(verify) => {
                        match verify.state {
                            VerifyState::Ok => Some(info),
                            VerifyState::Failed => None,
                        }
                    },
                    Err(_) => {
                        // no verify state found, treat as valid
                        Some(info)
                    }
                }
            } else {
                None
            }
        };

        let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

        let _last_guard = if let Some(last) = &last_backup {
            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                bail!("backup timestamp is not newer than the last backup.");
            }

            // lock last snapshot to prevent forgetting/pruning it during backup
            let full_path = datastore.snapshot_path(&last.backup_dir);
            Some(lock_dir_noblock_shared(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
        } else {
            None
        };

        let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
        if !is_new { bail!("backup directory already exists."); }

        WorkerTask::spawn(worker_type, Some(worker_id), auth_id.to_string(), true, move |worker| {
            let mut env = BackupEnvironment::new(
                env_type, auth_id, worker.clone(), datastore, backup_dir);

            env.debug = debug;
            env.last_backup = last_backup;

            env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));

            let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

            let abort_future = worker.abort_future();

            let env2 = env.clone();

            let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
                .map_err(Error::from)
                .and_then(move |conn| {
                    env2.debug("protocol upgrade done");

                    let mut http = hyper::server::conn::Http::new();
                    http.http2_only(true);
                    // increase window size: todo - find optimal size
                    let window_size = 32*1024*1024; // max = (1 << 31) - 2
                    http.http2_initial_stream_window_size(window_size);
                    http.http2_initial_connection_window_size(window_size);
                    http.http2_max_frame_size(4*1024*1024);

                    let env3 = env2.clone();
                    http.serve_connection(conn, service)
                        .map(move |result| {
                            match result {
                                Err(err) => {
                                    // Avoid "Transport endpoint is not connected" (os error 107)
                                    // fixme: find a better way to test for that error
                                    if err.to_string().starts_with("connection error") && env3.finished() {
                                        Ok(())
                                    } else {
                                        Err(Error::from(err))
                                    }
                                }
                                Ok(()) => Ok(()),
                            }
                        })
                });
            let mut abort_future = abort_future
                .map(|_| Err(format_err!("task aborted")));

            async move {
                // keep flock until task ends
                let _group_guard = _group_guard;
                let snap_guard = snap_guard;
                let _last_guard = _last_guard;

                let res = select!{
                    req = req_fut => req,
                    abrt = abort_future => abrt,
                };
                if benchmark {
                    env.log("benchmark finished successfully");
                    proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                    return Ok(());
                }

                let verify = |env: BackupEnvironment| {
                    if let Err(err) = env.verify_after_complete(snap_guard) {
                        env.log(format!(
                            "backup finished, but starting the requested verify task failed: {}",
                            err
                        ));
                    }
                };

                match (res, env.ensure_finished()) {
                    (Ok(_), Ok(())) => {
                        env.log("backup finished successfully");
                        verify(env);
                        Ok(())
                    },
                    (Err(err), Ok(())) => {
                        // ignore errors after finish
                        env.log(format!("backup had errors but finished: {}", err));
                        verify(env);
                        Ok(())
                    },
                    (Ok(_), Err(err)) => {
                        env.log(format!("backup ended and finish failed: {}", err));
                        env.log("removing unfinished backup");
                        proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                        Err(err)
                    },
                    (Err(err), Err(_)) => {
                        env.log(format!("backup failed: {}", err));
                        env.log("removing failed backup");
                        proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                        Err(err)
                    },
                }
            }
        })?;

        let response = Response::builder()
            .status(StatusCode::SWITCHING_PROTOCOLS)
            .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
            .body(Body::empty())?;

        Ok(response)
    }.boxed()
}

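// The HTTP/2 API available inside an established backup session.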
const BACKUP_API_SUBDIRS: SubdirMap = &[
    (
        "blob", &Router::new()
            .upload(&API_METHOD_UPLOAD_BLOB)
    ),
    (
        "dynamic_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK)
    ),
    (
        "dynamic_close", &Router::new()
            .post(&API_METHOD_CLOSE_DYNAMIC_INDEX)
    ),
    (
        "dynamic_index", &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND)
    ),
    (
        "finish", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&finish_backup),
                    &ObjectSchema::new("Mark backup as finished.", &[])
                )
            )
    ),
    (
        "fixed_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_FIXED_CHUNK)
    ),
    (
        "fixed_close", &Router::new()
            .post(&API_METHOD_CLOSE_FIXED_INDEX)
    ),
    (
        "fixed_index", &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND)
    ),
    (
        "previous", &Router::new()
            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
    ),
    (
        "previous_backup_time", &Router::new()
            .get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME)
    ),
    (
        "speedtest", &Router::new()
            .upload(&API_METHOD_UPLOAD_SPEEDTEST)
    ),
];

pub const BACKUP_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
    .subdirs(BACKUP_API_SUBDIRS);

#[sortable]
pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([
            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
);

fn create_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();

    let archive_name = name.clone();
    if !archive_name.ends_with(".didx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let index = env.datastore.create_dynamic_writer(&path)?;
    let wid = env.register_dynamic_writer(index, name)?;

    env.log(format!("created new dynamic index {} ({:?})", wid, path));

    Ok(json!(wid))
}

#[sortable]
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_fixed_index),
    &ObjectSchema::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
            ("size", false, &IntegerSchema::new("File size.")
                .minimum(1)
                .schema()
            ),
            ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
                csum and reuse index for incremental backup if it matches.").schema()),
        ]),
    )
);

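// Incremental fixed-index backup: when the client supplies `reuse-csum` and
// it matches the csum of the same archive in the base snapshot, the old
// index is cloned into the new writer so only changed chunks need uploading.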
fn create_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();
    let size = required_integer_param(&param, "size")? as usize;
    let reuse_csum = param["reuse-csum"].as_str();

    let archive_name = name.clone();
    if !archive_name.ends_with(".fidx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(&archive_name);

    let chunk_size = 4096*1024; // 4 MiB fixed chunk size; todo: make configurable?

    // do incremental backup if csum is set
    let mut reader = None;
    let mut incremental = false;
    if let Some(csum) = reuse_csum {
        incremental = true;
        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => {
                bail!("cannot reuse index - no valid previous backup exists");
            }
        };

        let mut last_path = last_backup.backup_dir.relative_path();
        last_path.push(&archive_name);

        let index = match env.datastore.open_fixed_reader(last_path) {
            Ok(index) => index,
            Err(_) => {
                bail!("cannot reuse index - no previous backup exists for archive");
            }
        };

        let (old_csum, _) = index.compute_csum();
        let old_csum = hex::encode(&old_csum);
        if old_csum != csum {
            bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
                csum, old_csum);
        }

        reader = Some(index);
    }

    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;

    if let Some(reader) = reader {
        writer.clone_data_from(&reader)?;
    }

    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

    Ok(json!(wid))
}

#[sortable]
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&dynamic_append),
    &ObjectSchema::new(
        "Append chunk to dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            ),
        ]),
    )
);

fn dynamic_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("dynamic_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = <[u8; 32]>::from_hex(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&fixed_append),
    &ObjectSchema::new(
        "Append chunk to fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            )
        ]),
    )
);

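// Fixed-index variant of `dynamic_append`; offsets refer to positions in
// the fixed chunk grid of a writer created by `create_fixed_index`.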
fn fixed_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("fixed_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = <[u8; 32]>::from_hex(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_dynamic_index),
    &ObjectSchema::new(
        "Close dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                    .minimum(1)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data.")
                    .minimum(1)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

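// Closing a writer cross-checks the client-supplied chunk count, size and
// digest-list checksum against the server-side writer state before the
// index file is written out.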
fn close_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = <[u8; 32]>::from_hex(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.dynamic_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_fixed_index),
    &ObjectSchema::new(
        "Close fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = <[u8; 32]>::from_hex(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.fixed_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}

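// Called by the client once all uploads are done; only a successfully
// finished backup is kept (see the result handling in
// `upgrade_to_backup_protocol`).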
fn finish_backup(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&get_previous_backup_time),
    &ObjectSchema::new(
        "Get previous backup time.",
        &[],
    )
);

fn get_previous_backup_time(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let backup_time = env.last_backup.as_ref().map(|info| info.backup_dir.backup_time());

    Ok(json!(backup_time))
}

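// Serve an archive from the base snapshot so the client can reuse its data.
// As a side effect, all chunks referenced by that archive are registered as
// known chunks for this session, enabling incremental uploads.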
#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([
            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA)
        ]),
    )
);

fn download_previous(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let env: &BackupEnvironment = rpcenv.as_ref();

        let archive_name = required_string_param(&param, "archive-name")?.to_owned();

        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => bail!("no valid previous backup"),
        };

        let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
        path.push(&archive_name);

        {
            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
                ArchiveType::FixedIndex => {
                    let index = env.datastore.open_fixed_reader(&path)?;
                    Some(Box::new(index))
                }
                ArchiveType::DynamicIndex => {
                    let index = env.datastore.open_dynamic_reader(&path)?;
                    Some(Box::new(index))
                }
                _ => { None }
            };
            if let Some(index) = index {
                env.log(format!("register chunks in '{}' from previous backup.", archive_name));

                for pos in 0..index.index_count() {
                    let info = index.chunk_info(pos).unwrap();
                    let size = info.range.end - info.range.start;
                    env.register_chunk(info.digest, size as u32)?;
                }
            }
        }

        env.log(format!("download '{}' from previous backup.", archive_name));
        crate::api2::helpers::create_download_response(path).await
    }.boxed()
}