//! Backup protocol (HTTP2 upgrade)

use anyhow::{bail, format_err, Error};
use futures::*;
use hex::FromHex;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};

use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
    ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::sortable;

use pbs_api_types::{
    Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState,
    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
    BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};
use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use proxmox_rest_server::{H2Service, WorkerTask};
use proxmox_sys::fs::lock_dir_noblock_shared;

mod environment;
use environment::*;

mod upload_chunk;
use upload_chunk::*;

pub const ROUTER: Router = Router::new().upgrade(&API_METHOD_UPGRADE_BACKUP);

#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_backup_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-ns", true, &BACKUP_NAMESPACE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
            ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
        ]),
    )
).access(
    // Note: parameter 'store' is not a URI parameter, so we need to check it inside the function body
    Some("Requires DATASTORE_BACKUP on /datastore/{store}[/{namespace}] and being the owner of the group"),
    &Permission::Anybody
);

pub(crate) fn optional_ns_param(param: &Value) -> Result<BackupNamespace, Error> {
    match param.get("backup-ns") {
        Some(Value::String(ns)) => ns.parse(),
        None => Ok(BackupNamespace::root()),
        _ => bail!("invalid backup-ns parameter"),
    }
}

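// A minimal sketch of the parameter shapes `optional_ns_param` accepts,
// assuming `BackupNamespace` parses slash-separated paths as it is used
// elsewhere in this crate; the namespace value is illustrative.
#[cfg(test)]
mod optional_ns_param_tests {
    use super::*;
    use serde_json::json;

    #[test]
    fn parses_missing_and_string_namespaces() {
        // an absent parameter falls back to the root namespace
        assert!(optional_ns_param(&json!({})).unwrap().is_root());

        // a string value is parsed as a namespace path
        assert!(!optional_ns_param(&json!({ "backup-ns": "a/b" }))
            .unwrap()
            .is_root());

        // any other JSON type is rejected
        assert!(optional_ns_param(&json!({ "backup-ns": 5 })).is_err());
    }
}
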
fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);
        let benchmark = param["benchmark"].as_bool().unwrap_or(false);

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let store = required_string_param(&param, "store")?.to_owned();
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir_arg = pbs_api_types::BackupDir::deserialize(&param)?;

        let user_info = CachedUserInfo::new()?;

        let privs = if backup_ns.is_root() {
            user_info.lookup_privs(&auth_id, &["datastore", &store])
        } else {
            user_info.lookup_privs(&auth_id, &["datastore", &store, &backup_ns.to_string()])
        };
        if privs & PRIV_DATASTORE_BACKUP == 0 {
            proxmox_router::http_bail!(FORBIDDEN, "permission check failed");
        }

        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

        let protocols = parts
            .headers
            .get("UPGRADE")
            .ok_or_else(|| format_err!("missing Upgrade header"))?
            .to_str()?;

        if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
            bail!("invalid protocol name");
        }

        if parts.version >= http::version::Version::HTTP_2 {
            bail!(
                "unexpected http version '{:?}' (expected version < 2)",
                parts.version
            );
        }

        if !datastore.namespace_path(&backup_ns).exists() {
            proxmox_router::http_bail!(NOT_FOUND, "namespace not found");
        }

        let worker_id = format!("{}:{}/{}", store, backup_dir_arg.ty(), backup_dir_arg.id());

        let env_type = rpcenv.env_type();

        let backup_group = datastore.backup_group(backup_ns, backup_dir_arg.group.clone());

        let worker_type = if backup_group.backup_type() == BackupType::Host
            && backup_group.backup_id() == "benchmark"
        {
            if !benchmark {
                bail!("unable to run benchmark without --benchmark flag");
            }
            "benchmark"
        } else {
            if benchmark {
                bail!("benchmark flag is only allowed on 'host/benchmark'");
            }
            "backup"
        };

        // lock backup group to only allow one backup per group at a time
        let (owner, _group_guard) = datastore.create_locked_backup_group(
            backup_group.backup_ns(),
            backup_group.as_ref(),
            &auth_id,
        )?;

        // permission check
        let correct_owner =
            owner == auth_id || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
        if !correct_owner && worker_type != "benchmark" {
            // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", auth_id, owner);
        }

        let last_backup = {
            let info = backup_group.last_backup(true).unwrap_or(None);
            if let Some(info) = info {
                let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
                let verify = manifest.unprotected["verify_state"].clone();
                match serde_json::from_value::<SnapshotVerifyState>(verify) {
                    Ok(verify) => match verify.state {
                        VerifyState::Ok => Some(info),
                        VerifyState::Failed => None,
                    },
                    Err(_) => {
                        // no verify state found, treat as valid
                        Some(info)
                    }
                }
            } else {
                None
            }
        };

        let backup_dir = backup_group.backup_dir(backup_dir_arg.time)?;

        let _last_guard = if let Some(last) = &last_backup {
            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                bail!("backup timestamp is older than last backup.");
            }

            // lock last snapshot to prevent forgetting/pruning it during backup
            let full_path = last.backup_dir.full_path();
            Some(lock_dir_noblock_shared(
                &full_path,
                "snapshot",
                "base snapshot is already locked by another operation",
            )?)
        } else {
            None
        };

        let (path, is_new, snap_guard) =
            datastore.create_locked_backup_dir(backup_dir.backup_ns(), backup_dir.as_ref())?;
        if !is_new {
            bail!("backup directory already exists.");
        }

        WorkerTask::spawn(
            worker_type,
            Some(worker_id),
            auth_id.to_string(),
            true,
            move |worker| {
                let mut env = BackupEnvironment::new(
                    env_type,
                    auth_id,
                    worker.clone(),
                    datastore,
                    backup_dir,
                );

                env.debug = debug;
                env.last_backup = last_backup;

                env.log(format!(
                    "starting new {} on datastore '{}': {:?}",
                    worker_type, store, path
                ));

                let service =
                    H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

                let abort_future = worker.abort_future();

                let env2 = env.clone();

                let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
                    .map_err(Error::from)
                    .and_then(move |conn| {
                        env2.debug("protocol upgrade done");

                        let mut http = hyper::server::conn::Http::new();
                        http.http2_only(true);
                        // increase window size: todo - find optimal size
                        let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
                        http.http2_initial_stream_window_size(window_size);
                        http.http2_initial_connection_window_size(window_size);
                        http.http2_max_frame_size(4 * 1024 * 1024);

                        let env3 = env2.clone();
                        http.serve_connection(conn, service).map(move |result| {
                            match result {
                                Err(err) => {
                                    // Avoid "Transport endpoint is not connected" (os error 107)
                                    // fixme: find a better way to test for that error
                                    if err.to_string().starts_with("connection error")
                                        && env3.finished()
                                    {
                                        Ok(())
                                    } else {
                                        Err(Error::from(err))
                                    }
                                }
                                Ok(()) => Ok(()),
                            }
                        })
                    });
                let mut abort_future = abort_future.map(|_| Err(format_err!("task aborted")));

                async move {
                    // keep flock until task ends
                    let _group_guard = _group_guard;
                    let snap_guard = snap_guard;
                    let _last_guard = _last_guard;

                    let res = select! {
                        req = req_fut => req,
                        abrt = abort_future => abrt,
                    };
                    if benchmark {
                        env.log("benchmark finished successfully");
                        proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                        return Ok(());
                    }

                    let verify = |env: BackupEnvironment| {
                        if let Err(err) = env.verify_after_complete(snap_guard) {
                            env.log(format!(
                                "backup finished, but starting the requested verify task failed: {}",
                                err
                            ));
                        }
                    };

                    match (res, env.ensure_finished()) {
                        (Ok(_), Ok(())) => {
                            env.log("backup finished successfully");
                            verify(env);
                            Ok(())
                        }
                        (Err(err), Ok(())) => {
                            // ignore errors after finish
                            env.log(format!("backup had errors but finished: {}", err));
                            verify(env);
                            Ok(())
                        }
                        (Ok(_), Err(err)) => {
                            env.log(format!("backup ended and finish failed: {}", err));
                            env.log("removing unfinished backup");
                            proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                            Err(err)
                        }
                        (Err(err), Err(_)) => {
                            env.log(format!("backup failed: {}", err));
                            env.log("removing failed backup");
                            proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                            Err(err)
                        }
                    }
                }
            },
        )?;

        let response = Response::builder()
            .status(StatusCode::SWITCHING_PROTOCOLS)
            .header(
                UPGRADE,
                HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()),
            )
            .body(Body::empty())?;

        Ok(response)
    }
    .boxed()
}

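// A minimal client-side sketch of the handshake this handler expects,
// assuming an authenticated connection is already established (TLS and
// ticket handling live outside this module and are omitted). The
// `Upgrade` header must carry the exact protocol ID checked above; the
// path and query parameter values are illustrative.
#[allow(dead_code)]
fn upgrade_request_sketch() -> Result<Request<Body>, Error> {
    let req = Request::builder()
        .method("GET")
        .uri("/api2/json/backup?store=store1&backup-type=host&backup-id=demo&backup-time=1700000000")
        .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
        .body(Body::empty())?;
    // The server replies `101 Switching Protocols`; afterwards the client
    // must speak HTTP/2 on the upgraded connection (`http2_only` above).
    Ok(req)
}
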
const BACKUP_API_SUBDIRS: SubdirMap = &[
    ("blob", &Router::new().upload(&API_METHOD_UPLOAD_BLOB)),
    (
        "dynamic_chunk",
        &Router::new().upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK),
    ),
    (
        "dynamic_close",
        &Router::new().post(&API_METHOD_CLOSE_DYNAMIC_INDEX),
    ),
    (
        "dynamic_index",
        &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND),
    ),
    (
        "finish",
        &Router::new().post(&ApiMethod::new(
            &ApiHandler::Sync(&finish_backup),
            &ObjectSchema::new("Mark backup as finished.", &[]),
        )),
    ),
    (
        "fixed_chunk",
        &Router::new().upload(&API_METHOD_UPLOAD_FIXED_CHUNK),
    ),
    (
        "fixed_close",
        &Router::new().post(&API_METHOD_CLOSE_FIXED_INDEX),
    ),
    (
        "fixed_index",
        &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND),
    ),
    (
        "previous",
        &Router::new().download(&API_METHOD_DOWNLOAD_PREVIOUS),
    ),
    (
        "previous_backup_time",
        &Router::new().get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME),
    ),
    (
        "speedtest",
        &Router::new().upload(&API_METHOD_UPLOAD_SPEEDTEST),
    ),
];

pub const BACKUP_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
    .subdirs(BACKUP_API_SUBDIRS);

#[sortable]
pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),]),
    ),
);

fn create_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();

    let archive_name = name.clone();
    if !archive_name.ends_with(".didx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let index = env.datastore.create_dynamic_writer(&path)?;
    let wid = env.register_dynamic_writer(index, name)?;

    env.log(format!("created new dynamic index {} ({:?})", wid, path));

    Ok(json!(wid))
}

#[sortable]
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_fixed_index),
    &ObjectSchema::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
            (
                "size",
                false,
                &IntegerSchema::new("File size.").minimum(1).schema()
            ),
            (
                "reuse-csum",
                true,
                &StringSchema::new(
                    "If set, compare last backup's \
                    csum and reuse index for incremental backup if it matches."
                )
                .schema()
            ),
        ]),
    ),
);

fn create_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();
    let size = required_integer_param(&param, "size")? as usize;
    let reuse_csum = param["reuse-csum"].as_str();

    let archive_name = name.clone();
    if !archive_name.ends_with(".fidx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(&archive_name);

    let chunk_size = 4096 * 1024; // todo: ??

    // do incremental backup if csum is set
    let mut reader = None;
    let mut incremental = false;
    if let Some(csum) = reuse_csum {
        incremental = true;
        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => {
                bail!("cannot reuse index - no valid previous backup exists");
            }
        };

        let mut last_path = last_backup.backup_dir.relative_path();
        last_path.push(&archive_name);

        let index = match env.datastore.open_fixed_reader(last_path) {
            Ok(index) => index,
            Err(_) => {
                bail!("cannot reuse index - no previous backup exists for archive");
            }
        };

        let (old_csum, _) = index.compute_csum();
        let old_csum = hex::encode(&old_csum);
        if old_csum != csum {
            bail!(
                "expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
                csum,
                old_csum
            );
        }

        reader = Some(index);
    }

    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;

    if let Some(reader) = reader {
        writer.clone_data_from(&reader)?;
    }

    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

    Ok(json!(wid))
}

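// A minimal sketch of how a client might drive the `reuse-csum`
// parameter above, assuming it kept the csum of the previous fixed
// index (the helper name and values are illustrative): passing the old
// csum asks the server to clone the previous index, so only changed
// chunks need to be uploaded.
#[allow(dead_code)]
fn fixed_index_params_sketch(previous_csum: Option<&str>) -> Value {
    let mut params = json!({
        "archive-name": "drive-scsi0.img.fidx",
        "size": 16u64 * 1024 * 1024 * 1024,
    });
    if let Some(csum) = previous_csum {
        // request incremental mode; the server bails if this does not
        // match the csum it computes from the last backup's index
        params["reuse-csum"] = csum.into();
    }
    params
}
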
#[sortable]
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&dynamic_append),
    &ObjectSchema::new(
        "Append chunk to dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                )
                .schema()
            ),
        ]),
    ),
);

fn dynamic_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!(
            "offset list has wrong length ({} != {})",
            offset_list.len(),
            digest_list.len()
        );
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("dynamic_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = <[u8; 32]>::from_hex(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env
            .lookup_chunk(&digest)
            .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!(
            "successfully added chunk {} to dynamic index {} (offset {}, size {})",
            digest_str, wid, offset, size
        ));
    }

    Ok(Value::Null)
}

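// A minimal sketch of a matching `dynamic_append` payload, assuming the
// chunks were already uploaded via the `dynamic_chunk` endpoint (the
// helper name is illustrative): the two lists must be equally long and
// pairwise aligned, exactly as checked above.
#[allow(dead_code)]
fn dynamic_append_params_sketch(wid: usize, digests: &[[u8; 32]], offsets: &[u64]) -> Value {
    // one offset per digest; the handler rejects mismatched lengths
    assert_eq!(digests.len(), offsets.len());
    json!({
        "wid": wid,
        "digest-list": digests.iter().map(hex::encode).collect::<Vec<_>>(),
        "offset-list": offsets,
    })
}
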
#[sortable]
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&fixed_append),
    &ObjectSchema::new(
        "Append chunk to fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                )
                .schema()
            )
        ]),
    ),
);

fn fixed_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!(
            "offset list has wrong length ({} != {})",
            offset_list.len(),
            digest_list.len()
        );
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("fixed_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = <[u8; 32]>::from_hex(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env
            .lookup_chunk(&digest)
            .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!(
            "successfully added chunk {} to fixed index {} (offset {}, size {})",
            digest_str, wid, offset, size
        ));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_dynamic_index),
    &ObjectSchema::new(
        "Close dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new(
                    "Chunk count. This is used to verify that the server got all chunks."
                )
                .minimum(1)
                .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new(
                    "File size. This is used to verify that the server got all data."
                )
                .minimum(1)
                .schema()
            ),
            (
                "csum",
                false,
                &StringSchema::new("Digest list checksum.").schema()
            ),
        ]),
    ),
);

fn close_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = <[u8; 32]>::from_hex(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.dynamic_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}

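// A minimal sketch of the matching `dynamic_close` parameters, assuming
// the client tracked the chunk count and total byte size while
// appending (the helper name is illustrative); `csum` is the digest
// list checksum the server verifies against its own writer state.
#[allow(dead_code)]
fn dynamic_close_params_sketch(wid: usize, chunk_count: u64, size: u64, csum: [u8; 32]) -> Value {
    json!({
        "wid": wid,
        "chunk-count": chunk_count,
        "size": size,
        "csum": hex::encode(csum),
    })
}
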
#[sortable]
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_fixed_index),
    &ObjectSchema::new(
        "Close fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new(
                    "Chunk count. This is used to verify that the server got all chunks. \
                    Ignored for incremental backups."
                )
                .minimum(0)
                .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new(
                    "File size. This is used to verify that the server got all data. \
                    Ignored for incremental backups."
                )
                .minimum(0)
                .schema()
            ),
            (
                "csum",
                false,
                &StringSchema::new("Digest list checksum.").schema()
            ),
        ]),
    ),
);

fn close_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = <[u8; 32]>::from_hex(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.fixed_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}

fn finish_backup(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}

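// For orientation, one full protocol session over the upgraded HTTP/2
// connection typically looks like this (a sketch; the actual client
// calls live in the pbs-client crate, not in this module):
//   1. POST dynamic_index / fixed_index   -> returns a writer ID (wid)
//   2. POST dynamic_chunk / fixed_chunk   -> upload chunk data
//   3. PUT  dynamic_append / fixed_append -> reference chunks by digest
//   4. POST dynamic_close / fixed_close   -> verify count, size, csum
//   5. POST finish                        -> mark the snapshot complete
// `finish` must come last: `ensure_finished` in the upgrade handler
// treats anything else as an unfinished backup and removes it.
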
#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&get_previous_backup_time),
    &ObjectSchema::new("Get previous backup time.", &[]),
);

fn get_previous_backup_time(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let backup_time = env
        .last_backup
        .as_ref()
        .map(|info| info.backup_dir.backup_time());

    Ok(json!(backup_time))
}

#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA)]),
    ),
);

fn download_previous(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let env: &BackupEnvironment = rpcenv.as_ref();

        let archive_name = required_string_param(&param, "archive-name")?.to_owned();

        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => bail!("no valid previous backup"),
        };

        let mut path = last_backup.backup_dir.full_path();
        path.push(&archive_name);

        {
            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
                ArchiveType::FixedIndex => {
                    let index = env.datastore.open_fixed_reader(&path)?;
                    Some(Box::new(index))
                }
                ArchiveType::DynamicIndex => {
                    let index = env.datastore.open_dynamic_reader(&path)?;
                    Some(Box::new(index))
                }
                _ => None,
            };
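            // Registering the previous index's chunks as known lets the
            // client reference them in the new backup via the append
            // endpoints without re-uploading their data, which is what
            // makes incremental backups cheap.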
            if let Some(index) = index {
                env.log(format!(
                    "register chunks in '{}' from previous backup.",
                    archive_name
                ));

                for pos in 0..index.index_count() {
                    let info = index.chunk_info(pos).unwrap();
                    let size = info.range.end - info.range.start;
                    env.register_chunk(info.digest, size as u32)?;
                }
            }
        }

        env.log(format!("download '{}' from previous backup.", archive_name));
        crate::api2::helpers::create_download_response(path).await
    }
    .boxed()
}