]>
Commit | Line | Data |
---|---|---|
f7d4e4b5 | 1 | use anyhow::{bail, format_err, Error}; |
92ac375a | 2 | use futures::*; |
152764ec | 3 | use hyper::header::{HeaderValue, UPGRADE}; |
152764ec | 4 | use hyper::http::request::Parts; |
cad540e9 | 5 | use hyper::{Body, Response, StatusCode}; |
f9578f3c | 6 | use serde_json::{json, Value}; |
152764ec | 7 | |
9ea4bce4 | 8 | use proxmox::{sortable, identity, list_subdirs_api_method}; |
9f9f7eef | 9 | use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission}; |
cad540e9 WB |
10 | use proxmox::api::router::SubdirMap; |
11 | use proxmox::api::schema::*; | |
552c2259 | 12 | |
b957aa81 | 13 | use crate::tools; |
42a87f7b | 14 | use crate::server::{WorkerTask, H2Service}; |
21ee7912 | 15 | use crate::backup::*; |
6762db70 | 16 | use crate::api2::types::*; |
54552dda | 17 | use crate::config::acl::PRIV_DATASTORE_BACKUP; |
365f0f72 | 18 | use crate::config::cached_user_info::CachedUserInfo; |
152764ec | 19 | |
d95ced64 DM |
20 | mod environment; |
21 | use environment::*; | |
22 | ||
21ee7912 DM |
23 | mod upload_chunk; |
24 | use upload_chunk::*; | |
25 | ||
255f378a DM |
/// Entry-point router for this module: only supports the HTTP
/// connection-upgrade method that switches to the backup protocol.
pub const ROUTER: Router = Router::new()
    .upgrade(&API_METHOD_UPGRADE_BACKUP);
28 | ||
/// API method definition for the protocol upgrade call.
///
/// Parameters: `store` (datastore name), `backup-type`, `backup-id`,
/// `backup-time`, and an optional `debug` flag.
#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_backup_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
        ]),
    )
).access(
    // Note: parameter 'store' is no uri parameter, so we need to test inside function body
    Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
    &Permission::Anybody
);
152764ec | 47 | |
/// Handle the HTTP connection-upgrade request that starts a new backup.
///
/// Validates permissions and parameters, creates/locks the backup group and
/// snapshot directory, then spawns a worker task that serves the backup API
/// over an HTTP/2 connection on the upgraded stream. Returns a
/// `101 Switching Protocols` response once the worker is spawned.
fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);

        // NOTE(review): unwrap assumes the REST layer always sets an
        // authenticated user before dispatching here — TODO confirm.
        let username = rpcenv.get_user().unwrap();

        let store = tools::required_string_param(&param, "store")?.to_owned();

        // 'store' is not a URI parameter, so the ACL check must happen here
        // (see the .access() note on API_METHOD_UPGRADE_BACKUP).
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

        let datastore = DataStore::lookup_datastore(&store)?;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        // The client must announce exactly our protocol id in the Upgrade header.
        let protocols = parts
            .headers
            .get("UPGRADE")
            .ok_or_else(|| format_err!("missing Upgrade header"))?
            .to_str()?;

        if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
            bail!("invalid protocol name");
        }

        // Connection upgrades only exist in HTTP/1.x; HTTP/2 streams cannot be upgraded.
        if parts.version >= http::version::Version::HTTP_2 {
            bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
        }

        let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

        let env_type = rpcenv.env_type();

        let backup_group = BackupGroup::new(backup_type, backup_id);
        // Creates the group if new; otherwise returns the existing owner.
        let owner = datastore.create_backup_group(&backup_group, &username)?;
        // permission check
        if owner != username { // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", username, owner);
        }

        let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
        let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time);

        // Refuse snapshots that would not be strictly newer than the last one.
        if let Some(last) = &last_backup {
            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                bail!("backup timestamp is older than last backup.");
            }
        }

        // lock backup group to only allow one backup per group at a time
        let _group_guard = backup_group.lock(&datastore.base_path())?;

        let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
        if !is_new { bail!("backup directory already exists."); }

        WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
            let mut env = BackupEnvironment::new(
                env_type, username.clone(), worker.clone(), datastore, backup_dir);

            env.debug = debug;
            env.last_backup = last_backup;

            env.log(format!("starting new backup on datastore '{}': {:?}", store, path));

            let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

            let abort_future = worker.abort_future();

            let env2 = env.clone();

            // Once the connection is upgraded, serve the backup sub-API over HTTP/2.
            let mut req_fut = req_body
                .on_upgrade()
                .map_err(Error::from)
                .and_then(move |conn| {
                    env2.debug("protocol upgrade done");

                    let mut http = hyper::server::conn::Http::new();
                    http.http2_only(true);
                    // increase window size: todo - find optimal size
                    let window_size = 32*1024*1024; // max = (1 << 31) - 2
                    http.http2_initial_stream_window_size(window_size);
                    http.http2_initial_connection_window_size(window_size);

                    http.serve_connection(conn, service)
                        .map_err(Error::from)
                });
            let mut abort_future = abort_future
                .map(|_| Err(format_err!("task aborted")));

            async move {
                // keep flock until task ends
                let _group_guard = _group_guard;

                // Run the connection until it finishes or the task is aborted,
                // whichever comes first.
                let res = select!{
                    req = req_fut => req,
                    abrt = abort_future => abrt,
                };

                // Combine the connection result with whether the client
                // called 'finish'; unfinished backups are removed.
                match (res, env.ensure_finished()) {
                    (Ok(_), Ok(())) => {
                        env.log("backup finished successfully");
                        Ok(())
                    },
                    (Err(err), Ok(())) => {
                        // ignore errors after finish
                        env.log(format!("backup had errors but finished: {}", err));
                        Ok(())
                    },
                    (Ok(_), Err(err)) => {
                        env.log(format!("backup ended and finish failed: {}", err));
                        env.log("removing unfinished backup");
                        env.remove_backup()?;
                        Err(err)
                    },
                    (Err(err), Err(_)) => {
                        env.log(format!("backup failed: {}", err));
                        env.log("removing failed backup");
                        env.remove_backup()?;
                        Err(err)
                    },
                }
            }
        })?;

        // Tell the client the protocol switch is accepted; the actual work
        // continues inside the worker task above.
        let response = Response::builder()
            .status(StatusCode::SWITCHING_PROTOCOLS)
            .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
            .body(Body::empty())?;

        Ok(response)
    }.boxed()
}
92ac375a | 189 | |
255f378a DM |
/// Sub-directory map of the backup protocol API, served over the upgraded
/// HTTP/2 connection (see `BACKUP_API_ROUTER`).
pub const BACKUP_API_SUBDIRS: SubdirMap = &[
    (
        "blob", &Router::new()
            .upload(&API_METHOD_UPLOAD_BLOB)
    ),
    (
        "dynamic_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK)
    ),
    (
        "dynamic_close", &Router::new()
            .post(&API_METHOD_CLOSE_DYNAMIC_INDEX)
    ),
    (
        "dynamic_index", &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND)
    ),
    (
        "finish", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&finish_backup),
                    &ObjectSchema::new("Mark backup as finished.", &[])
                )
            )
    ),
    (
        "fixed_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_FIXED_CHUNK)
    ),
    (
        "fixed_close", &Router::new()
            .post(&API_METHOD_CLOSE_FIXED_INDEX)
    ),
    (
        "fixed_index", &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND)
    ),
    (
        "previous", &Router::new()
            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
    ),
    (
        "speedtest", &Router::new()
            .upload(&API_METHOD_UPLOAD_SPEEDTEST)
    ),
];
239 | ||
/// Router handed to `H2Service` for the backup session; GET lists the
/// available sub-directories.
pub const BACKUP_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
    .subdirs(BACKUP_API_SUBDIRS);
243 | ||
3d229a4a DM |
/// API method definition: create a dynamic chunk index writer
/// (POST /dynamic_index).
#[sortable]
pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
);
254 | ||
f9578f3c DM |
255 | fn create_dynamic_index( |
256 | param: Value, | |
3d229a4a | 257 | _info: &ApiMethod, |
dd5495d6 | 258 | rpcenv: &mut dyn RpcEnvironment, |
f9578f3c DM |
259 | ) -> Result<Value, Error> { |
260 | ||
261 | let env: &BackupEnvironment = rpcenv.as_ref(); | |
f9578f3c | 262 | |
8bea85b4 | 263 | let name = tools::required_string_param(¶m, "archive-name")?.to_owned(); |
f9578f3c | 264 | |
4af0ee05 | 265 | let archive_name = name.clone(); |
0997967d | 266 | if !archive_name.ends_with(".didx") { |
a42fa400 | 267 | bail!("wrong archive extension: '{}'", archive_name); |
f9578f3c DM |
268 | } |
269 | ||
6b95c7df | 270 | let mut path = env.backup_dir.relative_path(); |
f9578f3c DM |
271 | path.push(archive_name); |
272 | ||
976595e1 | 273 | let index = env.datastore.create_dynamic_writer(&path)?; |
8bea85b4 | 274 | let wid = env.register_dynamic_writer(index, name)?; |
f9578f3c | 275 | |
bb105f9d | 276 | env.log(format!("created new dynamic index {} ({:?})", wid, path)); |
f9578f3c | 277 | |
bb105f9d | 278 | Ok(json!(wid)) |
f9578f3c DM |
279 | } |
280 | ||
/// API method definition: create a fixed chunk index writer
/// (POST /fixed_index). `reuse-csum` enables incremental mode.
#[sortable]
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_fixed_index),
    &ObjectSchema::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
            ("size", false, &IntegerSchema::new("File size.")
                .minimum(1)
                .schema()
            ),
            ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
                csum and reuse index for incremental backup if it matches.").schema()),
        ]),
    )
);
a42fa400 DM |
297 | |
298 | fn create_fixed_index( | |
299 | param: Value, | |
300 | _info: &ApiMethod, | |
dd5495d6 | 301 | rpcenv: &mut dyn RpcEnvironment, |
a42fa400 DM |
302 | ) -> Result<Value, Error> { |
303 | ||
304 | let env: &BackupEnvironment = rpcenv.as_ref(); | |
305 | ||
a42fa400 DM |
306 | let name = tools::required_string_param(¶m, "archive-name")?.to_owned(); |
307 | let size = tools::required_integer_param(¶m, "size")? as usize; | |
facd9801 | 308 | let reuse_csum = param["reuse-csum"].as_str(); |
a42fa400 | 309 | |
4af0ee05 | 310 | let archive_name = name.clone(); |
0997967d | 311 | if !archive_name.ends_with(".fidx") { |
a42fa400 | 312 | bail!("wrong archive extension: '{}'", archive_name); |
a42fa400 DM |
313 | } |
314 | ||
315 | let mut path = env.backup_dir.relative_path(); | |
facd9801 | 316 | path.push(&archive_name); |
a42fa400 DM |
317 | |
318 | let chunk_size = 4096*1024; // todo: ?? | |
319 | ||
facd9801 SR |
320 | // do incremental backup if csum is set |
321 | let mut reader = None; | |
322 | let mut incremental = false; | |
323 | if let Some(csum) = reuse_csum { | |
324 | incremental = true; | |
325 | let last_backup = match &env.last_backup { | |
326 | Some(info) => info, | |
327 | None => { | |
328 | bail!("cannot reuse index - no previous backup exists"); | |
329 | } | |
330 | }; | |
331 | ||
332 | let mut last_path = last_backup.backup_dir.relative_path(); | |
333 | last_path.push(&archive_name); | |
334 | ||
335 | let index = match env.datastore.open_fixed_reader(last_path) { | |
336 | Ok(index) => index, | |
337 | Err(_) => { | |
338 | bail!("cannot reuse index - no previous backup exists for archive"); | |
339 | } | |
340 | }; | |
341 | ||
342 | let (old_csum, _) = index.compute_csum(); | |
343 | let old_csum = proxmox::tools::digest_to_hex(&old_csum); | |
344 | if old_csum != csum { | |
345 | bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup", | |
346 | csum, old_csum); | |
347 | } | |
348 | ||
349 | reader = Some(index); | |
350 | } | |
351 | ||
352 | let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?; | |
353 | ||
354 | if let Some(reader) = reader { | |
355 | writer.clone_data_from(&reader)?; | |
356 | } | |
357 | ||
358 | let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?; | |
a42fa400 DM |
359 | |
360 | env.log(format!("created new fixed index {} ({:?})", wid, path)); | |
361 | ||
362 | Ok(json!(wid)) | |
363 | } | |
364 | ||
/// API method definition: append already-uploaded chunks to a dynamic index
/// writer (PUT /dynamic_index). `digest-list` and `offset-list` must be the
/// same length.
#[sortable]
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&dynamic_append),
    &ObjectSchema::new(
        "Append chunk to dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            ),
        ]),
    )
);
82ab7230 DM |
397 | |
398 | fn dynamic_append ( | |
399 | param: Value, | |
400 | _info: &ApiMethod, | |
dd5495d6 | 401 | rpcenv: &mut dyn RpcEnvironment, |
82ab7230 DM |
402 | ) -> Result<Value, Error> { |
403 | ||
404 | let wid = tools::required_integer_param(¶m, "wid")? as usize; | |
aa1b2e04 | 405 | let digest_list = tools::required_array_param(¶m, "digest-list")?; |
417cb073 | 406 | let offset_list = tools::required_array_param(¶m, "offset-list")?; |
aa1b2e04 | 407 | |
417cb073 DM |
408 | if offset_list.len() != digest_list.len() { |
409 | bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len()); | |
410 | } | |
411 | ||
82ab7230 DM |
412 | let env: &BackupEnvironment = rpcenv.as_ref(); |
413 | ||
39e60bd6 DM |
414 | env.debug(format!("dynamic_append {} chunks", digest_list.len())); |
415 | ||
417cb073 | 416 | for (i, item) in digest_list.iter().enumerate() { |
aa1b2e04 | 417 | let digest_str = item.as_str().unwrap(); |
bffd40d6 | 418 | let digest = proxmox::tools::hex_to_digest(digest_str)?; |
417cb073 | 419 | let offset = offset_list[i].as_u64().unwrap(); |
aa1b2e04 | 420 | let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?; |
39e60bd6 | 421 | |
417cb073 | 422 | env.dynamic_writer_append_chunk(wid, offset, size, &digest)?; |
82ab7230 | 423 | |
add5861e | 424 | env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size)); |
aa1b2e04 | 425 | } |
82ab7230 DM |
426 | |
427 | Ok(Value::Null) | |
428 | } | |
429 | ||
/// API method definition: append already-uploaded chunks to a fixed index
/// writer (PUT /fixed_index). `digest-list` and `offset-list` must be the
/// same length.
#[sortable]
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&fixed_append),
    &ObjectSchema::new(
        "Append chunk to fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            )
        ]),
    )
);
a42fa400 DM |
462 | |
463 | fn fixed_append ( | |
464 | param: Value, | |
465 | _info: &ApiMethod, | |
dd5495d6 | 466 | rpcenv: &mut dyn RpcEnvironment, |
a42fa400 DM |
467 | ) -> Result<Value, Error> { |
468 | ||
469 | let wid = tools::required_integer_param(¶m, "wid")? as usize; | |
470 | let digest_list = tools::required_array_param(¶m, "digest-list")?; | |
471 | let offset_list = tools::required_array_param(¶m, "offset-list")?; | |
472 | ||
a42fa400 DM |
473 | if offset_list.len() != digest_list.len() { |
474 | bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len()); | |
475 | } | |
476 | ||
477 | let env: &BackupEnvironment = rpcenv.as_ref(); | |
478 | ||
39e60bd6 DM |
479 | env.debug(format!("fixed_append {} chunks", digest_list.len())); |
480 | ||
a42fa400 DM |
481 | for (i, item) in digest_list.iter().enumerate() { |
482 | let digest_str = item.as_str().unwrap(); | |
bffd40d6 | 483 | let digest = proxmox::tools::hex_to_digest(digest_str)?; |
a42fa400 DM |
484 | let offset = offset_list[i].as_u64().unwrap(); |
485 | let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?; | |
39e60bd6 | 486 | |
a42fa400 DM |
487 | env.fixed_writer_append_chunk(wid, offset, size, &digest)?; |
488 | ||
add5861e | 489 | env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size)); |
a42fa400 DM |
490 | } |
491 | ||
492 | Ok(Value::Null) | |
493 | } | |
494 | ||
/// API method definition: close a dynamic index writer
/// (POST /dynamic_close). Chunk count, size and checksum are used to verify
/// the server received everything.
#[sortable]
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_dynamic_index),
    &ObjectSchema::new(
        "Close dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                    .minimum(1)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data.")
                    .minimum(1)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);
a2077252 DM |
527 | |
528 | fn close_dynamic_index ( | |
529 | param: Value, | |
530 | _info: &ApiMethod, | |
dd5495d6 | 531 | rpcenv: &mut dyn RpcEnvironment, |
a2077252 DM |
532 | ) -> Result<Value, Error> { |
533 | ||
534 | let wid = tools::required_integer_param(¶m, "wid")? as usize; | |
8bea85b4 DM |
535 | let chunk_count = tools::required_integer_param(¶m, "chunk-count")? as u64; |
536 | let size = tools::required_integer_param(¶m, "size")? as u64; | |
fb6026b6 DM |
537 | let csum_str = tools::required_string_param(¶m, "csum")?; |
538 | let csum = proxmox::tools::hex_to_digest(csum_str)?; | |
a2077252 DM |
539 | |
540 | let env: &BackupEnvironment = rpcenv.as_ref(); | |
541 | ||
fb6026b6 | 542 | env.dynamic_writer_close(wid, chunk_count, size, csum)?; |
a2077252 | 543 | |
add5861e | 544 | env.log(format!("successfully closed dynamic index {}", wid)); |
bb105f9d | 545 | |
a2077252 DM |
546 | Ok(Value::Null) |
547 | } | |
548 | ||
/// API method definition: close a fixed index writer (POST /fixed_close).
/// Chunk count and size are ignored for incremental backups (minimum 0).
#[sortable]
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_fixed_index),
    &ObjectSchema::new(
        "Close fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);
a42fa400 DM |
581 | |
582 | fn close_fixed_index ( | |
583 | param: Value, | |
584 | _info: &ApiMethod, | |
dd5495d6 | 585 | rpcenv: &mut dyn RpcEnvironment, |
a42fa400 DM |
586 | ) -> Result<Value, Error> { |
587 | ||
588 | let wid = tools::required_integer_param(¶m, "wid")? as usize; | |
589 | let chunk_count = tools::required_integer_param(¶m, "chunk-count")? as u64; | |
590 | let size = tools::required_integer_param(¶m, "size")? as u64; | |
fb6026b6 DM |
591 | let csum_str = tools::required_string_param(¶m, "csum")?; |
592 | let csum = proxmox::tools::hex_to_digest(csum_str)?; | |
a42fa400 DM |
593 | |
594 | let env: &BackupEnvironment = rpcenv.as_ref(); | |
595 | ||
fb6026b6 | 596 | env.fixed_writer_close(wid, chunk_count, size, csum)?; |
a42fa400 | 597 | |
add5861e | 598 | env.log(format!("successfully closed fixed index {}", wid)); |
a42fa400 DM |
599 | |
600 | Ok(Value::Null) | |
601 | } | |
a2077252 | 602 | |
372724af DM |
/// Mark the backup session as finished (POST /finish).
///
/// Delegates to the backup environment; a backup never marked finished is
/// removed when the connection ends (see `upgrade_to_backup_protocol`).
fn finish_backup (
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}
a2077252 | 616 | |
/// API method definition: download an archive from the previous backup of
/// this group (GET /previous).
#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
        ]),
    )
);
a42fa400 | 627 | |
/// Stream an archive file from the previous backup snapshot to the client.
///
/// For index archives (.fidx/.didx) the contained chunks are first
/// registered with the environment, so the client may reference them in the
/// new backup without re-uploading; the previous snapshot is also registered
/// as a base snapshot. Errors if no previous backup exists.
fn download_previous(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let env: &BackupEnvironment = rpcenv.as_ref();

        let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();

        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => bail!("no previous backup"),
        };

        let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
        path.push(&archive_name);

        {
            // Open an index reader when the archive is an index type;
            // plain blobs need no chunk registration.
            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
                ArchiveType::FixedIndex => {
                    let index = env.datastore.open_fixed_reader(&path)?;
                    Some(Box::new(index))
                }
                ArchiveType::DynamicIndex => {
                    let index = env.datastore.open_dynamic_reader(&path)?;
                    Some(Box::new(index))
                }
                _ => { None }
            };
            if let Some(index) = index {
                env.log(format!("register chunks in '{}' from previous backup.", archive_name));
                env.register_base_snapshot(last_backup.backup_dir.clone());

                // Make every chunk of the previous index known to this
                // session so the client can reuse it.
                for pos in 0..index.index_count() {
                    let info = index.chunk_info(pos).unwrap();
                    let size = info.range.end - info.range.start;
                    env.register_chunk(info.digest, size as u32)?;
                }
            }
        }

        env.log(format!("download '{}' from previous backup.", archive_name));
        crate::api2::helpers::create_download_response(path).await
    }.boxed()
}