]>
Commit | Line | Data |
---|---|---|
b017bbc4 DM |
1 | use std::path::Path; |
2 | use std::ffi::OsStr; | |
3 | use std::convert::TryFrom; | |
4 | ||
5 | use anyhow::{bail, format_err, Error}; | |
b9b4b312 | 6 | use serde_json::Value; |
b017bbc4 DM |
7 | |
8 | use proxmox::{ | |
b9b4b312 DM |
9 | api::{ |
10 | api, | |
11 | RpcEnvironment, | |
12 | RpcEnvironmentType, | |
13 | Router, | |
14 | section_config::SectionConfigData, | |
15 | }, | |
b017bbc4 DM |
16 | tools::{ |
17 | Uuid, | |
18 | io::ReadExt, | |
19 | fs::{ | |
20 | replace_file, | |
21 | CreateOptions, | |
22 | }, | |
23 | }, | |
b017bbc4 DM |
24 | }; |
25 | ||
26 | use crate::{ | |
8de9a991 | 27 | task_log, |
a80d72f9 | 28 | task::TaskState, |
b017bbc4 | 29 | tools::compute_file_csum, |
b9b4b312 DM |
30 | api2::types::{ |
31 | DATASTORE_SCHEMA, | |
9883b54c | 32 | DRIVE_NAME_SCHEMA, |
b9b4b312 DM |
33 | UPID_SCHEMA, |
34 | Authid, | |
b9b4b312 | 35 | }, |
25aa55b5 | 36 | config, |
b017bbc4 DM |
37 | backup::{ |
38 | archive_type, | |
39 | MANIFEST_BLOB_NAME, | |
40 | CryptMode, | |
41 | DataStore, | |
42 | BackupDir, | |
43 | DataBlob, | |
44 | BackupManifest, | |
45 | ArchiveType, | |
46 | IndexFile, | |
47 | DynamicIndexReader, | |
48 | FixedIndexReader, | |
49 | }, | |
50 | server::WorkerTask, | |
51 | tape::{ | |
52 | TAPE_STATUS_DIR, | |
53 | TapeRead, | |
54 | MediaId, | |
55 | MediaCatalog, | |
b9b4b312 DM |
56 | MediaPool, |
57 | Inventory, | |
b017bbc4 DM |
58 | file_formats::{ |
59 | PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, | |
60 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, | |
61 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, | |
62 | PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0, | |
63 | PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, | |
64 | MediaContentHeader, | |
f47e0357 | 65 | ChunkArchiveDecoder, |
b017bbc4 | 66 | }, |
37796ff7 DM |
67 | drive::{ |
68 | TapeDriver, | |
69 | request_and_load_media, | |
25aa55b5 | 70 | lock_tape_device, |
926d05ef | 71 | set_tape_device_state, |
25aa55b5 | 72 | }, |
b017bbc4 DM |
73 | }, |
74 | }; | |
75 | ||
b9b4b312 DM |
/// Tape restore API router: POST dispatches to the restore worker task.
pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_RESTORE);
78 | ||
79 | ||
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Restore data from media-set
///
/// Validates the requested media set against the inventory, locks the
/// tape drive, then spawns a worker task that restores every member
/// tape of the set into the target datastore. Returns the worker UPID.
pub fn restore(
    store: String,
    drive: String,
    media_set: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);
    let inventory = Inventory::load(status_path)?;

    let media_set_uuid = media_set.parse()?;

    // also verifies the media set exists in the inventory
    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(store.clone()),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            // mark the drive as busy with this task's UPID
            set_tape_device_state(&drive, &worker.upid().to_string())?;

            let _lock = MediaPool::lock(status_path, &pool)?;

            let members = inventory.compute_media_set_members(&media_set_uuid)?;

            let media_list = members.media_list();

            let mut media_id_list = Vec::new();

            // fingerprint of the first encrypted member found (if any),
            // reported below for operator visibility
            let mut encryption_key_fingerprint = None;

            // collect all member media; abort early if any member is missing,
            // since a partial media set cannot be restored completely
            for (seq_nr, media_uuid) in media_list.iter().enumerate() {
                match media_uuid {
                    None => {
                        bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
                    }
                    Some(media_uuid) => {
                        // NOTE(review): unwrap relies on compute_media_set_members()
                        // only returning uuids present in the inventory — confirm
                        let media_id = inventory.lookup_media(media_uuid).unwrap();
                        if let Some(ref set) = media_id.media_set_label { // always true here
                            if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() {
                                encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
                            }
                        }
                        media_id_list.push(media_id);
                    }
                }
            }

            task_log!(worker, "Restore mediaset '{}'", media_set);
            if let Some(fingerprint) = encryption_key_fingerprint {
                task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
            }
            task_log!(worker, "Pool: {}", pool);
            task_log!(worker, "Datastore: {}", store);
            task_log!(worker, "Drive: {}", drive);
            task_log!(
                worker,
                "Required media list: {}",
                media_id_list.iter()
                    .map(|media_id| media_id.label.label_text.as_str())
                    .collect::<Vec<&str>>()
                    .join(";")
            );

            // restore each member tape in sequence (may prompt for media changes)
            for media_id in media_id_list.iter() {
                request_and_restore_media(
                    &worker,
                    media_id,
                    &drive_config,
                    &drive,
                    &datastore,
                    &auth_id,
                )?;
            }

            task_log!(worker, "Restore mediaset '{}' done", media_set);

            // best-effort: clear the drive state; failure is only logged
            if let Err(err) = set_tape_device_state(&drive, "") {
                task_log!(
                    worker,
                    "could not unset drive state for {}: {}",
                    drive,
                    err
                );
            }

            Ok(())
        }
    )?;

    Ok(upid_str.into())
}
206 | ||
b017bbc4 DM |
/// Request and restore complete media without using existing catalog (create catalog instead)
///
/// Loads the given media into the drive (prompting the operator if
/// necessary), verifies it belongs to the expected media set, enables
/// drive decryption when the set is encrypted, and then restores the
/// full media content into `datastore`.
pub fn request_and_restore_media(
    worker: &WorkerTask,
    media_id: &MediaId,
    drive_config: &SectionConfigData,
    drive_name: &str,
    datastore: &DataStore,
    authid: &Authid,
) -> Result<(), Error> {

    // the media must be a member of a media set, else it cannot be restored
    let media_set_uuid = match media_id.media_set_label {
        None => bail!("restore_media: no media set - internal error"),
        Some(ref set) => &set.uuid,
    };

    // may block until the operator loads the correct tape
    let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label)?;

    // verify the tape actually loaded matches the requested media set
    match info.media_set_label {
        None => {
            bail!("missing media set label on media {} ({})",
                  media_id.label.label_text, media_id.label.uuid);
        }
        Some(ref set) => {
            if &set.uuid != media_set_uuid {
                bail!("wrong media set label on media {} ({} != {})",
                      media_id.label.label_text, media_id.label.uuid,
                      media_set_uuid);
            }
            // configure drive decryption keyed by (fingerprint, set uuid);
            // None disables encryption for unencrypted sets
            let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
                .map(|fp| (fp, set.uuid.clone()));

            drive.set_encryption(encrypt_fingerprint)?;
        }
    }

    // restore with a target, verbose=false
    restore_media(worker, &mut drive, &info, Some((datastore, authid)), false)
}
244 | ||
245 | /// Restore complete media content and catalog | |
246 | /// | |
247 | /// Only create the catalog if target is None. | |
248 | pub fn restore_media( | |
249 | worker: &WorkerTask, | |
250 | drive: &mut Box<dyn TapeDriver>, | |
251 | media_id: &MediaId, | |
252 | target: Option<(&DataStore, &Authid)>, | |
253 | verbose: bool, | |
254 | ) -> Result<(), Error> { | |
255 | ||
256 | let status_path = Path::new(TAPE_STATUS_DIR); | |
257 | let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?; | |
258 | ||
259 | loop { | |
260 | let current_file_number = drive.current_file_number()?; | |
261 | let reader = match drive.read_next_file()? { | |
262 | None => { | |
8de9a991 | 263 | task_log!(worker, "detected EOT after {} files", current_file_number); |
b017bbc4 DM |
264 | break; |
265 | } | |
266 | Some(reader) => reader, | |
267 | }; | |
268 | ||
b017bbc4 DM |
269 | restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?; |
270 | } | |
271 | ||
272 | MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?; | |
273 | ||
274 | Ok(()) | |
275 | } | |
276 | ||
/// Restore a single tape file and register its content in the catalog.
///
/// Reads the `MediaContentHeader` from `reader` and dispatches on its
/// content magic:
/// - snapshot archive: restored into the datastore when `target` is set
///   (and the snapshot is new), otherwise only registered in the catalog,
/// - chunk archive: chunks inserted into the datastore / catalog,
/// - label magics: rejected — labels must not appear as regular content.
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStore, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {

    // SAFETY-relevant: read_le_value reads a fixed-size binary header
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            // the header payload is the snapshot name (utf8)
            let snapshot = reader.read_exact_allocated(header.size as usize)?;
            let snapshot = std::str::from_utf8(&snapshot)
                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
            task_log!(worker, "Found snapshot archive: {} {}", current_file_number, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((datastore, authid)) = target.as_ref() {

                let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                if *authid != &owner { // only the owner is allowed to create additional snapshots
                    bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
                }

                let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
                let mut path = datastore.base_path();
                path.push(rel_path);

                // only restore if the snapshot does not exist yet;
                // existing snapshots are skipped (fall through below)
                if is_new {
                    task_log!(worker, "restore snapshot {}", backup_dir);

                    match restore_snapshot_archive(worker, reader, &path) {
                        Err(err) => {
                            // remove the partially restored snapshot dir
                            std::fs::remove_dir_all(&path)?;
                            bail!("restore snapshot {} failed - {}", backup_dir, err);
                        }
                        Ok(false) => {
                            // incomplete stream on tape: clean up, but not an error
                            std::fs::remove_dir_all(&path)?;
                            task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                        }
                        Ok(true) => {
                            catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                            catalog.commit_if_large()?;
                        }
                    }
                    return Ok(());
                }
            }

            // no target (catalog-only) or snapshot already exists:
            // consume the archive data and register it when complete
            reader.skip_to_end()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {

            task_log!(worker, "Found chunk archive: {}", current_file_number);
            let datastore = target.as_ref().map(|t| t.0);

            // None return means the stream was aborted without end marker;
            // nothing is registered in that case
            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
                task_log!(worker, "register {} chunks", chunks.len());
                catalog.end_chunk_archive()?;
                catalog.commit_if_large()?;
            }
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    catalog.commit()?;

    Ok(())
}
365 | ||
366 | fn restore_chunk_archive<'a>( | |
367 | worker: &WorkerTask, | |
368 | reader: Box<dyn 'a + TapeRead>, | |
369 | datastore: Option<&DataStore>, | |
370 | verbose: bool, | |
371 | ) -> Result<Option<Vec<[u8;32]>>, Error> { | |
372 | ||
373 | let mut chunks = Vec::new(); | |
374 | ||
375 | let mut decoder = ChunkArchiveDecoder::new(reader); | |
376 | ||
377 | let result: Result<_, Error> = proxmox::try_block!({ | |
0d2133db | 378 | while let Some((digest, blob)) = decoder.next_chunk()? { |
a80d72f9 DM |
379 | |
380 | worker.check_abort()?; | |
381 | ||
0d2133db FG |
382 | if let Some(datastore) = datastore { |
383 | let chunk_exists = datastore.cond_touch_chunk(&digest, false)?; | |
384 | if !chunk_exists { | |
385 | blob.verify_crc()?; | |
386 | ||
387 | if blob.crypt_mode()? == CryptMode::None { | |
388 | blob.decode(None, Some(&digest))?; // verify digest | |
389 | } | |
390 | if verbose { | |
8de9a991 | 391 | task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)); |
b017bbc4 | 392 | } |
0d2133db FG |
393 | datastore.insert_chunk(&blob, &digest)?; |
394 | } else if verbose { | |
8de9a991 | 395 | task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)); |
b017bbc4 | 396 | } |
0d2133db | 397 | } else if verbose { |
8de9a991 | 398 | task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest)); |
b017bbc4 | 399 | } |
0d2133db | 400 | chunks.push(digest); |
b017bbc4 DM |
401 | } |
402 | Ok(()) | |
403 | }); | |
404 | ||
405 | match result { | |
406 | Ok(()) => Ok(Some(chunks)), | |
407 | Err(err) => { | |
408 | let reader = decoder.reader(); | |
409 | ||
410 | // check if this stream is marked incomplete | |
411 | if let Ok(true) = reader.is_incomplete() { | |
412 | return Ok(Some(chunks)); | |
413 | } | |
414 | ||
415 | // check if this is an aborted stream without end marker | |
416 | if let Ok(false) = reader.has_end_marker() { | |
3b82f3ee | 417 | worker.log("missing stream end marker".to_string()); |
b017bbc4 DM |
418 | return Ok(None); |
419 | } | |
420 | ||
421 | // else the archive is corrupt | |
422 | Err(err) | |
423 | } | |
424 | } | |
425 | } | |
426 | ||
427 | fn restore_snapshot_archive<'a>( | |
a80d72f9 | 428 | worker: &WorkerTask, |
b017bbc4 DM |
429 | reader: Box<dyn 'a + TapeRead>, |
430 | snapshot_path: &Path, | |
431 | ) -> Result<bool, Error> { | |
432 | ||
433 | let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?; | |
a80d72f9 | 434 | match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) { |
38556bf6 | 435 | Ok(()) => Ok(true), |
b017bbc4 DM |
436 | Err(err) => { |
437 | let reader = decoder.input(); | |
438 | ||
439 | // check if this stream is marked incomplete | |
440 | if let Ok(true) = reader.is_incomplete() { | |
441 | return Ok(false); | |
442 | } | |
443 | ||
444 | // check if this is an aborted stream without end marker | |
445 | if let Ok(false) = reader.has_end_marker() { | |
446 | return Ok(false); | |
447 | } | |
448 | ||
449 | // else the archive is corrupt | |
38556bf6 | 450 | Err(err) |
b017bbc4 DM |
451 | } |
452 | } | |
453 | } | |
454 | ||
/// Decode a snapshot pxar archive into `snapshot_path` and verify it.
///
/// Expects a flat archive: a root directory containing only regular
/// files. The manifest blob is written to a `.tmp` file and only renamed
/// into place after all index/blob files listed in it verified their
/// checksums — so a crash mid-restore never leaves a snapshot that looks
/// complete.
fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    worker: &WorkerTask,
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {

    // the first entry must be the archive root directory
    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        worker.check_abort()?;

        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        // only plain files directly below the root are allowed
        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        // NOTE: set_extension replaces the last extension, e.g.
        // "catalog.didx" -> "catalog.tmp"
        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {

            // keep the manifest as .tmp for now; it is renamed into
            // place at the very end, after verification (see below)
            let blob = DataBlob::load_from_reader(&mut contents)?;
            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            // regular archive file: stream to .tmp, then rename into place
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    // verify checksum and size of every file listed in the manifest
    for item in manifest.files() {
        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&item.filename);

        match archive_type(&item.filename)? {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::Blob => {
                let mut tmpfile = std::fs::File::open(&archive_path)?;
                let (csum, size) = compute_file_csum(&mut tmpfile)?;
                manifest.verify_file(&item.filename, &csum, size)?;
            }
        }
    }

    // commit manifest — renaming it into place marks the snapshot complete
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
    }

    Ok(())
}