]>
Commit | Line | Data |
---|---|---|
b017bbc4 DM |
1 | use std::path::Path; |
2 | use std::ffi::OsStr; | |
3 | use std::convert::TryFrom; | |
4 | ||
5 | use anyhow::{bail, format_err, Error}; | |
b9b4b312 | 6 | use serde_json::Value; |
b017bbc4 DM |
7 | |
8 | use proxmox::{ | |
b9b4b312 DM |
9 | api::{ |
10 | api, | |
11 | RpcEnvironment, | |
12 | RpcEnvironmentType, | |
13 | Router, | |
14 | section_config::SectionConfigData, | |
15 | }, | |
b017bbc4 DM |
16 | tools::{ |
17 | Uuid, | |
18 | io::ReadExt, | |
19 | fs::{ | |
20 | replace_file, | |
21 | CreateOptions, | |
22 | }, | |
23 | }, | |
b017bbc4 DM |
24 | }; |
25 | ||
26 | use crate::{ | |
8de9a991 | 27 | task_log, |
a80d72f9 | 28 | task::TaskState, |
b017bbc4 | 29 | tools::compute_file_csum, |
b9b4b312 DM |
30 | api2::types::{ |
31 | DATASTORE_SCHEMA, | |
9883b54c | 32 | DRIVE_NAME_SCHEMA, |
b9b4b312 DM |
33 | UPID_SCHEMA, |
34 | Authid, | |
35 | MediaPoolConfig, | |
36 | }, | |
25aa55b5 | 37 | config, |
b017bbc4 DM |
38 | backup::{ |
39 | archive_type, | |
40 | MANIFEST_BLOB_NAME, | |
41 | CryptMode, | |
42 | DataStore, | |
43 | BackupDir, | |
44 | DataBlob, | |
45 | BackupManifest, | |
46 | ArchiveType, | |
47 | IndexFile, | |
48 | DynamicIndexReader, | |
49 | FixedIndexReader, | |
50 | }, | |
51 | server::WorkerTask, | |
52 | tape::{ | |
53 | TAPE_STATUS_DIR, | |
54 | TapeRead, | |
55 | MediaId, | |
56 | MediaCatalog, | |
b9b4b312 DM |
57 | MediaPool, |
58 | Inventory, | |
b017bbc4 DM |
59 | file_formats::{ |
60 | PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, | |
61 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, | |
62 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, | |
63 | PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0, | |
64 | PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, | |
65 | MediaContentHeader, | |
f47e0357 | 66 | ChunkArchiveDecoder, |
b017bbc4 | 67 | }, |
37796ff7 DM |
68 | drive::{ |
69 | TapeDriver, | |
70 | request_and_load_media, | |
25aa55b5 DM |
71 | lock_tape_device, |
72 | }, | |
b017bbc4 DM |
73 | }, |
74 | }; | |
75 | ||
b9b4b312 DM |
/// API router for this module: POST dispatches to the `restore` handler.
pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_RESTORE);
78 | ||
79 | ||
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "media-set": {
                description: "Media set UUID.",
                type: String,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Restore data from media-set
///
/// Looks up the media set in the inventory, verifies that its pool is
/// configured, locks the drive, and then spawns a worker task that
/// restores each member medium in sequence into the target datastore.
/// Returns the UPID of the spawned worker task.
pub fn restore(
    store: String,
    drive: String,
    media_set: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let status_path = Path::new(TAPE_STATUS_DIR);
    let inventory = Inventory::load(status_path)?;

    let media_set_uuid = media_set.parse()?;

    let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;

    // check if pool exists
    let (config, _digest) = config::media_pool::config()?;
    let _pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    // print to stdout when invoked from the CLI instead of the API daemon
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "tape-restore",
        Some(store.clone()),
        auth_id.clone(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            // also lock the pool so no concurrent writer reshuffles the media set
            let _lock = MediaPool::lock(status_path, &pool)?;

            let members = inventory.compute_media_set_members(&media_set_uuid)?;

            let media_list = members.media_list();

            let mut media_id_list = Vec::new();

            // fingerprint of the encryption key used by the set (if any);
            // taken from the first member that carries one
            let mut encryption_key_fingerprint = None;

            // A restore needs every member of the set, in order; bail out
            // early if any sequence number is missing.
            for (seq_nr, media_uuid) in media_list.iter().enumerate() {
                match media_uuid {
                    None => {
                        bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
                    }
                    Some(media_uuid) => {
                        let media_id = inventory.lookup_media(media_uuid).unwrap();
                        if let Some(ref set) = media_id.media_set_label { // always true here
                            if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() {
                                encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
                            }
                        }
                        media_id_list.push(media_id);
                    }
                }
            }

            task_log!(worker, "Restore mediaset '{}'", media_set);
            if let Some(fingerprint) = encryption_key_fingerprint {
                task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
            }
            task_log!(worker, "Pool: {}", pool);
            task_log!(worker, "Datastore: {}", store);
            task_log!(worker, "Drive: {}", drive);
            task_log!(
                worker,
                "Required media list: {}",
                media_id_list.iter()
                    .map(|media_id| media_id.label.label_text.as_str())
                    .collect::<Vec<&str>>()
                    .join(";")
            );

            // restore one medium at a time; request_and_restore_media
            // prompts the operator to load each tape into the drive
            for media_id in media_id_list.iter() {
                request_and_restore_media(
                    &worker,
                    media_id,
                    &drive_config,
                    &drive,
                    &datastore,
                    &auth_id,
                )?;
            }

            task_log!(worker, "Restore mediaset '{}' done", media_set);
            Ok(())
        }
    )?;

    Ok(upid_str.into())
}
198 | ||
b017bbc4 DM |
199 | /// Request and restore complete media without using existing catalog (create catalog instead) |
200 | pub fn request_and_restore_media( | |
201 | worker: &WorkerTask, | |
202 | media_id: &MediaId, | |
203 | drive_config: &SectionConfigData, | |
204 | drive_name: &str, | |
205 | datastore: &DataStore, | |
206 | authid: &Authid, | |
207 | ) -> Result<(), Error> { | |
208 | ||
209 | let media_set_uuid = match media_id.media_set_label { | |
210 | None => bail!("restore_media: no media set - internal error"), | |
211 | Some(ref set) => &set.uuid, | |
212 | }; | |
213 | ||
ff58c519 | 214 | let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label)?; |
b017bbc4 DM |
215 | |
216 | match info.media_set_label { | |
217 | None => { | |
218 | bail!("missing media set label on media {} ({})", | |
8446fbca | 219 | media_id.label.label_text, media_id.label.uuid); |
b017bbc4 DM |
220 | } |
221 | Some(ref set) => { | |
222 | if &set.uuid != media_set_uuid { | |
223 | bail!("wrong media set label on media {} ({} != {})", | |
8446fbca | 224 | media_id.label.label_text, media_id.label.uuid, |
b017bbc4 DM |
225 | media_set_uuid); |
226 | } | |
8e6459a8 DM |
227 | let encrypt_fingerprint = set.encryption_key_fingerprint.clone() |
228 | .map(|fp| (fp, set.uuid.clone())); | |
229 | ||
230 | drive.set_encryption(encrypt_fingerprint)?; | |
b017bbc4 DM |
231 | } |
232 | } | |
233 | ||
234 | restore_media(worker, &mut drive, &info, Some((datastore, authid)), false) | |
235 | } | |
236 | ||
237 | /// Restore complete media content and catalog | |
238 | /// | |
239 | /// Only create the catalog if target is None. | |
240 | pub fn restore_media( | |
241 | worker: &WorkerTask, | |
242 | drive: &mut Box<dyn TapeDriver>, | |
243 | media_id: &MediaId, | |
244 | target: Option<(&DataStore, &Authid)>, | |
245 | verbose: bool, | |
246 | ) -> Result<(), Error> { | |
247 | ||
248 | let status_path = Path::new(TAPE_STATUS_DIR); | |
249 | let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?; | |
250 | ||
251 | loop { | |
252 | let current_file_number = drive.current_file_number()?; | |
253 | let reader = match drive.read_next_file()? { | |
254 | None => { | |
8de9a991 | 255 | task_log!(worker, "detected EOT after {} files", current_file_number); |
b017bbc4 DM |
256 | break; |
257 | } | |
258 | Some(reader) => reader, | |
259 | }; | |
260 | ||
b017bbc4 DM |
261 | restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?; |
262 | } | |
263 | ||
264 | MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?; | |
265 | ||
266 | Ok(()) | |
267 | } | |
268 | ||
/// Restore a single on-tape archive (one tape file).
///
/// Reads and validates the `MediaContentHeader`, then dispatches on the
/// content magic: snapshot archives are extracted into the target
/// datastore (when given), chunk archives have their chunks inserted.
/// Every successfully read archive is registered in `catalog`.
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStore, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {

    // SAFETY note: read_le_value reads a plain little-endian struct image
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        // label files must never appear after the label area
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            // header.size bytes of payload are the snapshot name (utf8)
            let snapshot = reader.read_exact_allocated(header.size as usize)?;
            let snapshot = std::str::from_utf8(&snapshot)
                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
            task_log!(worker, "Found snapshot archive: {} {}", current_file_number, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((datastore, authid)) = target.as_ref() {

                let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                if *authid != &owner { // only the owner is allowed to create additional snapshots
                    bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
                }

                let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
                let mut path = datastore.base_path();
                path.push(rel_path);

                // only extract when the snapshot dir did not exist before;
                // an existing snapshot is left untouched (fall through to
                // the skip/register path below)
                if is_new {
                    task_log!(worker, "restore snapshot {}", backup_dir);

                    match restore_snapshot_archive(worker, reader, &path) {
                        Err(err) => {
                            // remove the partially restored snapshot dir
                            std::fs::remove_dir_all(&path)?;
                            bail!("restore snapshot {} failed - {}", backup_dir, err);
                        }
                        Ok(false) => {
                            // incomplete stream: drop partial data, continue restore
                            std::fs::remove_dir_all(&path)?;
                            task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                        }
                        Ok(true) => {
                            catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                            catalog.commit_if_large()?;
                        }
                    }
                    return Ok(());
                }
            }

            // no target (catalog-only run) or snapshot already present:
            // consume the archive and register it if it is complete
            reader.skip_to_end()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {

            task_log!(worker, "Found chunk archive: {}", current_file_number);
            let datastore = target.as_ref().map(|t| t.0);

            // None means the stream was aborted without end marker; in
            // that case nothing is registered in the catalog
            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
                task_log!(worker, "register {} chunks", chunks.len());
                catalog.end_chunk_archive()?;
                catalog.commit_if_large()?;
            }
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    // persist catalog progress after each archive
    catalog.commit()?;

    Ok(())
}
357 | ||
358 | fn restore_chunk_archive<'a>( | |
359 | worker: &WorkerTask, | |
360 | reader: Box<dyn 'a + TapeRead>, | |
361 | datastore: Option<&DataStore>, | |
362 | verbose: bool, | |
363 | ) -> Result<Option<Vec<[u8;32]>>, Error> { | |
364 | ||
365 | let mut chunks = Vec::new(); | |
366 | ||
367 | let mut decoder = ChunkArchiveDecoder::new(reader); | |
368 | ||
369 | let result: Result<_, Error> = proxmox::try_block!({ | |
0d2133db | 370 | while let Some((digest, blob)) = decoder.next_chunk()? { |
a80d72f9 DM |
371 | |
372 | worker.check_abort()?; | |
373 | ||
0d2133db FG |
374 | if let Some(datastore) = datastore { |
375 | let chunk_exists = datastore.cond_touch_chunk(&digest, false)?; | |
376 | if !chunk_exists { | |
377 | blob.verify_crc()?; | |
378 | ||
379 | if blob.crypt_mode()? == CryptMode::None { | |
380 | blob.decode(None, Some(&digest))?; // verify digest | |
381 | } | |
382 | if verbose { | |
8de9a991 | 383 | task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)); |
b017bbc4 | 384 | } |
0d2133db FG |
385 | datastore.insert_chunk(&blob, &digest)?; |
386 | } else if verbose { | |
8de9a991 | 387 | task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)); |
b017bbc4 | 388 | } |
0d2133db | 389 | } else if verbose { |
8de9a991 | 390 | task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest)); |
b017bbc4 | 391 | } |
0d2133db | 392 | chunks.push(digest); |
b017bbc4 DM |
393 | } |
394 | Ok(()) | |
395 | }); | |
396 | ||
397 | match result { | |
398 | Ok(()) => Ok(Some(chunks)), | |
399 | Err(err) => { | |
400 | let reader = decoder.reader(); | |
401 | ||
402 | // check if this stream is marked incomplete | |
403 | if let Ok(true) = reader.is_incomplete() { | |
404 | return Ok(Some(chunks)); | |
405 | } | |
406 | ||
407 | // check if this is an aborted stream without end marker | |
408 | if let Ok(false) = reader.has_end_marker() { | |
3b82f3ee | 409 | worker.log("missing stream end marker".to_string()); |
b017bbc4 DM |
410 | return Ok(None); |
411 | } | |
412 | ||
413 | // else the archive is corrupt | |
414 | Err(err) | |
415 | } | |
416 | } | |
417 | } | |
418 | ||
419 | fn restore_snapshot_archive<'a>( | |
a80d72f9 | 420 | worker: &WorkerTask, |
b017bbc4 DM |
421 | reader: Box<dyn 'a + TapeRead>, |
422 | snapshot_path: &Path, | |
423 | ) -> Result<bool, Error> { | |
424 | ||
425 | let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?; | |
a80d72f9 | 426 | match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) { |
38556bf6 | 427 | Ok(()) => Ok(true), |
b017bbc4 DM |
428 | Err(err) => { |
429 | let reader = decoder.input(); | |
430 | ||
431 | // check if this stream is marked incomplete | |
432 | if let Ok(true) = reader.is_incomplete() { | |
433 | return Ok(false); | |
434 | } | |
435 | ||
436 | // check if this is an aborted stream without end marker | |
437 | if let Ok(false) = reader.has_end_marker() { | |
438 | return Ok(false); | |
439 | } | |
440 | ||
441 | // else the archive is corrupt | |
38556bf6 | 442 | Err(err) |
b017bbc4 DM |
443 | } |
444 | } | |
445 | } | |
446 | ||
/// Extract a snapshot pxar archive into `snapshot_path` and verify it.
///
/// Expects a flat archive: a root directory containing only regular
/// files. Each file is written to a `.tmp` path first; non-manifest
/// files are renamed into place immediately, while the manifest is
/// only renamed at the very end, after all files listed in it have
/// been checksum-verified — so a snapshot without committed manifest
/// is detectably incomplete.
fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    worker: &WorkerTask,
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {

    // first entry must be the archive root directory
    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        worker.check_abort()?;

        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        // only plain files directly below the root are allowed
        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        // NOTE(review): set_extension replaces the last extension
        // (e.g. "x.didx" -> "x.tmp"); assumes archive file stems are
        // unique within a snapshot — appears to hold for backup dirs
        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {

            // keep the manifest as .tmp for now; renamed at the end
            let blob = DataBlob::load_from_reader(&mut contents)?;
            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    // a snapshot archive without manifest is unusable
    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    // verify every file the manifest references against its recorded
    // checksum and size before committing the manifest
    for item in manifest.files() {
        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&item.filename);

        match archive_type(&item.filename)? {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::Blob => {
                let mut tmpfile = std::fs::File::open(&archive_path)?;
                let (csum, size) = compute_file_csum(&mut tmpfile)?;
                manifest.verify_file(&item.filename, &csum, size)?;
            }
        }
    }

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
    }

    Ok(())
}