git.proxmox.com Git - proxmox-backup.git/blob - src/tape/media_catalog.rs
tape: improve catalog consistency checks
use std::convert::TryFrom;
use std::fs::File;
use std::io::{Write, Read, BufReader, Seek, SeekFrom};
use std::os::unix::io::AsRawFd;
use std::path::Path;
use std::collections::{HashSet, HashMap};

use anyhow::{bail, format_err, Error};
use endian_trait::Endian;

use proxmox::tools::{
    Uuid,
    fs::{
        fchown,
        create_path,
        CreateOptions,
    },
    io::{
        WriteExt,
        ReadExt,
    },
};

use crate::{
    tools::fs::read_subdir,
    backup::BackupDir,
    tape::{
        MediaId,
        file_formats::MediaSetLabel,
    },
};

pub struct DatastoreContent {
    pub snapshot_index: HashMap<String, u64>, // snapshot => file_nr
    pub chunk_index: HashMap<[u8;32], u64>, // chunk => file_nr
}

impl DatastoreContent {

    pub fn new() -> Self {
        Self {
            chunk_index: HashMap::new(),
            snapshot_index: HashMap::new(),
        }
    }
}

/// The Media Catalog
///
/// Stores what chunks and snapshots are stored on a specific media,
/// including the file position.
///
/// We use a simple binary format to store data on disk.
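///
/// On disk, the catalog is an append-only file: an 8-byte magic number
/// followed by a sequence of records. Each record starts with a one-byte
/// type marker (b'L' label, b'A' chunk archive start, b'C' chunk,
/// b'E' chunk archive end, b'S' snapshot), followed by the corresponding
/// little-endian entry struct (`LabelEntry`, `ChunkArchiveStart`,
/// `ChunkArchiveEnd`, `SnapshotEntry` at the end of this file) and, where
/// applicable, the datastore and snapshot names.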
pub struct MediaCatalog {

    uuid: Uuid, // BackupMedia uuid

    file: Option<File>,

    log_to_stdout: bool,

    current_archive: Option<(Uuid, u64, String)>, // (uuid, file_nr, store)

    last_entry: Option<(Uuid, u64)>,

    content: HashMap<String, DatastoreContent>,

    pending: Vec<u8>,
}

impl MediaCatalog {

    /// Magic number for media catalog files.
    // openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.0")[0..8]
    // Note: this version did not store datastore names (not supported anymore)
    pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0: [u8; 8] = [221, 29, 164, 1, 59, 69, 19, 40];

    // openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.1")[0..8]
    pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1: [u8; 8] = [76, 142, 232, 193, 32, 168, 137, 113];

    /// List media with catalogs
    pub fn media_with_catalogs(base_path: &Path) -> Result<HashSet<Uuid>, Error> {
        let mut catalogs = HashSet::new();

        for entry in read_subdir(libc::AT_FDCWD, base_path)? {
            let entry = entry?;
            let name = unsafe { entry.file_name_utf8_unchecked() };
            if !name.ends_with(".log") { continue; }
            if let Ok(uuid) = Uuid::parse_str(&name[..(name.len()-4)]) {
                catalogs.insert(uuid);
            }
        }

        Ok(catalogs)
    }

    /// Test if a catalog exists
    pub fn exists(base_path: &Path, uuid: &Uuid) -> bool {
        let mut path = base_path.to_owned();
        path.push(uuid.to_string());
        path.set_extension("log");
        path.exists()
    }

    /// Destroy the media catalog (remove all files)
    pub fn destroy(base_path: &Path, uuid: &Uuid) -> Result<(), Error> {

        let mut path = base_path.to_owned();
        path.push(uuid.to_string());
        path.set_extension("log");

        match std::fs::remove_file(path) {
            Ok(()) => Ok(()),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()),
            Err(err) => Err(err.into()),
        }
    }

    /// Enable/Disable logging to stdout (disabled by default)
    pub fn log_to_stdout(&mut self, enable: bool) {
        self.log_to_stdout = enable;
    }

    fn create_basedir(base_path: &Path) -> Result<(), Error> {
        let backup_user = crate::backup::backup_user()?;
        let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
        let opts = CreateOptions::new()
            .perm(mode)
            .owner(backup_user.uid)
            .group(backup_user.gid);

        create_path(base_path, None, Some(opts))
            .map_err(|err: Error| format_err!("unable to create media catalog dir - {}", err))?;
        Ok(())
    }

    /// Open a catalog database, load into memory
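    ///
    /// A minimal usage sketch (hypothetical `base_path`, `media_id`, store
    /// and snapshot values; error handling elided):
    ///
    /// ```ignore
    /// // Open (or create) the catalog for this media in read-write mode.
    /// let catalog = MediaCatalog::open(base_path, &media_id, true, true)?;
    ///
    /// // Query the in-memory index that was just loaded.
    /// let known = catalog.contains_snapshot("store1", "vm/100/2021-01-01T00:00:00Z");
    /// ```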
    pub fn open(
        base_path: &Path,
        media_id: &MediaId,
        write: bool,
        create: bool,
    ) -> Result<Self, Error> {

        let uuid = &media_id.label.uuid;

        let mut path = base_path.to_owned();
        path.push(uuid.to_string());
        path.set_extension("log");

        let me = proxmox::try_block!({

            Self::create_basedir(base_path)?;

            let mut file = std::fs::OpenOptions::new()
                .read(true)
                .write(write)
                .create(create)
                .open(&path)?;

            let backup_user = crate::backup::backup_user()?;
            fchown(file.as_raw_fd(), Some(backup_user.uid), Some(backup_user.gid))
                .map_err(|err| format_err!("fchown failed - {}", err))?;

            let mut me = Self {
                uuid: uuid.clone(),
                file: None,
                log_to_stdout: false,
                current_archive: None,
                last_entry: None,
                content: HashMap::new(),
                pending: Vec::new(),
            };

            let found_magic_number = me.load_catalog(&mut file, media_id.media_set_label.as_ref())?;

            if !found_magic_number {
                me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
            }

            if write {
                me.file = Some(file);
            }
            Ok(me)
        }).map_err(|err: Error| {
            format_err!("unable to open media catalog {:?} - {}", path, err)
        })?;

        Ok(me)
    }

    /// Creates a temporary, empty catalog database
    ///
    /// Creates a new catalog file using a ".tmp" file extension.
    pub fn create_temporary_database(
        base_path: &Path,
        media_id: &MediaId,
        log_to_stdout: bool,
    ) -> Result<Self, Error> {

        let uuid = &media_id.label.uuid;

        let mut tmp_path = base_path.to_owned();
        tmp_path.push(uuid.to_string());
        tmp_path.set_extension("tmp");

        let me = proxmox::try_block!({

            Self::create_basedir(base_path)?;

            let file = std::fs::OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .truncate(true)
                .open(&tmp_path)?;

            let backup_user = crate::backup::backup_user()?;
            fchown(file.as_raw_fd(), Some(backup_user.uid), Some(backup_user.gid))
                .map_err(|err| format_err!("fchown failed - {}", err))?;

            let mut me = Self {
                uuid: uuid.clone(),
                file: Some(file),
                log_to_stdout: false,
                current_archive: None,
                last_entry: None,
                content: HashMap::new(),
                pending: Vec::new(),
            };

            me.log_to_stdout = log_to_stdout;

            me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);

            me.register_label(&media_id.label.uuid, 0, 0)?;

            if let Some(ref set) = media_id.media_set_label {
                me.register_label(&set.uuid, set.seq_nr, 1)?;
            }

            me.commit()?;

            Ok(me)
        }).map_err(|err: Error| {
            format_err!("unable to create temporary media catalog {:?} - {}", tmp_path, err)
        })?;

        Ok(me)
    }

    /// Commit or Abort a temporary catalog database
    ///
    /// With commit set, we rename the ".tmp" file extension to
    /// ".log". When commit is false, we remove the ".tmp" file.
    pub fn finish_temporary_database(
        base_path: &Path,
        uuid: &Uuid,
        commit: bool,
    ) -> Result<(), Error> {

        let mut tmp_path = base_path.to_owned();
        tmp_path.push(uuid.to_string());
        tmp_path.set_extension("tmp");

        if commit {
            let mut catalog_path = tmp_path.clone();
            catalog_path.set_extension("log");

            if let Err(err) = std::fs::rename(&tmp_path, &catalog_path) {
                bail!("Atomic rename catalog {:?} failed - {}", catalog_path, err);
            }
        } else {
            std::fs::remove_file(&tmp_path)?;
        }
        Ok(())
    }

    /// Returns the BackupMedia uuid
    pub fn uuid(&self) -> &Uuid {
        &self.uuid
    }

    /// Accessor to content list
    pub fn content(&self) -> &HashMap<String, DatastoreContent> {
        &self.content
    }

    /// Commit pending changes
    ///
    /// This is necessary to store changes persistently.
    ///
    /// Fixme: this should be atomic ...
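    ///
    /// Until `commit()` is called, new entries only exist in the in-memory
    /// `pending` buffer and are lost on a crash. The Fixme above refers to
    /// the write below: a crash during `write_all()` can leave a partially
    /// written record at the end of the catalog file.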
    pub fn commit(&mut self) -> Result<(), Error> {

        if self.pending.is_empty() {
            return Ok(());
        }

        match self.file {
            Some(ref mut file) => {
                file.write_all(&self.pending)?;
                file.flush()?;
                file.sync_data()?;
            }
            None => bail!("media catalog not writable (opened read only)"),
        }

        self.pending = Vec::new();

        Ok(())
    }

    /// Conditionally commit if the pending data is large (> 1 MiB)
    pub fn commit_if_large(&mut self) -> Result<(), Error> {
        if self.pending.len() > 1024*1024 {
            self.commit()?;
        }
        Ok(())
    }

    /// Destroys the existing catalog and opens a new one
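    ///
    /// Internally this writes a fresh temporary catalog containing only
    /// the label entries from `media_id` and then renames it over the old
    /// ".log" file (see `create_temporary_database` and
    /// `finish_temporary_database` above).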
    pub fn overwrite(
        base_path: &Path,
        media_id: &MediaId,
        log_to_stdout: bool,
    ) -> Result<Self, Error> {

        let uuid = &media_id.label.uuid;

        let me = Self::create_temporary_database(base_path, &media_id, log_to_stdout)?;

        Self::finish_temporary_database(base_path, uuid, true)?;

        Ok(me)
    }

    /// Test if the catalog already contains a snapshot
    pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
        match self.content.get(store) {
            None => false,
            Some(content) => content.snapshot_index.contains_key(snapshot),
        }
    }

    /// Returns the snapshot archive file number
    pub fn lookup_snapshot(&self, store: &str, snapshot: &str) -> Option<u64> {
        match self.content.get(store) {
            None => None,
            Some(content) => content.snapshot_index.get(snapshot).copied(),
        }
    }

    /// Test if the catalog already contains a chunk
    pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
        match self.content.get(store) {
            None => false,
            Some(content) => content.chunk_index.contains_key(digest),
        }
    }

    /// Returns the chunk archive file number
    pub fn lookup_chunk(&self, store: &str, digest: &[u8;32]) -> Option<u64> {
        match self.content.get(store) {
            None => None,
            Some(content) => content.chunk_index.get(digest).copied(),
        }
    }

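    // Consistency check for label entries: labels may only appear as
    // file 0 (media label) or file 1 (media-set label), never inside a
    // chunk archive, and the file number must be exactly one past the
    // last registered entry.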
    fn check_register_label(&self, file_number: u64, uuid: &Uuid) -> Result<(), Error> {

        if file_number >= 2 {
            bail!("register label failed: got wrong file number ({} >= 2)", file_number);
        }

        if file_number == 0 && uuid != &self.uuid {
            bail!("register label failed: uuid does not match");
        }

        if self.current_archive.is_some() {
            bail!("register label failed: inside chunk archive");
        }

        let expected_file_number = match self.last_entry {
            Some((_, last_number)) => last_number + 1,
            None => 0,
        };

        if file_number != expected_file_number {
            bail!("register label failed: got unexpected file number ({} != {})",
                  file_number, expected_file_number);
        }
        Ok(())
    }

    /// Register media labels (file 0 and 1)
    pub fn register_label(
        &mut self,
        uuid: &Uuid, // Media/MediaSet Uuid
        seq_nr: u64, // only used for media set labels
        file_number: u64,
    ) -> Result<(), Error> {

        self.check_register_label(file_number, uuid)?;

        if file_number == 0 && seq_nr != 0 {
            bail!("register_label failed - seq_nr should be 0 - internal error");
        }

        let entry = LabelEntry {
            file_number,
            uuid: *uuid.as_bytes(),
            seq_nr,
        };

        if self.log_to_stdout {
            println!("L|{}|{}", file_number, uuid.to_string());
        }

        self.pending.push(b'L');

        unsafe { self.pending.write_le_value(entry)?; }

        self.last_entry = Some((uuid.clone(), file_number));

        Ok(())
    }

    /// Register a chunk
    ///
    /// Only valid after start_chunk_archive.
    pub fn register_chunk(
        &mut self,
        digest: &[u8;32],
    ) -> Result<(), Error> {

        let (file_number, store) = match self.current_archive {
            None => bail!("register_chunk failed: no archive started"),
            Some((_, file_number, ref store)) => (file_number, store),
        };

        if self.log_to_stdout {
            println!("C|{}", proxmox::tools::digest_to_hex(digest));
        }

        self.pending.push(b'C');
        self.pending.extend(digest);

        match self.content.get_mut(store) {
            None => bail!("storage {} not registered - internal error", store),
            Some(content) => {
                content.chunk_index.insert(*digest, file_number);
            }
        }

        Ok(())
    }

    fn check_start_chunk_archive(&self, file_number: u64) -> Result<(), Error> {

        if self.current_archive.is_some() {
            bail!("start_chunk_archive failed: already started");
        }

        if file_number < 2 {
            bail!("start_chunk_archive failed: got wrong file number ({} < 2)", file_number);
        }

        let expect_min_file_number = match self.last_entry {
            Some((_, last_number)) => last_number + 1,
            None => 0,
        };

        if file_number < expect_min_file_number {
            bail!("start_chunk_archive: got unexpected file number ({} < {})",
                  file_number, expect_min_file_number);
        }

        Ok(())
    }

    /// Start a chunk archive section
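    ///
    /// Chunks in that archive are then recorded with `register_chunk()` and
    /// the section is closed with `end_chunk_archive()`. A rough sketch of
    /// the write sequence (uuid, file number and digest values are
    /// illustrative only):
    ///
    /// ```ignore
    /// catalog.start_chunk_archive(content_uuid, 2, "store1")?;
    /// catalog.register_chunk(&digest)?;
    /// catalog.end_chunk_archive()?;
    /// catalog.commit()?;
    /// ```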
    pub fn start_chunk_archive(
        &mut self,
        uuid: Uuid, // Uuid from MediaContentHeader
        file_number: u64,
        store: &str,
    ) -> Result<(), Error> {

        self.check_start_chunk_archive(file_number)?;

        let entry = ChunkArchiveStart {
            file_number,
            uuid: *uuid.as_bytes(),
            store_name_len: u8::try_from(store.len())?,
        };

        if self.log_to_stdout {
            println!("A|{}|{}|{}", file_number, uuid.to_string(), store);
        }

        self.pending.push(b'A');

        unsafe { self.pending.write_le_value(entry)?; }
        self.pending.extend(store.as_bytes());

        self.content.entry(store.to_string()).or_insert(DatastoreContent::new());

        self.current_archive = Some((uuid, file_number, store.to_string()));

        Ok(())
    }

    fn check_end_chunk_archive(&self, uuid: &Uuid, file_number: u64) -> Result<(), Error> {

        match self.current_archive {
            None => bail!("end_chunk_archive failed: not started"),
            Some((ref expected_uuid, expected_file_number, ..)) => {
                if uuid != expected_uuid {
                    bail!("end_chunk_archive failed: got unexpected uuid");
                }
                if file_number != expected_file_number {
                    bail!("end_chunk_archive failed: got unexpected file number ({} != {})",
                          file_number, expected_file_number);
                }
            }
        }
        Ok(())
    }

    /// End a chunk archive section
    pub fn end_chunk_archive(&mut self) -> Result<(), Error> {

        match self.current_archive.take() {
            None => bail!("end_chunk_archive failed: not started"),
            Some((uuid, file_number, ..)) => {

                let entry = ChunkArchiveEnd {
                    file_number,
                    uuid: *uuid.as_bytes(),
                };

                if self.log_to_stdout {
                    println!("E|{}|{}\n", file_number, uuid.to_string());
                }

                self.pending.push(b'E');

                unsafe { self.pending.write_le_value(entry)?; }

                self.last_entry = Some((uuid, file_number));
            }
        }

        Ok(())
    }

    fn check_register_snapshot(&self, file_number: u64, snapshot: &str) -> Result<(), Error> {

        if self.current_archive.is_some() {
            bail!("register_snapshot failed: inside chunk_archive");
        }

        if file_number < 2 {
            bail!("register_snapshot failed: got wrong file number ({} < 2)", file_number);
        }

        let expect_min_file_number = match self.last_entry {
            Some((_, last_number)) => last_number + 1,
            None => 0,
        };

        if file_number < expect_min_file_number {
            bail!("register_snapshot failed: got unexpected file number ({} < {})",
                  file_number, expect_min_file_number);
        }

        if let Err(err) = snapshot.parse::<BackupDir>() {
            bail!("register_snapshot failed: unable to parse snapshot '{}' - {}", snapshot, err);
        }

        Ok(())
    }

    /// Register a snapshot
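    ///
    /// Records which file number on this media contains the given snapshot
    /// archive. A sketch (uuid, file number, store and snapshot values are
    /// illustrative only):
    ///
    /// ```ignore
    /// catalog.register_snapshot(content_uuid, 3, "store1", "vm/100/2021-01-01T00:00:00Z")?;
    /// catalog.commit()?;
    /// ```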
    pub fn register_snapshot(
        &mut self,
        uuid: Uuid, // Uuid from MediaContentHeader
        file_number: u64,
        store: &str,
        snapshot: &str,
    ) -> Result<(), Error> {

        self.check_register_snapshot(file_number, snapshot)?;

        let entry = SnapshotEntry {
            file_number,
            uuid: *uuid.as_bytes(),
            store_name_len: u8::try_from(store.len())?,
            name_len: u16::try_from(snapshot.len())?,
        };

        if self.log_to_stdout {
            println!("S|{}|{}|{}:{}", file_number, uuid.to_string(), store, snapshot);
        }

        self.pending.push(b'S');

        unsafe { self.pending.write_le_value(entry)?; }
        self.pending.extend(store.as_bytes());
        self.pending.push(b':');
        self.pending.extend(snapshot.as_bytes());

        let content = self.content.entry(store.to_string())
            .or_insert(DatastoreContent::new());

        content.snapshot_index.insert(snapshot.to_string(), file_number);

        self.last_entry = Some((uuid, file_number));

        Ok(())
    }

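    /// Replay all entries from the catalog file, rebuilding the in-memory
    /// indexes. The same consistency checks as on the write path are
    /// re-run for every entry. Returns whether the magic number was found
    /// (false only for an empty file).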
    fn load_catalog(
        &mut self,
        file: &mut File,
        media_set_label: Option<&MediaSetLabel>,
    ) -> Result<bool, Error> {

        let mut file = BufReader::new(file);
        let mut found_magic_number = false;

        loop {
            let pos = file.seek(SeekFrom::Current(0))?;

            if pos == 0 { // read/check magic number
                let mut magic = [0u8; 8];
                match file.read_exact_or_eof(&mut magic) {
                    Ok(false) => { /* EOF */ break; }
                    Ok(true) => { /* OK */ }
                    Err(err) => bail!("read failed - {}", err),
                }
                if magic == Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0 {
                    // only used in unreleased versions
                    bail!("old catalog format (v1.0) is no longer supported");
                }
                if magic != Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1 {
                    bail!("wrong magic number");
                }
                found_magic_number = true;
                continue;
            }

            let mut entry_type = [0u8; 1];
            match file.read_exact_or_eof(&mut entry_type) {
                Ok(false) => { /* EOF */ break; }
                Ok(true) => { /* OK */ }
                Err(err) => bail!("read failed - {}", err),
            }

            match entry_type[0] {
                b'C' => {
                    let (file_number, store) = match self.current_archive {
                        None => bail!("register_chunk failed: no archive started"),
                        Some((_, file_number, ref store)) => (file_number, store),
                    };
                    let mut digest = [0u8; 32];
                    file.read_exact(&mut digest)?;
                    match self.content.get_mut(store) {
                        None => bail!("storage {} not registered - internal error", store),
                        Some(content) => {
                            content.chunk_index.insert(digest, file_number);
                        }
                    }
                }
                b'A' => {
                    let entry: ChunkArchiveStart = unsafe { file.read_le_value()? };
                    let file_number = entry.file_number;
                    let uuid = Uuid::from(entry.uuid);
                    let store_name_len = entry.store_name_len as usize;

                    let store = file.read_exact_allocated(store_name_len)?;
                    let store = std::str::from_utf8(&store)?;

                    self.check_start_chunk_archive(file_number)?;

                    self.content.entry(store.to_string())
                        .or_insert(DatastoreContent::new());

                    self.current_archive = Some((uuid, file_number, store.to_string()));
                }
                b'E' => {
                    let entry: ChunkArchiveEnd = unsafe { file.read_le_value()? };
                    let file_number = entry.file_number;
                    let uuid = Uuid::from(entry.uuid);

                    self.check_end_chunk_archive(&uuid, file_number)?;

                    self.current_archive = None;
                    self.last_entry = Some((uuid, file_number));
                }
                b'S' => {
                    let entry: SnapshotEntry = unsafe { file.read_le_value()? };
                    let file_number = entry.file_number;
                    let store_name_len = entry.store_name_len as usize;
                    let name_len = entry.name_len as usize;
                    let uuid = Uuid::from(entry.uuid);

                    let store = file.read_exact_allocated(store_name_len + 1)?;
                    if store[store_name_len] != b':' {
                        bail!("parse-error: missing separator in SnapshotEntry");
                    }

                    let store = std::str::from_utf8(&store[..store_name_len])?;

                    let snapshot = file.read_exact_allocated(name_len)?;
                    let snapshot = std::str::from_utf8(&snapshot)?;

                    self.check_register_snapshot(file_number, snapshot)?;

                    let content = self.content.entry(store.to_string())
                        .or_insert(DatastoreContent::new());

                    content.snapshot_index.insert(snapshot.to_string(), file_number);

                    self.last_entry = Some((uuid, file_number));
                }
                b'L' => {
                    let entry: LabelEntry = unsafe { file.read_le_value()? };
                    let file_number = entry.file_number;
                    let uuid = Uuid::from(entry.uuid);

                    self.check_register_label(file_number, &uuid)?;

                    if file_number == 1 {
                        if let Some(set) = media_set_label {
                            if set.uuid != uuid {
                                bail!("got unexpected media set uuid");
                            }
                            if set.seq_nr != entry.seq_nr {
                                bail!("got unexpected media set sequence number");
                            }
                        }
                    }

                    self.last_entry = Some((uuid, file_number));
                }
                _ => {
                    bail!("unknown entry type '{}'", entry_type[0]);
                }
            }

        }

        Ok(found_magic_number)
    }
}

/// Media set catalog
///
/// Catalog for multiple media.
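///
/// A sketch of aggregating two already-open `MediaCatalog`s
/// (`catalog1`/`catalog2` are placeholders):
///
/// ```ignore
/// let mut set_catalog = MediaSetCatalog::new();
/// set_catalog.append_catalog(catalog1)?;
/// set_catalog.append_catalog(catalog2)?;
/// let found = set_catalog.contains_snapshot("store1", "vm/100/2021-01-01T00:00:00Z");
/// ```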
pub struct MediaSetCatalog {
    catalog_list: HashMap<Uuid, MediaCatalog>,
}

impl MediaSetCatalog {

    /// Creates a new instance
    pub fn new() -> Self {
        Self {
            catalog_list: HashMap::new(),
        }
    }

    /// Add a catalog
    pub fn append_catalog(&mut self, catalog: MediaCatalog) -> Result<(), Error> {

        if self.catalog_list.get(&catalog.uuid).is_some() {
            bail!("MediaSetCatalog already contains media '{}'", catalog.uuid);
        }

        self.catalog_list.insert(catalog.uuid.clone(), catalog);

        Ok(())
    }

    /// Remove a catalog
    pub fn remove_catalog(&mut self, media_uuid: &Uuid) {
        self.catalog_list.remove(media_uuid);
    }

    /// Test if any of the catalogs already contains a snapshot
    pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
        for catalog in self.catalog_list.values() {
            if catalog.contains_snapshot(store, snapshot) {
                return true;
            }
        }
        false
    }

    /// Test if any of the catalogs already contains a chunk
    pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
        for catalog in self.catalog_list.values() {
            if catalog.contains_chunk(store, digest) {
                return true;
            }
        }
        false
    }
}

// Type definitions for internal binary catalog encoding
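//
// All entries are written in little-endian byte order (via
// `write_le_value`/`read_le_value`), each prefixed by the one-byte type
// marker used in `load_catalog` above.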

#[derive(Endian)]
#[repr(C)]
struct LabelEntry {
    file_number: u64,
    uuid: [u8;16],
    seq_nr: u64, // only used for media set labels
}

#[derive(Endian)]
#[repr(C)]
struct ChunkArchiveStart {
    file_number: u64,
    uuid: [u8;16],
    store_name_len: u8,
    /* datastore name follows */
}

#[derive(Endian)]
#[repr(C)]
struct ChunkArchiveEnd {
    file_number: u64,
    uuid: [u8;16],
}

#[derive(Endian)]
#[repr(C)]
struct SnapshotEntry {
    file_number: u64,
    uuid: [u8;16],
    store_name_len: u8,
    name_len: u16,
    /* datastore name, ':', snapshot name follows */
}