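//! Load pack indices into the dynamic store's slot map, and consolidate the in-memory state
//! with the contents of the objects directory on disk.
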
use std::{
    collections::{BTreeMap, VecDeque},
    ffi::OsStr,
    ops::Deref,
    path::{Path, PathBuf},
    sync::{
        atomic::{AtomicU16, AtomicUsize, Ordering},
        Arc,
    },
    time::SystemTime,
};

use crate::store::{handle, types, RefreshMode};

pub(crate) struct Snapshot {
    /// Indices ready for object lookup or contains checks, usually ordered by modification date with the most recent ones first.
    pub(crate) indices: Vec<handle::IndexLookup>,
    /// A set of loose object databases to search once packed objects weren't found.
    pub(crate) loose_dbs: Arc<Vec<crate::loose::Store>>,
    /// Remembers what this state represents, to allow comparison with other states.
    pub(crate) marker: types::SlotIndexMarker,
}

mod error {
    use std::path::PathBuf;

    use gix_pack::multi_index::PackIndex;

    /// Returned by [`crate::at_opts()`]
    #[derive(thiserror::Error, Debug)]
    #[allow(missing_docs)]
    pub enum Error {
        #[error("The objects directory at '{0}' is not an accessible directory")]
        Inaccessible(PathBuf),
        #[error(transparent)]
        Io(#[from] std::io::Error),
        #[error(transparent)]
        Alternate(#[from] crate::alternate::Error),
        #[error("The slotmap turned out to be too small with {} entries, would need {} more", .current, .needed)]
        InsufficientSlots { current: usize, needed: usize },
        /// The problem here is that some logic assumes that more recent generations are higher than previous ones. If we were to overflow,
        /// we would break that invariant, which could lead to the wrong object being returned. It would probably be super rare, but…
        /// let's not risk it.
        #[error(
            "Would have overflown amount of max possible generations of {}",
            super::Generation::MAX
        )]
        GenerationOverflow,
        #[error("Cannot numerically handle more than {limit} packs in a single multi-pack index, got {actual} in file {index_path:?}")]
        TooManyPacksInMultiIndex {
            actual: PackIndex,
            limit: PackIndex,
            index_path: PathBuf,
        },
    }
}

pub use error::Error;

use crate::store::types::{Generation, IndexAndPacks, MutableIndexAndPack, PackId, SlotMapIndex};

impl super::Store {
    /// Load all indices, refreshing from disk only if needed.
    pub(crate) fn load_all_indices(&self) -> Result<Snapshot, Error> {
        let mut snapshot = self.collect_snapshot();
        while let Some(new_snapshot) = self.load_one_index(RefreshMode::Never, snapshot.marker)? {
            snapshot = new_snapshot
        }
        Ok(snapshot)
    }

    /// If `None` is returned, there are no new indices and the caller should give up. This is possible even if refreshing is allowed,
    /// as there might be no change to pick up.
    pub(crate) fn load_one_index(
        &self,
        refresh_mode: RefreshMode,
        marker: types::SlotIndexMarker,
    ) -> Result<Option<Snapshot>, Error> {
        let index = self.index.load();
        if !index.is_initialized() {
            return self.consolidate_with_disk_state(true /* needs_init */, false /*load one new index*/);
        }

        // Always compare to the latest state.
        if marker.generation != index.generation || marker.state_id != index.state_id() {
            // We have a more recent state already, provide it.
            Ok(Some(self.collect_snapshot()))
        } else {
            // Nothing changed in the meantime, try to load another index…
            if self.load_next_index(index) {
                Ok(Some(self.collect_snapshot()))
            } else {
                // …and if that didn't yield anything new, consider refreshing our disk state.
                match refresh_mode {
                    RefreshMode::Never => Ok(None),
                    RefreshMode::AfterAllIndicesLoaded => {
                        self.consolidate_with_disk_state(false /* needs init */, true /*load one new index*/)
                    }
                }
            }
        }
    }

    /// Load a new index (if not yet loaded), and return true if one was indeed loaded, leading to a `state_id()` change of the current index.
    /// Note that interacting with the slot-map is inherently racy and we have to deal with it, being conservative in what we even try to load
    /// as our index might already be out-of-date as we try to use it to learn what's next.
    fn load_next_index(&self, mut index: arc_swap::Guard<Arc<SlotMapIndex>>) -> bool {
        'retry_with_changed_index: loop {
            let previous_state_id = index.state_id();
            'retry_with_next_slot_index: loop {
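                // Atomically claim the next slot index to load: the CAS loop below bumps `next_index_to_load`
                // unless it already reached the end, so each index is loaded by exactly one thread.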
                match index
                    .next_index_to_load
                    .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |current| {
                        (current != index.slot_indices.len()).then_some(current + 1)
                    }) {
                    Ok(slot_map_index) => {
                        // This slot-map index is in bounds and was handed out only to us.
                        let _ongoing_operation = IncOnNewAndDecOnDrop::new(&index.num_indices_currently_being_loaded);
                        let slot = &self.files[index.slot_indices[slot_map_index]];
                        let _lock = slot.write.lock();
                        if slot.generation.load(Ordering::SeqCst) > index.generation {
                            // There is a disk consolidation in progress which just overwrote this disposable slot
                            // with some other index, one we didn't intend to load.
                            // Continue with the next slot index in the hope there is something else we can do…
                            continue 'retry_with_next_slot_index;
                        }
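                        // Clone the current bundle, load the index into the clone, then publish it atomically,
                        // so concurrent readers never observe a half-loaded state.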
                        let mut bundle = slot.files.load_full();
                        let bundle_mut = Arc::make_mut(&mut bundle);
                        if let Some(files) = bundle_mut.as_mut() {
                            // These are always expected to be set, unless somebody raced us. We handle this later by retrying.
                            let _loaded_count = IncOnDrop(&index.loaded_indices);
                            match files.load_index(self.object_hash) {
                                Ok(_) => {
                                    slot.files.store(bundle);
                                    break 'retry_with_next_slot_index;
                                }
                                Err(_) => {
                                    slot.files.store(bundle);
                                    continue 'retry_with_next_slot_index;
                                }
                            }
                        }
                    }
                    Err(_nothing_more_to_load) => {
                        // There can be contention as many threads start working at the same time and take all the
                        // slots to load indices for. Some threads might just be left over and have to wait for something
                        // to change.
                        let num_load_operations = index.num_indices_currently_being_loaded.deref();
                        // TODO: potentially hot loop - could this be a condition variable?
                        while num_load_operations.load(Ordering::Relaxed) != 0 {
                            std::thread::yield_now()
                        }
                        break 'retry_with_next_slot_index;
                    }
                }
            }
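            // Decide whether our attempt (or a concurrent one) changed anything; if the slot map itself
            // was swapped out in the meantime, retry against the new one.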
            if previous_state_id == index.state_id() {
                let potentially_new_index = self.index.load();
                if Arc::as_ptr(&potentially_new_index) == Arc::as_ptr(&index) {
                    // There isn't a new index with which to retry the whole ordeal, so nothing can be done here.
                    return false;
                } else {
                    // The index changed, worth trying again.
                    index = potentially_new_index;
                    continue 'retry_with_changed_index;
                }
            } else {
                // Something inarguably changed, probably an index was loaded. 'Probably' because we consider failed loads valid attempts,
                // even if they don't change anything for the caller, which would then do a round for nothing.
                return true;
            }
        }
    }

    /// Refresh and possibly clear out our existing data structures, causing all pack ids to be invalidated.
    /// `load_new_index` is an optimization to at least provide one newly loaded pack after refreshing the slot map.
    /// Note that this is the only method that assigns new slot indices.
    pub(crate) fn consolidate_with_disk_state(
        &self,
        needs_init: bool,
        load_new_index: bool,
    ) -> Result<Option<Snapshot>, Error> {
        let index = self.index.load();
        let previous_index_state = Arc::as_ptr(&index) as usize;

        // IMPORTANT: get a lock after we recorded the previous state.
        let write = self.write.lock();
        let objects_directory = &self.path;

        // Now we know the index isn't going to change anymore, even though threads might still load indices in the meantime.
        let index = self.index.load();
        if previous_index_state != Arc::as_ptr(&index) as usize {
            // Someone else took the lock before us and changed the index. Return it without doing any additional work.
            return Ok(Some(self.collect_snapshot()));
        }

        let was_uninitialized = !index.is_initialized();

        // We might not be able to detect by pointer if the state changed, as this itself is racy. So we keep track of double-initialization
        // using a flag, which means that if `needs_init` was true we saw the index uninitialized once, but now that we are here it's
        // initialized, meaning that somebody was faster and we couldn't detect it by comparisons to the index.
        // If so, make sure we collect the snapshot instead of returning None in case nothing actually changed, which is likely with a
        // race like this.
        if !was_uninitialized && needs_init {
            return Ok(Some(self.collect_snapshot()));
        }
        self.num_disk_state_consolidation.fetch_add(1, Ordering::Relaxed);

        let db_paths: Vec<_> = std::iter::once(objects_directory.to_owned())
            .chain(crate::alternate::resolve(objects_directory.clone(), &self.current_dir)?)
            .collect();

        // Turn the db paths into loose object databases. Reuse what's there, but only if it is in the right order.
        let loose_dbs = if was_uninitialized
            || db_paths.len() != index.loose_dbs.len()
            || db_paths
                .iter()
                .zip(index.loose_dbs.iter().map(|ldb| &ldb.path))
                .any(|(lhs, rhs)| lhs != rhs)
        {
            Arc::new(
                db_paths
                    .iter()
                    .map(|path| crate::loose::Store::at(path, self.object_hash))
                    .collect::<Vec<_>>(),
            )
        } else {
            Arc::clone(&index.loose_dbs)
        };

        let indices_by_modification_time = Self::collect_indices_and_mtime_sorted_by_size(
            db_paths,
            index.slot_indices.len().into(),
            self.use_multi_pack_index.then_some(self.object_hash),
        )?;
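        // Map the path of every index we currently hold in a slot to that slot's position, so matching
        // the on-disk indices against it tells us which slots to keep; leftover entries are files that
        // disappeared from disk and whose slots must be removed.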
        let mut idx_by_index_path: BTreeMap<_, _> = index
            .slot_indices
            .iter()
            .filter_map(|&idx| {
                let f = &self.files[idx];
                Option::as_ref(&f.files.load()).map(|f| (f.index_path().to_owned(), idx))
            })
            .collect();

        let mut new_slot_map_indices = Vec::new(); // these indices into the slot map still exist there/didn't change
        let mut index_paths_to_add = was_uninitialized
            .then(|| VecDeque::with_capacity(indices_by_modification_time.len()))
            .unwrap_or_default();

        // Figure out this number based on what we see while handling the existing indices.
        let mut num_loaded_indices = 0;
        for (index_info, mtime) in indices_by_modification_time.into_iter().map(|(a, b, _)| (a, b)) {
            match idx_by_index_path.remove(index_info.path()) {
                Some(slot_idx) => {
                    let slot = &self.files[slot_idx];
                    let files_guard = slot.files.load();
                    let files =
                        Option::as_ref(&files_guard).expect("slot is set or we wouldn't know it points to this file");
                    if index_info.is_multi_index() && files.mtime() != mtime {
                        // We have a changed multi-pack index. We can't just change the existing slot as it may alter slot indices
                        // that are currently available. Instead we have to move what's there into a new slot, along with the changes,
                        // and later free the slot or dispose of the index in the slot (like we do for removed/missing files).
                        index_paths_to_add.push_back((index_info, mtime, Some(slot_idx)));
                        // If the current slot is loaded, the soon-to-be copied multi-index path will be loaded as well.
                        if files.index_is_loaded() {
                            num_loaded_indices += 1;
                        }
                    } else {
                        // Packs and indices are immutable, so no need to check modification times. Unchanged multi-pack indices also
                        // are handled like this just to be sure they are in the desired state. For these, the only way a change could
                        // happen is if somebody deletes the file and then puts it back.
                        if Self::assure_slot_matches_index(&write, slot, index_info, mtime, index.generation) {
                            num_loaded_indices += 1;
                        }
                        new_slot_map_indices.push(slot_idx);
                    }
                }
                None => index_paths_to_add.push_back((index_info, mtime, None)),
            }
        }
        let needs_stable_indices = self.maintain_stable_indices(&write);

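        // Scan for free slots in a ring, starting just past the highest slot index currently in use and
        // wrapping around; each slot is tried at most once before we give up with `InsufficientSlots`.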
        let mut next_possibly_free_index = index
            .slot_indices
            .iter()
            .max()
            .map_or(0, |idx| (idx + 1) % self.files.len());
        let mut num_indices_checked = 0;
        let mut needs_generation_change = false;
        let mut slot_indices_to_remove: Vec<_> = idx_by_index_path.into_values().collect();
        while let Some((mut index_info, mtime, move_from_slot_idx)) = index_paths_to_add.pop_front() {
            'increment_slot_index: loop {
                if num_indices_checked == self.files.len() {
                    return Err(Error::InsufficientSlots {
                        current: self.files.len(),
                        needed: index_paths_to_add.len() + 1, /*the one currently popped off*/
                    });
                }
                let slot_index = next_possibly_free_index;
                let slot = &self.files[slot_index];
                next_possibly_free_index = (next_possibly_free_index + 1) % self.files.len();
                num_indices_checked += 1;
                match move_from_slot_idx {
                    Some(move_from_slot_idx) => {
                        debug_assert!(index_info.is_multi_index(), "only set for multi-pack indices");
                        if slot_index == move_from_slot_idx {
                            // Don't try to move onto ourselves.
                            continue 'increment_slot_index;
                        }
                        match Self::try_set_index_slot(
                            &write,
                            slot,
                            index_info,
                            mtime,
                            index.generation,
                            needs_stable_indices,
                        ) {
                            Ok(dest_was_empty) => {
                                slot_indices_to_remove.push(move_from_slot_idx);
                                new_slot_map_indices.push(slot_index);
                                // To avoid handing out the wrong pack (due to reassigned pack ids), declare this a new generation.
                                if !dest_was_empty {
                                    needs_generation_change = true;
                                }
                                break 'increment_slot_index;
                            }
                            Err(unused_index_info) => index_info = unused_index_info,
                        }
                    }
                    None => {
                        match Self::try_set_index_slot(
                            &write,
                            slot,
                            index_info,
                            mtime,
                            index.generation,
                            needs_stable_indices,
                        ) {
                            Ok(dest_was_empty) => {
                                new_slot_map_indices.push(slot_index);
                                if !dest_was_empty {
                                    needs_generation_change = true;
                                }
                                break 'increment_slot_index;
                            }
                            Err(unused_index_info) => index_info = unused_index_info,
                        }
                    }
                }
                // This isn't racy as it's only us who can change the Option::Some/None state of a slot.
            }
        }
        assert_eq!(
            index_paths_to_add.len(),
            0,
            "By this time we have assigned all new files to slots"
        );

        let generation = if needs_generation_change {
            index.generation.checked_add(1).ok_or(Error::GenerationOverflow)?
        } else {
            index.generation
        };
        let index_unchanged = index.slot_indices == new_slot_map_indices;
        if generation != index.generation {
            assert!(
                !index_unchanged,
                "if the generation changed, the slot index must have changed for sure"
            );
        }
        if !index_unchanged || loose_dbs != index.loose_dbs {
            let new_index = Arc::new(SlotMapIndex {
                slot_indices: new_slot_map_indices,
                loose_dbs,
                generation,
                // If there was a prior generation, some indices might already be loaded. But we deal with it by trying to load
                // the next index then, until we find one.
                next_index_to_load: index_unchanged
                    .then(|| Arc::clone(&index.next_index_to_load))
                    .unwrap_or_default(),
                loaded_indices: index_unchanged
                    .then(|| Arc::clone(&index.loaded_indices))
                    .unwrap_or_else(|| Arc::new(num_loaded_indices.into())),
                num_indices_currently_being_loaded: Default::default(),
            });
            self.index.store(new_index);
        }

        // Deleted items: remove their slots AFTER we have set the new index if we may alter indices; otherwise we only declare them garbage.
        // Removing slots may cause pack loading to fail, and the loaders will then reload their indices.
        for slot in slot_indices_to_remove.into_iter().map(|idx| &self.files[idx]) {
            let _lock = slot.write.lock();
            let mut files = slot.files.load_full();
            let files_mut = Arc::make_mut(&mut files);
            if needs_stable_indices {
                if let Some(files) = files_mut.as_mut() {
                    files.trash();
                    // The generation stays the same, as it's still the same index, merely scheduled for eventual removal.
                }
            } else {
                *files_mut = None;
            };
            slot.files.store(files);
            if !needs_stable_indices {
                // Not racy due to lock: the generation must be set after unsetting the slot value AND storing it.
                slot.generation.store(generation, Ordering::SeqCst);
            }
        }

        let new_index = self.index.load();
        Ok(if index.state_id() == new_index.state_id() {
            // There was no change, and nothing was loaded in the meantime; reflect that in the return value to not get into loops.
            None
        } else {
            if load_new_index {
                self.load_next_index(new_index);
            }
            Some(self.collect_snapshot())
        })
    }

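    /// Return `(index_info, mtime, file_size)` for each pack index or multi-pack index found in `db_paths`,
    /// sorted by file size with the biggest files first.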
    pub(crate) fn collect_indices_and_mtime_sorted_by_size(
        db_paths: Vec<PathBuf>,
        initial_capacity: Option<usize>,
        multi_pack_index_object_hash: Option<gix_hash::Kind>,
    ) -> Result<Vec<(Either, SystemTime, u64)>, Error> {
        let mut indices_by_modification_time = Vec::with_capacity(initial_capacity.unwrap_or_default());
        for db_path in db_paths {
            let packs = db_path.join("pack");
            let entries = match std::fs::read_dir(packs) {
                Ok(e) => e,
                Err(err) if err.kind() == std::io::ErrorKind::NotFound => continue,
                Err(err) => return Err(err.into()),
            };
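            // Keep regular `.idx` files that have a corresponding `.pack`, plus (if enabled) multi-pack-index
            // files, recording the mtime and file size of each.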
            let indices = entries
                .filter_map(Result::ok)
                .filter_map(|e| e.metadata().map(|md| (e.path(), md)).ok())
                .filter(|(_, md)| md.file_type().is_file())
                .filter(|(p, _)| {
                    let ext = p.extension();
                    (ext == Some(OsStr::new("idx")) && p.with_extension("pack").is_file())
                        || (multi_pack_index_object_hash.is_some() && ext.is_none() && is_multipack_index(p))
                })
                .map(|(p, md)| md.modified().map_err(Error::from).map(|mtime| (p, mtime, md.len())))
                .collect::<Result<Vec<_>, _>>()?;

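            // If multi-pack indices are enabled, open the one in this directory (if present), but use it
            // only if its object hash matches ours, and fail if it refers to more packs than pack ids can address.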
            let multi_index_info = multi_pack_index_object_hash
                .and_then(|hash| {
                    indices.iter().find_map(|(p, a, b)| {
                        is_multipack_index(p)
                            .then(|| {
                                // We always open the multi-pack index here to be able to remove indices.
                                gix_pack::multi_index::File::at(p)
                                    .ok()
                                    .filter(|midx| midx.object_hash() == hash)
                                    .map(|midx| (midx, *a, *b))
                            })
                            .flatten()
                            .map(|t| {
                                if t.0.num_indices() > PackId::max_packs_in_multi_index() {
                                    Err(Error::TooManyPacksInMultiIndex {
                                        index_path: p.to_owned(),
                                        actual: t.0.num_indices(),
                                        limit: PackId::max_packs_in_multi_index(),
                                    })
                                } else {
                                    Ok(t)
                                }
                            })
                    })
                })
                .transpose()?;
            if let Some((multi_index, mtime, flen)) = multi_index_info {
                let index_names_in_multi_index: Vec<_> = multi_index.index_names().iter().map(AsRef::as_ref).collect();
                let mut indices_not_in_multi_index: Vec<(Either, _, _)> = indices
                    .into_iter()
                    .filter_map(|(path, a, b)| {
                        (path != multi_index.path()
                            && !index_names_in_multi_index
                                .contains(&Path::new(path.file_name().expect("file name present"))))
                        .then_some((Either::IndexPath(path), a, b))
                    })
                    .collect();
                indices_not_in_multi_index.insert(0, (Either::MultiIndexFile(Arc::new(multi_index)), mtime, flen));
                indices_by_modification_time.extend(indices_not_in_multi_index);
            } else {
                indices_by_modification_time.extend(
                    indices
                        .into_iter()
                        .filter_map(|(p, a, b)| (!is_multipack_index(&p)).then_some((Either::IndexPath(p), a, b))),
                )
            }
        }
        // Unlike libgit2, do not sort by modification date, but by size, putting the biggest indices first. That way
        // the chance to hit an object should be higher. We leave it to the handle to sort by LRU.
        // Git itself doesn't change the order, which may save time, but we want it to be stable, which also helps some tests.
        // NOTE: this will work well for well-packed repos or those using geometric repacking, but it forces us to open a lot
        // of files when dealing with new objects, as there is no notion of recency here, which would matter for unmaintained
        // repositories. Different algorithms should be provided, like newest packs first, and possibly a mix of both
        // with big packs first, then sorting by recency for smaller packs.
        // We also want to implement `fetch.unpackLimit` to alleviate this issue a little.
        indices_by_modification_time.sort_by(|l, r| l.2.cmp(&r.2).reverse());
        Ok(indices_by_modification_time)
    }

    /// Returns `Ok(dest_slot_was_empty)` if the assignment could happen because the destination slot was actually
    /// free or disposable; the boolean is `true` if the slot was empty. Returns `Err(index_info)` if the slot
    /// couldn't be used, handing the index back to the caller to try another slot.
    #[allow(clippy::too_many_arguments)]
    fn try_set_index_slot(
        lock: &parking_lot::MutexGuard<'_, ()>,
        dest_slot: &MutableIndexAndPack,
        index_info: Either,
        mtime: SystemTime,
        current_generation: Generation,
        needs_stable_indices: bool,
    ) -> Result<bool, Either> {
        let (dest_slot_was_empty, generation) = match &**dest_slot.files.load() {
            Some(bundle) => {
                if bundle.index_path() == index_info.path() || (bundle.is_disposable() && needs_stable_indices) {
                    // It's possible to see ourselves in case all slots are taken, but there are still a few more
                    // destination slots to look at.
                    return Err(index_info);
                }
                // Since we overwrite an existing slot, we have to increment the generation to prevent anyone from
                // trying to use it while we are replacing it with a different value.
                // In detail:
                // We need to declare this to be the future to avoid anything in that slot being returned to people who
                // last saw the old state. They will then try to get a new index, which by that time might be available,
                // so they get the latest one. If not, they will probably get into the same situation again until
                // it finally succeeds. Alternatively, the object will be reported unobtainable, but at least it won't return
                // some other object.
                (false, current_generation + 1)
            }
            None => {
                // For multi-pack indices:
                // Do NOT copy the packs over, they need to be reopened to get the correct pack id matching the new slot map index.
                // If we are allowed to delete the original, and nobody has the pack referenced, it is closed, which is preferred.
                // Thus we simply always start fresh with packs in multi-pack indices.
                // In the worst case this could mean duplicate file handle usage though, as the old and the new index can't share
                // packs due to the intrinsic id.
                // Note that the ID is used for cache access, too, so it must be unique. It must also be mappable from pack-id to slotmap id.
                (true, current_generation)
            }
        };
        Self::set_slot_to_index(lock, dest_slot, index_info, mtime, generation);
        Ok(dest_slot_was_empty)
    }

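    /// Put `index_info` into `slot`, bumping the slot's generation to `generation` before the value is swapped in,
    /// so readers of an older generation back off and refresh their index.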
    fn set_slot_to_index(
        _lock: &parking_lot::MutexGuard<'_, ()>,
        slot: &MutableIndexAndPack,
        index_info: Either,
        mtime: SystemTime,
        generation: Generation,
    ) {
        let _lock = slot.write.lock();
        let mut files = slot.files.load_full();
        let files_mut = Arc::make_mut(&mut files);
        // Set the generation before we actually change the value, otherwise readers of old generations could observe the new one.
        // We rather want them to turn around here and update their index, which, by that time, might actually already be available.
        // If not, they would fail, unable to load a pack or index they need, but that's preferred over returning wrong objects.
        // Safety: can't race as we hold the lock; the generation has to be set beforehand to help avoid others observing the value.
        slot.generation.store(generation, Ordering::SeqCst);
        *files_mut = Some(index_info.into_index_and_packs(mtime));
        slot.files.store(files);
    }

    /// Returns true if the index was left in a loaded state.
    fn assure_slot_matches_index(
        _lock: &parking_lot::MutexGuard<'_, ()>,
        slot: &MutableIndexAndPack,
        index_info: Either,
        mtime: SystemTime,
        current_generation: Generation,
    ) -> bool {
        match Option::as_ref(&slot.files.load()) {
            Some(bundle) => {
                assert_eq!(
                    bundle.index_path(),
                    index_info.path(),
                    "Parallel writers cannot change the file the slot points to."
                );
                if bundle.is_disposable() {
                    // Put it back into the correct mode: it's now available for sure, so it should not be missing or garbage.
                    // The latter can happen if files are removed and put back for some reason, but we should definitely
                    // have them in a decent state now that we know/think they are there.
                    let _lock = slot.write.lock();
                    let mut files = slot.files.load_full();
                    let files_mut = Arc::make_mut(&mut files)
                        .as_mut()
                        .expect("BUG: cannot change from something to nothing, would be race");
                    files_mut.put_back();
                    debug_assert_eq!(
                        files_mut.mtime(),
                        mtime,
                        "BUG: we can only put back files that didn't obviously change"
                    );
                    // Safety: can't race as we hold the lock; must be set before replacing the data.
                    // NOTE that we don't change the generation, as it's still the very same index we are talking about;
                    // its identity doesn't change.
                    slot.generation.store(current_generation, Ordering::SeqCst);
                    slot.files.store(files);
                } else {
                    // It's already in the correct state, either loaded or unloaded.
                }
                bundle.index_is_loaded()
            }
            None => {
                unreachable!("BUG: a slot can never be deleted if we have it recorded in the index WHILE changing said index. There shouldn't be a race")
            }
        }
    }

    /// Stability means that indices returned by this API will remain valid.
    /// Without that constraint, we may unload unused packs and indices, and may rebuild the slotmap index.
    ///
    /// Note that this must be called with a lock to the relevant state held to assure these values don't change while
    /// we are working on said index.
    fn maintain_stable_indices(&self, _guard: &parking_lot::MutexGuard<'_, ()>) -> bool {
        self.num_handles_stable.load(Ordering::SeqCst) > 0
    }

    pub(crate) fn collect_snapshot(&self) -> Snapshot {
        let index = self.index.load();
        let indices = if index.is_initialized() {
            index
                .slot_indices
                .iter()
                .map(|idx| (*idx, &self.files[*idx]))
                .filter_map(|(id, file)| {
                    let lookup = match (**file.files.load()).as_ref()? {
                        types::IndexAndPacks::Index(bundle) => handle::SingleOrMultiIndex::Single {
                            index: bundle.index.loaded()?.clone(),
                            data: bundle.data.loaded().cloned(),
                        },
                        types::IndexAndPacks::MultiIndex(multi) => handle::SingleOrMultiIndex::Multi {
                            index: multi.multi_index.loaded()?.clone(),
                            data: multi.data.iter().map(|f| f.loaded().cloned()).collect(),
                        },
                    };
                    handle::IndexLookup { file: lookup, id }.into()
                })
                .collect()
        } else {
            Vec::new()
        };

        Snapshot {
            indices,
            loose_dbs: Arc::clone(&index.loose_dbs),
            marker: index.marker(),
        }
    }
}

fn is_multipack_index(path: &Path) -> bool {
    path.file_name() == Some(OsStr::new("multi-pack-index"))
}

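/// A counter guard which increments the given atomic on creation and decrements it on drop,
/// used to track how many index-load operations are currently in flight.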
struct IncOnNewAndDecOnDrop<'a>(&'a AtomicU16);
impl<'a> IncOnNewAndDecOnDrop<'a> {
    pub fn new(v: &'a AtomicU16) -> Self {
        v.fetch_add(1, Ordering::SeqCst);
        Self(v)
    }
}
impl<'a> Drop for IncOnNewAndDecOnDrop<'a> {
    fn drop(&mut self) {
        self.0.fetch_sub(1, Ordering::SeqCst);
    }
}

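/// A guard which increments the given counter when dropped, used to count finished load attempts,
/// successful or not.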
struct IncOnDrop<'a>(&'a AtomicUsize);
impl<'a> Drop for IncOnDrop<'a> {
    fn drop(&mut self) {
        self.0.fetch_add(1, Ordering::SeqCst);
    }
}

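/// An index file on disk: either a standalone `.idx` file identified by its path, or an opened multi-pack index.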
pub(crate) enum Either {
    IndexPath(PathBuf),
    MultiIndexFile(Arc<gix_pack::multi_index::File>),
}

impl Either {
    fn path(&self) -> &Path {
        match self {
            Either::IndexPath(p) => p,
            Either::MultiIndexFile(f) => f.path(),
        }
    }

    fn into_index_and_packs(self, mtime: SystemTime) -> IndexAndPacks {
        match self {
            Either::IndexPath(path) => IndexAndPacks::new_single(path, mtime),
            Either::MultiIndexFile(file) => IndexAndPacks::new_multi_from_open_file(file, mtime),
        }
    }

    fn is_multi_index(&self) -> bool {
        matches!(self, Either::MultiIndexFile(_))
    }
}

impl Eq for Either {}

impl PartialEq<Self> for Either {
    fn eq(&self, other: &Self) -> bool {
        self.path().eq(other.path())
    }
}

impl PartialOrd<Self> for Either {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.path().partial_cmp(other.path())
    }
}

impl Ord for Either {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.path().cmp(other.path())
    }
}