//! Random access for PXAR files.

use std::ffi::{OsStr, OsString};
use std::io;
use std::mem::{self, size_of, size_of_val, MaybeUninit};
use std::ops::Range;
use std::os::unix::ffi::{OsStrExt, OsStringExt};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use endian_trait::Endian;

use crate::binary_tree_array;
use crate::decoder::{self, DecoderImpl};
use crate::format::{self, GoodbyeItem};
use crate::poll_fn::poll_fn;
use crate::util;
use crate::{Entry, EntryKind};

pub mod aio;
pub mod cache;
pub mod sync;

#[doc(inline)]
pub use sync::{Accessor, DirEntry, Directory, FileEntry, ReadDir};

use cache::Cache;

/// Random access read implementation.
pub trait ReadAt {
    fn poll_read_at(
        self: Pin<&Self>,
        cx: &mut Context,
        buf: &mut [u8],
        offset: u64,
    ) -> Poll<io::Result<usize>>;
}
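
// As an illustration only: a minimal sketch of what a `ReadAt` implementation over a plain
// in-memory buffer could look like. `MemReader` is a hypothetical type that is not part of
// this crate; reads complete immediately since the data is already in memory.
//
//     struct MemReader(Vec<u8>);
//
//     impl ReadAt for MemReader {
//         fn poll_read_at(
//             self: Pin<&Self>,
//             _cx: &mut Context,
//             buf: &mut [u8],
//             offset: u64,
//         ) -> Poll<io::Result<usize>> {
//             let data = &self.0;
//             let start = data.len().min(offset as usize);
//             let len = buf.len().min(data.len() - start);
//             buf[..len].copy_from_slice(&data[start..start + len]);
//             Poll::Ready(Ok(len))
//         }
//     }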

/// awaitable version of `poll_read_at`.
async fn read_at<T>(input: &T, buf: &mut [u8], offset: u64) -> io::Result<usize>
where
    T: ReadAt + ?Sized,
{
    poll_fn(|cx| unsafe { Pin::new_unchecked(input).poll_read_at(cx, buf, offset) }).await
}

/// `read_exact_at` - since that's what we _actually_ want most of the time.
async fn read_exact_at<T>(input: &T, mut buf: &mut [u8], mut offset: u64) -> io::Result<()>
where
    T: ReadAt + ?Sized,
{
    while !buf.is_empty() {
        match read_at(input, buf, offset).await? {
            0 => io_bail!("unexpected EOF"),
            got => {
                buf = &mut buf[got..];
                offset += got as u64;
            }
        }
    }
    Ok(())
}

/// Helper to read into an `Endian`-implementing `struct`.
async fn read_entry_at<T, E: Endian>(input: &T, offset: u64) -> io::Result<E>
where
    T: ReadAt + ?Sized,
{
    let mut data = MaybeUninit::<E>::uninit();
    let buf =
        unsafe { std::slice::from_raw_parts_mut(data.as_mut_ptr() as *mut u8, size_of::<E>()) };
    read_exact_at(input, buf, offset).await?;
    Ok(unsafe { data.assume_init().from_le() })
}
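
// For illustration: this helper is used further down to read fixed-size, on-disk structures
// straight out of the archive, e.g. (sketch; `input` and the offsets are assumed to be valid):
//
//     let head: format::Header = read_entry_at(input, offset).await?;
//     let tail: GoodbyeItem = read_entry_at(input, tail_offset).await?;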

/// Helper to read into an allocated byte vector.
async fn read_exact_data_at<T>(input: &T, size: usize, offset: u64) -> io::Result<Vec<u8>>
where
    T: ReadAt + ?Sized,
{
    let mut data = util::vec_new(size);
    read_exact_at(input, &mut data[..], offset).await?;
    Ok(data)
}

/// Allow using trait objects for `T: ReadAt`
impl<'a> ReadAt for &(dyn ReadAt + 'a) {
    fn poll_read_at(
        self: Pin<&Self>,
        cx: &mut Context,
        buf: &mut [u8],
        offset: u64,
    ) -> Poll<io::Result<usize>> {
        unsafe { Pin::new_unchecked(&**self).poll_read_at(cx, buf, offset) }
    }
}

/// Convenience impl for `Arc<dyn ReadAt + Send + Sync + 'static>`. Since `ReadAt` only requires
/// immutable `&self`, this adds some convenience by allowing one to just wrap any `'static` type
/// that implements `ReadAt` in an `Arc` and use it as a single concrete type for monomorphization.
impl ReadAt for Arc<dyn ReadAt + Send + Sync + 'static> {
    fn poll_read_at(
        self: Pin<&Self>,
        cx: &mut Context,
        buf: &mut [u8],
        offset: u64,
    ) -> Poll<io::Result<usize>> {
        unsafe { Pin::new_unchecked(&**self).poll_read_at(cx, buf, offset) }
    }
}
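
// Illustrative sketch only: with the impl above, a concrete reader can be type-erased up front
// so that all accessor types are instantiated for a single input type. `MyReader` is a
// hypothetical type implementing `ReadAt + Send + Sync + 'static`:
//
//     let input: Arc<dyn ReadAt + Send + Sync + 'static> = Arc::new(MyReader::new());
//     // `input` can now be cloned cheaply and used wherever a `T: Clone + ReadAt` is expected.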

#[derive(Clone)]
struct Caches {
    /// The goodbye table cache maps goodbye table offsets to cache entries.
    gbt_cache: Option<Arc<dyn Cache<u64, [GoodbyeItem]> + Send + Sync>>,
}

impl Default for Caches {
    fn default() -> Self {
        Self { gbt_cache: None }
    }
}

/// The random access state machine implementation.
pub(crate) struct AccessorImpl<T> {
    input: T,
    size: u64,
    caches: Arc<Caches>,
}

impl<T: ReadAt> AccessorImpl<T> {
    pub async fn new(input: T, size: u64) -> io::Result<Self> {
        if size < (size_of::<GoodbyeItem>() as u64) {
            io_bail!("too small to contain a pxar archive");
        }

        Ok(Self {
            input,
            size,
            caches: Arc::new(Caches::default()),
        })
    }

    pub fn size(&self) -> u64 {
        self.size
    }

    pub async fn open_root_ref<'a>(&'a self) -> io::Result<DirectoryImpl<&'a dyn ReadAt>> {
        DirectoryImpl::open_at_end(
            &self.input as &dyn ReadAt,
            self.size,
            "/".into(),
            Arc::clone(&self.caches),
        )
        .await
    }

    pub fn set_goodbye_table_cache(
        &mut self,
        cache: Option<Arc<dyn Cache<u64, [GoodbyeItem]> + Send + Sync>>,
    ) {
        let new_caches = Arc::new(Caches {
            gbt_cache: cache,
            ..*self.caches
        });
        self.caches = new_caches;
    }
}
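
// Illustrative sketch only: a goodbye-table cache can be shared between accessors to avoid
// re-reading and re-parsing the goodbye tables of frequently visited directories. The names
// below are hypothetical; any value implementing `Cache<u64, [GoodbyeItem]> + Send + Sync`
// (keyed by goodbye table offset) works, and `accessor` is assumed to be a mutable
// `AccessorImpl`:
//
//     let cache: Arc<dyn Cache<u64, [GoodbyeItem]> + Send + Sync> = Arc::new(my_cache);
//     accessor.set_goodbye_table_cache(Some(cache));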

async fn get_decoder<T: ReadAt>(
    input: T,
    entry_range: Range<u64>,
    path: PathBuf,
) -> io::Result<DecoderImpl<SeqReadAtAdapter<T>>> {
    Ok(DecoderImpl::new_full(SeqReadAtAdapter::new(input, entry_range), path).await?)
}

impl<T: Clone + ReadAt> AccessorImpl<T> {
    pub async fn open_root(&self) -> io::Result<DirectoryImpl<T>> {
        DirectoryImpl::open_at_end(
            self.input.clone(),
            self.size,
            "/".into(),
            Arc::clone(&self.caches),
        )
        .await
    }

    /// Allow opening a directory at a specified offset.
    pub async unsafe fn open_dir_at_end(&self, offset: u64) -> io::Result<DirectoryImpl<T>> {
        DirectoryImpl::open_at_end(
            self.input.clone(),
            offset,
            "/".into(),
            Arc::clone(&self.caches),
        )
        .await
    }

    /// Allow opening a regular file from a specified range.
    pub async unsafe fn open_file_at_range(
        &self,
        range: Range<u64>,
    ) -> io::Result<FileEntryImpl<T>> {
        let mut decoder = get_decoder(self.input.clone(), range.clone(), PathBuf::new()).await?;
        let entry = decoder
            .next()
            .await
            .ok_or_else(|| io_format_err!("unexpected EOF while decoding file entry"))??;
        Ok(FileEntryImpl {
            input: self.input.clone(),
            entry,
            entry_range: range,
            caches: Arc::clone(&self.caches),
        })
    }

    /// Allow opening arbitrary contents from a specific range.
    pub unsafe fn open_contents_at_range(&self, range: Range<u64>) -> FileContentsImpl<T> {
        FileContentsImpl::new(self.input.clone(), range)
    }

    /// Following a hardlink breaks a couple of conventions we otherwise have: in particular, we
    /// never know the actual length of the target entry until we're done decoding it, so this
    /// needs to happen at the accessor level rather than in a "sub-entry reader".
    pub async fn follow_hardlink(&self, link: &format::Hardlink) -> io::Result<FileEntryImpl<T>> {
        let mut decoder = get_decoder(
            self.input.clone(),
            link.offset..self.size,
            PathBuf::from(link.as_os_str()),
        )
        .await?;
        let entry = decoder
            .next()
            .await
            .ok_or_else(|| io_format_err!("unexpected EOF while following a hardlink"))??;
        match entry.kind() {
            EntryKind::File { offset: None, .. } => {
                io_bail!("failed to follow hardlink, reader provided no offsets");
            }
            EntryKind::File {
                offset: Some(offset),
                size,
            } => {
                let meta_size = offset - link.offset;
                let entry_end = link.offset + meta_size + size;
                Ok(FileEntryImpl {
                    input: self.input.clone(),
                    entry,
                    entry_range: link.offset..entry_end,
                    caches: Arc::clone(&self.caches),
                })
            }
            _ => io_bail!("hardlink does not point to a regular file"),
        }
    }
}
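
// A worked example of the offset arithmetic in `follow_hardlink` above, with made-up numbers:
// if the target's entry starts at `link.offset = 1000` and the decoder reports the file payload
// at `offset = 1100` with `size = 400`, then `meta_size = 100` bytes of entry/metadata precede
// the payload and `entry_end = 1000 + 100 + 400 = 1500`, so the resulting `entry_range` is
// `1000..1500`.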

/// The directory random-access state machine implementation.
pub(crate) struct DirectoryImpl<T> {
    input: T,
    entry_ofs: u64,
    goodbye_ofs: u64,
    size: u64,
    table: Arc<[GoodbyeItem]>,
    path: PathBuf,
    caches: Arc<Caches>,
}

impl<T: Clone + ReadAt> DirectoryImpl<T> {
    /// Open a directory ending at the specified position.
    async fn open_at_end(
        input: T,
        end_offset: u64,
        path: PathBuf,
        caches: Arc<Caches>,
    ) -> io::Result<DirectoryImpl<T>> {
        let tail = Self::read_tail_entry(&input, end_offset).await?;

        if end_offset < tail.size {
            io_bail!("goodbye tail size out of range");
        }

        let goodbye_ofs = end_offset - tail.size;

        if goodbye_ofs < tail.offset {
            io_bail!("goodbye offset out of range");
        }

        let entry_ofs = goodbye_ofs - tail.offset;
        let size = end_offset - entry_ofs;

        let table: Option<Arc<[GoodbyeItem]>> = caches
            .gbt_cache
            .as_ref()
            .and_then(|cache| cache.fetch(goodbye_ofs));

        let mut this = Self {
            input,
            entry_ofs,
            goodbye_ofs,
            size,
            table: table.as_ref().map_or_else(|| Arc::new([]), Arc::clone),
            path,
            caches,
        };

        // sanity check:
        if this.table_size() % (size_of::<GoodbyeItem>() as u64) != 0 {
            io_bail!("invalid goodbye table size: {}", this.table_size());
        }

        if table.is_none() {
            this.table = this.load_table().await?;
            if let Some(ref cache) = this.caches.gbt_cache {
                cache.insert(goodbye_ofs, Arc::clone(&this.table));
            }
        }

        Ok(this)
    }
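
    // For illustration, with made-up numbers: if a directory's data ends at
    // `end_offset = 10_000`, and the goodbye tail marker records `tail.size = 400` (the size
    // of the whole goodbye section) and `tail.offset = 9_000` (the distance from the
    // directory's entry to the goodbye section), then the goodbye section starts at
    // `goodbye_ofs = 10_000 - 400 = 9_600`, the directory entry starts at
    // `entry_ofs = 9_600 - 9_000 = 600`, and the directory spans
    // `size = 10_000 - 600 = 9_400` bytes.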

    /// Load the entire goodbye table:
    async fn load_table(&self) -> io::Result<Arc<[GoodbyeItem]>> {
        let len = self.len();
        let mut data = Vec::with_capacity(self.len());
        unsafe {
            data.set_len(len);
            let slice = std::slice::from_raw_parts_mut(
                data.as_mut_ptr() as *mut u8,
                len * size_of::<GoodbyeItem>(),
            );
            read_exact_at(&self.input, slice, self.table_offset()).await?;
            drop(slice);
        }
        Ok(Arc::from(data))
    }

    #[inline]
    fn end_offset(&self) -> u64 {
        self.entry_ofs + self.size
    }

    #[inline]
    fn entry_range(&self) -> Range<u64> {
        self.entry_ofs..self.end_offset()
    }

    #[inline]
    fn table_size(&self) -> u64 {
        (self.end_offset() - self.goodbye_ofs) - (size_of::<format::Header>() as u64)
    }

    #[inline]
    fn table_offset(&self) -> u64 {
        self.goodbye_ofs + (size_of::<format::Header>() as u64)
    }

    /// Length *excluding* the tail marker!
    #[inline]
    fn len(&self) -> usize {
        (self.table_size() / (size_of::<GoodbyeItem>() as u64)) as usize - 1
    }

    /// Read the goodbye tail and perform some sanity checks.
    async fn read_tail_entry(input: &T, end_offset: u64) -> io::Result<GoodbyeItem> {
        if end_offset < (size_of::<GoodbyeItem>() as u64) {
            io_bail!("goodbye tail does not fit");
        }

        let tail_offset = end_offset - (size_of::<GoodbyeItem>() as u64);
        let tail: GoodbyeItem = read_entry_at(input, tail_offset).await?;

        if tail.hash != format::PXAR_GOODBYE_TAIL_MARKER {
            io_bail!("no goodbye tail marker found");
        }

        Ok(tail)
    }

    /// Get a decoder for the directory contents.
    pub(crate) async fn decode_full(&self) -> io::Result<DecoderImpl<SeqReadAtAdapter<T>>> {
        let (dir, decoder) = self.decode_one_entry(self.entry_range(), None).await?;
        if !dir.is_dir() {
            io_bail!("directory does not seem to be a directory");
        }
        Ok(decoder)
    }

    async fn get_decoder(
        &self,
        entry_range: Range<u64>,
        file_name: Option<&Path>,
    ) -> io::Result<DecoderImpl<SeqReadAtAdapter<T>>> {
        get_decoder(
            self.input.clone(),
            entry_range,
            match file_name {
                None => self.path.clone(),
                Some(file) => self.path.join(file),
            },
        )
        .await
    }

    async fn decode_one_entry(
        &self,
        entry_range: Range<u64>,
        file_name: Option<&Path>,
    ) -> io::Result<(Entry, DecoderImpl<SeqReadAtAdapter<T>>)> {
        let mut decoder = self.get_decoder(entry_range, file_name).await?;
        let entry = decoder
            .next()
            .await
            .ok_or_else(|| io_format_err!("unexpected EOF while decoding directory entry"))??;
        Ok((entry, decoder))
    }

    fn lookup_hash_position(&self, hash: u64, start: usize, skip: usize) -> Option<usize> {
        binary_tree_array::search_by(&self.table, start, skip, |i| hash.cmp(&i.hash))
    }

    pub async fn lookup_self(&self) -> io::Result<FileEntryImpl<T>> {
        let (entry, _decoder) = self.decode_one_entry(self.entry_range(), None).await?;
        Ok(FileEntryImpl {
            input: self.input.clone(),
            entry,
            entry_range: self.entry_range(),
            caches: Arc::clone(&self.caches),
        })
    }

    /// Lookup a directory entry.
    pub async fn lookup(&self, path: &Path) -> io::Result<Option<FileEntryImpl<T>>> {
        let mut cur: Option<FileEntryImpl<T>> = None;

        let mut first = true;
        for component in path.components() {
            use std::path::Component;

            let first = mem::replace(&mut first, false);

            let component = match component {
                Component::Normal(path) => path,
                Component::ParentDir => io_bail!("cannot enter parent directory in archive"),
                Component::RootDir | Component::CurDir if first => {
                    cur = Some(self.lookup_self().await?);
                    continue;
                }
                Component::CurDir => continue,
                _ => io_bail!("invalid component in path"),
            };

            let next = match cur {
                Some(entry) => {
                    entry
                        .enter_directory()
                        .await?
                        .lookup_component(component)
                        .await?
                }
                None => self.lookup_component(component).await?,
            };

            if next.is_none() {
                return Ok(None);
            }

            cur = next;
        }

        Ok(cur)
    }
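
    // Sketch of how `lookup` walks a multi-component path (the path is illustrative and `root`
    // is assumed to be the root `DirectoryImpl`):
    //
    //     if let Some(entry) = root.lookup(Path::new("usr/bin/env")).await? {
    //         // each intermediate component was resolved via `lookup_component` and
    //         // entered with `enter_directory`
    //     }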

    /// Lookup a single directory entry component (does not handle multiple components in path)
    pub async fn lookup_component(&self, path: &OsStr) -> io::Result<Option<FileEntryImpl<T>>> {
        let hash = format::hash_filename(path.as_bytes());
        let first_index = match self.lookup_hash_position(hash, 0, 0) {
            Some(index) => index,
            None => return Ok(None),
        };

        // Look up FILENAME: if the hash matches but the filename doesn't, check for a duplicate
        // hash. Once found, use the GoodbyeItem's offset and size as well as the file's Entry to
        // return the resulting directory or file entry.
        //
        let mut dup = 0;
        loop {
            let index = match self.lookup_hash_position(hash, first_index, dup) {
                Some(index) => index,
                None => return Ok(None),
            };

            let cursor = self.get_cursor(index).await?;
            if cursor.file_name == path {
                return Ok(Some(cursor.decode_entry().await?));
            }

            dup += 1;
        }
    }

    async fn get_cursor<'a>(&'a self, index: usize) -> io::Result<DirEntryImpl<'a, T>> {
        let entry = &self.table[index];
        let file_goodbye_ofs = entry.offset;
        if self.goodbye_ofs < file_goodbye_ofs {
            io_bail!("invalid file offset");
        }

        let file_ofs = self.goodbye_ofs - file_goodbye_ofs;
        let (file_name, entry_ofs) = self.read_filename_entry(file_ofs).await?;

        let entry_range = Range {
            start: entry_ofs,
            end: file_ofs + entry.size,
        };
        if entry_range.end < entry_range.start {
            io_bail!(
                "bad file: invalid entry ranges for {:?}: \
                 start=0x{:x}, file_ofs=0x{:x}, size=0x{:x}",
                file_name,
                entry_ofs,
                file_ofs,
                entry.size,
            );
        }

        Ok(DirEntryImpl {
            dir: self,
            file_name,
            entry_range,
            caches: Arc::clone(&self.caches),
        })
    }

    async fn read_filename_entry(&self, file_ofs: u64) -> io::Result<(PathBuf, u64)> {
        let head: format::Header = read_entry_at(&self.input, file_ofs).await?;
        if head.htype != format::PXAR_FILENAME {
            io_bail!("expected PXAR_FILENAME header, found: {:x}", head.htype);
        }

        let mut path = read_exact_data_at(
            &self.input,
            head.content_size() as usize,
            file_ofs + (size_of_val(&head) as u64),
        )
        .await?;

        if path.pop() != Some(0) {
            io_bail!("invalid file name (missing terminating zero)");
        }

        if path.is_empty() {
            io_bail!("invalid empty file name");
        }

        let file_name = PathBuf::from(OsString::from_vec(path));
        format::check_file_name(&file_name)?;

        Ok((file_name, file_ofs + head.full_size()))
    }

    pub fn read_dir(&self) -> ReadDirImpl<T> {
        ReadDirImpl::new(self, 0)
    }

    pub fn entry_count(&self) -> usize {
        self.table.len()
    }
}

/// A file entry retrieved from a Directory.
#[derive(Clone)]
pub(crate) struct FileEntryImpl<T: Clone + ReadAt> {
    input: T,
    entry: Entry,
    entry_range: Range<u64>,
    caches: Arc<Caches>,
}

impl<T: Clone + ReadAt> FileEntryImpl<T> {
    pub async fn enter_directory(&self) -> io::Result<DirectoryImpl<T>> {
        if !self.entry.is_dir() {
            io_bail!("enter_directory() on a non-directory");
        }

        DirectoryImpl::open_at_end(
            self.input.clone(),
            self.entry_range.end,
            self.entry.path.clone(),
            Arc::clone(&self.caches),
        )
        .await
    }

    /// For use with unsafe accessor methods.
    pub fn content_range(&self) -> io::Result<Option<Range<u64>>> {
        match self.entry.kind {
            EntryKind::File { offset: None, .. } => {
                io_bail!("cannot open file, reader provided no offset")
            }
            EntryKind::File {
                size,
                offset: Some(offset),
            } => Ok(Some(offset..(offset + size))),
            _ => Ok(None),
        }
    }

    pub async fn contents(&self) -> io::Result<FileContentsImpl<T>> {
        match self.content_range()? {
            Some(range) => Ok(FileContentsImpl::new(self.input.clone(), range)),
            None => io_bail!("not a file"),
        }
    }

    #[inline]
    pub fn into_entry(self) -> Entry {
        self.entry
    }

    #[inline]
    pub fn entry(&self) -> &Entry {
        &self.entry
    }

    /// Exposed for raw by-offset access methods (use with `open_dir_at_end`).
    #[inline]
    pub fn entry_range(&self) -> Range<u64> {
        self.entry_range.clone()
    }
}
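
// Sketch of typical use of a file entry (names are illustrative; `file_entry` is assumed to be
// a `FileEntryImpl` for a regular file):
//
//     let contents = file_entry.contents().await?;
//     let mut buf = vec![0u8; contents.file_size() as usize];
//     read_exact_at(&contents, &mut buf, 0).await?;
//
// `contents()` fails for non-file entries; `content_range()` can be used instead when only the
// raw byte range inside the archive is needed (e.g. for `open_contents_at_range`).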

/// An iterator over the contents of a directory.
pub(crate) struct ReadDirImpl<'a, T> {
    dir: &'a DirectoryImpl<T>,
    at: usize,
}

impl<'a, T: Clone + ReadAt> ReadDirImpl<'a, T> {
    fn new(dir: &'a DirectoryImpl<T>, at: usize) -> Self {
        Self { dir, at }
    }

    /// Get the next entry.
    pub async fn next(&mut self) -> io::Result<Option<DirEntryImpl<'a, T>>> {
        if self.at == self.dir.table.len() {
            Ok(None)
        } else {
            let cursor = self.dir.get_cursor(self.at).await?;
            self.at += 1;
            Ok(Some(cursor))
        }
    }

    /// Efficient alternative to `Iterator::skip`.
    #[inline]
    pub fn skip(self, n: usize) -> Self {
        Self {
            at: (self.at + n).min(self.dir.table.len()),
            dir: self.dir,
        }
    }

    /// Efficient alternative to `Iterator::count`.
    #[inline]
    pub fn count(self) -> usize {
        self.dir.table.len()
    }
}
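
// A minimal sketch of iterating a directory with the reader above (`dir` is assumed to be a
// `DirectoryImpl`):
//
//     let mut read_dir = dir.read_dir();
//     while let Some(item) = read_dir.next().await? {
//         println!("{:?}", item.file_name());
//     }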

/// A cursor pointing to a file in a directory.
///
/// At this point only the file name has been read and we remembered the position for finding the
/// actual data. This can be upgraded into a FileEntryImpl.
pub(crate) struct DirEntryImpl<'a, T: Clone + ReadAt> {
    dir: &'a DirectoryImpl<T>,
    file_name: PathBuf,
    entry_range: Range<u64>,
    caches: Arc<Caches>,
}

impl<'a, T: Clone + ReadAt> DirEntryImpl<'a, T> {
    pub fn file_name(&self) -> &Path {
        &self.file_name
    }

    async fn decode_entry(&self) -> io::Result<FileEntryImpl<T>> {
        let (entry, _decoder) = self
            .dir
            .decode_one_entry(self.entry_range.clone(), Some(&self.file_name))
            .await?;

        Ok(FileEntryImpl {
            input: self.dir.input.clone(),
            entry,
            entry_range: self.entry_range(),
            caches: Arc::clone(&self.caches),
        })
    }

    /// Exposed for raw by-offset access methods.
    #[inline]
    pub fn entry_range(&self) -> Range<u64> {
        self.entry_range.clone()
    }
}

/// A reader for file contents.
pub(crate) struct FileContentsImpl<T> {
    input: T,

    /// Absolute offset inside the `input`.
    range: Range<u64>,
}

impl<T: Clone + ReadAt> FileContentsImpl<T> {
    pub fn new(input: T, range: Range<u64>) -> Self {
        Self { input, range }
    }

    #[inline]
    pub fn file_size(&self) -> u64 {
        self.range.end - self.range.start
    }

    async fn read_at(&self, mut buf: &mut [u8], offset: u64) -> io::Result<usize> {
        let size = self.file_size();
        if offset >= size {
            return Ok(0);
        }
        let remaining = size - offset;

        if remaining < buf.len() as u64 {
            buf = &mut buf[..(remaining as usize)];
        }

        read_at(&self.input, buf, self.range.start + offset).await
    }
}
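
// For illustration, with made-up numbers: for a file whose contents span `range = 1000..1010`
// (`file_size() == 10`), a read at offset 8 with a 16-byte buffer is clamped to the remaining
// 2 bytes and reads them from absolute offset 1008 in the underlying input; a read at offset 10
// or beyond returns `Ok(0)`.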

impl<T: Clone + ReadAt> ReadAt for FileContentsImpl<T> {
    fn poll_read_at(
        self: Pin<&Self>,
        cx: &mut Context,
        mut buf: &mut [u8],
        offset: u64,
    ) -> Poll<io::Result<usize>> {
        let size = self.file_size();
        if offset >= size {
            return Poll::Ready(Ok(0));
        }
        let remaining = size - offset;

        if remaining < buf.len() as u64 {
            buf = &mut buf[..(remaining as usize)];
        }

        let offset = self.range.start + offset;
        unsafe { self.map_unchecked(|this| &this.input) }.poll_read_at(cx, buf, offset)
    }
}

#[doc(hidden)]
pub struct SeqReadAtAdapter<T> {
    input: T,
    range: Range<u64>,
}

impl<T: ReadAt> SeqReadAtAdapter<T> {
    pub fn new(input: T, range: Range<u64>) -> Self {
        if range.end < range.start {
            panic!("BAD SEQ READ AT ADAPTER");
        }
        Self { input, range }
    }

    #[inline]
    fn remaining(&self) -> usize {
        (self.range.end - self.range.start) as usize
    }
}

impl<T: ReadAt> decoder::SeqRead for SeqReadAtAdapter<T> {
    fn poll_seq_read(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut [u8],
    ) -> Poll<io::Result<usize>> {
        let len = buf.len().min(self.remaining());
        let buf = &mut buf[..len];

        let this = unsafe { self.get_unchecked_mut() };

        let got = ready!(unsafe {
            Pin::new_unchecked(&this.input).poll_read_at(cx, buf, this.range.start)
        })?;
        this.range.start += got as u64;
        Poll::Ready(Ok(got))
    }

    fn poll_position(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<io::Result<u64>>> {
        Poll::Ready(Some(Ok(self.range.start)))
    }
}