]> git.proxmox.com Git - rustc.git/blame - library/std/src/io/buffered/bufwriter.rs
New upstream version 1.55.0+dfsg1
[rustc.git] / library / std / src / io / buffered / bufwriter.rs
CommitLineData
5869c6ff 1use crate::error;
29967ef6
XL
2use crate::fmt;
3use crate::io::{
4 self, Error, ErrorKind, IntoInnerError, IoSlice, Seek, SeekFrom, Write, DEFAULT_BUF_SIZE,
5};
5869c6ff 6use crate::mem;
17df50a5 7use crate::ptr;
29967ef6
XL
8
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`Write`]. For example, every call to
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
/// writer in large, infrequent batches.
///
/// `BufWriter<W>` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a [`Vec`]`<u8>`.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
/// # Examples
///
/// Let's write the numbers one through ten to a [`TcpStream`]:
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::net::TcpStream;
///
/// let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
///
/// for i in 0..10 {
///     stream.write(&[i+1]).unwrap();
/// }
/// ```
///
/// Because we're not buffering, we write each one in turn, incurring the
/// overhead of a system call per byte written. We can fix this with a
/// `BufWriter<W>`:
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::BufWriter;
/// use std::net::TcpStream;
///
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
///
/// for i in 0..10 {
///     stream.write(&[i+1]).unwrap();
/// }
/// stream.flush().unwrap();
/// ```
///
/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
/// together by the buffer and will all be written out in one system call when
/// the `stream` is flushed.
///
// HACK(#78696): can't use `crate` for associated items
/// [`TcpStream::write`]: super::super::super::net::TcpStream::write
/// [`TcpStream`]: crate::net::TcpStream
/// [`flush`]: BufWriter::flush
#[stable(feature = "rust1", since = "1.0.0")]
pub struct BufWriter<W: Write> {
    // The wrapped writer that ultimately receives the buffered bytes.
    inner: W,
    // The buffer. Avoid using this like a normal `Vec` in common code paths.
    // That is, don't use `buf.push`, `buf.extend_from_slice`, or any other
    // methods that require bounds checking or the like. This makes an enormous
    // difference to performance (we may want to stop using a `Vec` entirely).
    buf: Vec<u8>,
    // #30888: If the inner writer panics in a call to write, we don't want to
    // write the buffered data a second time in BufWriter's destructor. This
    // flag tells the Drop impl if it should skip the flush.
    panicked: bool,
}
82
impl<W: Write> BufWriter<W> {
    /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
    /// but may change in the future.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(inner: W) -> BufWriter<W> {
        BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
    }

    /// Creates a new `BufWriter<W>` with the specified buffer capacity.
    ///
    /// # Examples
    ///
    /// Creating a buffer with a buffer of a hundred bytes.
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let stream = TcpStream::connect("127.0.0.1:34254").unwrap();
    /// let mut buffer = BufWriter::with_capacity(100, stream);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
        BufWriter { inner, buf: Vec::with_capacity(capacity), panicked: false }
    }

    /// Send data in our local buffer into the inner writer, looping as
    /// necessary until either it's all been sent or an error occurs.
    ///
    /// Because all the data in the buffer has been reported to our owner as
    /// "successfully written" (by returning nonzero success values from
    /// `write`), any 0-length writes from `inner` must be reported as i/o
    /// errors from this method.
    pub(in crate::io) fn flush_buf(&mut self) -> io::Result<()> {
        /// Helper struct to ensure the buffer is updated after all the writes
        /// are complete. It tracks the number of written bytes and drains them
        /// all from the front of the buffer when dropped. Running the drain in
        /// `drop` means partially-flushed data is removed from the buffer even
        /// if an error (or panic) interrupts the loop below.
        struct BufGuard<'a> {
            buffer: &'a mut Vec<u8>,
            written: usize,
        }

        impl<'a> BufGuard<'a> {
            fn new(buffer: &'a mut Vec<u8>) -> Self {
                Self { buffer, written: 0 }
            }

            /// The unwritten part of the buffer
            fn remaining(&self) -> &[u8] {
                &self.buffer[self.written..]
            }

            /// Flag some bytes as removed from the front of the buffer
            fn consume(&mut self, amt: usize) {
                self.written += amt;
            }

            /// true if all of the bytes have been written
            fn done(&self) -> bool {
                self.written >= self.buffer.len()
            }
        }

        impl Drop for BufGuard<'_> {
            fn drop(&mut self) {
                if self.written > 0 {
                    self.buffer.drain(..self.written);
                }
            }
        }

        let mut guard = BufGuard::new(&mut self.buf);
        while !guard.done() {
            // Set `panicked` around the inner write so the Drop impl skips
            // re-flushing if `inner.write` panics (see field docs / #30888).
            self.panicked = true;
            let r = self.inner.write(guard.remaining());
            self.panicked = false;

            match r {
                Ok(0) => {
                    // A zero-length write is an error here: the data was
                    // already reported to the caller as written.
                    return Err(Error::new_const(
                        ErrorKind::WriteZero,
                        &"failed to write the buffered data",
                    ));
                }
                Ok(n) => guard.consume(n),
                // Interrupted writes are transient; retry.
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => return Err(e),
            }
        }
        Ok(())
    }

    /// Buffer some data without flushing it, regardless of the size of the
    /// data. Writes as much as possible without exceeding capacity. Returns
    /// the number of bytes written.
    pub(super) fn write_to_buf(&mut self, buf: &[u8]) -> usize {
        let available = self.spare_capacity();
        let amt_to_buffer = available.min(buf.len());

        // SAFETY: `amt_to_buffer` is <= buffer's spare capacity by construction.
        unsafe {
            self.write_to_buffer_unchecked(&buf[..amt_to_buffer]);
        }

        amt_to_buffer
    }

    /// Gets a reference to the underlying writer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // we can use reference just like buffer
    /// let reference = buffer.get_ref();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_ref(&self) -> &W {
        &self.inner
    }

    /// Gets a mutable reference to the underlying writer.
    ///
    /// It is inadvisable to directly write to the underlying writer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // we can use reference just like buffer
    /// let reference = buffer.get_mut();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self) -> &mut W {
        &mut self.inner
    }

    /// Returns a reference to the internally buffered data.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // See how many bytes are currently buffered
    /// let bytes_buffered = buf_writer.buffer().len();
    /// ```
    #[stable(feature = "bufreader_buffer", since = "1.37.0")]
    pub fn buffer(&self) -> &[u8] {
        &self.buf
    }

    /// Returns a mutable reference to the internal buffer.
    ///
    /// This can be used to write data directly into the buffer without triggering writers
    /// to the underlying writer.
    ///
    /// That the buffer is a `Vec` is an implementation detail.
    /// Callers should not modify the capacity as there currently is no public API to do so
    /// and thus any capacity changes would be unexpected by the user.
    pub(in crate::io) fn buffer_mut(&mut self) -> &mut Vec<u8> {
        &mut self.buf
    }

    /// Returns the number of bytes the internal buffer can hold without flushing.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // Check the capacity of the inner buffer
    /// let capacity = buf_writer.capacity();
    /// // Calculate how many bytes can be written without flushing
    /// let without_flush = capacity - buf_writer.buffer().len();
    /// ```
    #[stable(feature = "buffered_io_capacity", since = "1.46.0")]
    pub fn capacity(&self) -> usize {
        self.buf.capacity()
    }

    /// Unwraps this `BufWriter<W>`, returning the underlying writer.
    ///
    /// The buffer is written out before returning the writer.
    ///
    /// # Errors
    ///
    /// An [`Err`] will be returned if an error occurs while flushing the buffer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // unwrap the TcpStream and flush the buffer
    /// let stream = buffer.into_inner().unwrap();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
        match self.flush_buf() {
            // On flush failure, hand the whole BufWriter back to the caller
            // alongside the error so no buffered data is lost.
            Err(e) => Err(IntoInnerError::new(self, e)),
            Ok(()) => Ok(self.into_raw_parts().0),
        }
    }

    /// Disassembles this `BufWriter<W>`, returning the underlying writer, and any buffered but
    /// unwritten data.
    ///
    /// If the underlying writer panicked, it is not known what portion of the data was written.
    /// In this case, we return `WriterPanicked` for the buffered data (from which the buffer
    /// contents can still be recovered).
    ///
    /// `into_raw_parts` makes no attempt to flush data and cannot fail.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(bufwriter_into_raw_parts)]
    /// use std::io::{BufWriter, Write};
    ///
    /// let mut buffer = [0u8; 10];
    /// let mut stream = BufWriter::new(buffer.as_mut());
    /// write!(stream, "too much data").unwrap();
    /// stream.flush().expect_err("it doesn't fit");
    /// let (recovered_writer, buffered_data) = stream.into_raw_parts();
    /// assert_eq!(recovered_writer.len(), 0);
    /// assert_eq!(&buffered_data.unwrap(), b"ata");
    /// ```
    #[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
    pub fn into_raw_parts(mut self) -> (W, Result<Vec<u8>, WriterPanicked>) {
        let buf = mem::take(&mut self.buf);
        let buf = if !self.panicked { Ok(buf) } else { Err(WriterPanicked { buf }) };

        // SAFETY: forget(self) prevents double dropping inner
        let inner = unsafe { ptr::read(&mut self.inner) };
        mem::forget(self);

        (inner, buf)
    }

    // Ensure this function does not get inlined into `write`, so that it
    // remains inlineable and its common path remains as short as possible.
    // If this function ends up being called frequently relative to `write`,
    // it's likely a sign that the client is using an improperly sized buffer
    // or their write patterns are somewhat pathological.
    #[cold]
    #[inline(never)]
    fn write_cold(&mut self, buf: &[u8]) -> io::Result<usize> {
        if buf.len() > self.spare_capacity() {
            self.flush_buf()?;
        }

        // Why not len > capacity? To avoid a needless trip through the buffer when the input
        // exactly fills it. We'd just need to flush it to the underlying writer anyway.
        if buf.len() >= self.buf.capacity() {
            // Bypass the buffer entirely; `panicked` guards the Drop impl
            // against re-flushing if `write` panics.
            self.panicked = true;
            let r = self.get_mut().write(buf);
            self.panicked = false;
            r
        } else {
            // Write to the buffer. In this case, we write to the buffer even if it fills it
            // exactly. Doing otherwise would mean flushing the buffer, then writing this
            // input to the inner writer, which in many cases would be a worse strategy.

            // SAFETY: There was either enough spare capacity already, or there wasn't and we
            // flushed the buffer to ensure that there is. In the latter case, we know that there
            // is because flushing ensured that our entire buffer is spare capacity, and we entered
            // this block because the input buffer length is less than that capacity. In either
            // case, it's safe to write the input buffer to our buffer.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(buf.len())
        }
    }

    // Ensure this function does not get inlined into `write_all`, so that it
    // remains inlineable and its common path remains as short as possible.
    // If this function ends up being called frequently relative to `write_all`,
    // it's likely a sign that the client is using an improperly sized buffer
    // or their write patterns are somewhat pathological.
    #[cold]
    #[inline(never)]
    fn write_all_cold(&mut self, buf: &[u8]) -> io::Result<()> {
        // Normally, `write_all` just calls `write` in a loop. We can do better
        // by calling `self.get_mut().write_all()` directly, which avoids
        // round trips through the buffer in the event of a series of partial
        // writes in some circumstances.

        if buf.len() > self.spare_capacity() {
            self.flush_buf()?;
        }

        // Why not len > capacity? To avoid a needless trip through the buffer when the input
        // exactly fills it. We'd just need to flush it to the underlying writer anyway.
        if buf.len() >= self.buf.capacity() {
            self.panicked = true;
            let r = self.get_mut().write_all(buf);
            self.panicked = false;
            r
        } else {
            // Write to the buffer. In this case, we write to the buffer even if it fills it
            // exactly. Doing otherwise would mean flushing the buffer, then writing this
            // input to the inner writer, which in many cases would be a worse strategy.

            // SAFETY: There was either enough spare capacity already, or there wasn't and we
            // flushed the buffer to ensure that there is. In the latter case, we know that there
            // is because flushing ensured that our entire buffer is spare capacity, and we entered
            // this block because the input buffer length is less than that capacity. In either
            // case, it's safe to write the input buffer to our buffer.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(())
        }
    }

    // SAFETY: Requires `buf.len() <= self.buf.capacity() - self.buf.len()`,
    // i.e., that input buffer length is less than or equal to spare capacity.
    #[inline]
    unsafe fn write_to_buffer_unchecked(&mut self, buf: &[u8]) {
        debug_assert!(buf.len() <= self.spare_capacity());
        let old_len = self.buf.len();
        let buf_len = buf.len();
        let src = buf.as_ptr();
        // Copy straight into the Vec's spare capacity, then bump its length;
        // this skips the bounds/growth checks of `extend_from_slice`.
        let dst = self.buf.as_mut_ptr().add(old_len);
        ptr::copy_nonoverlapping(src, dst, buf_len);
        self.buf.set_len(old_len + buf_len);
    }

    // Number of bytes that can be appended to the buffer before it is full.
    #[inline]
    fn spare_capacity(&self) -> usize {
        self.buf.capacity() - self.buf.len()
    }
}
446
#[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
/// Error returned for the buffered data from `BufWriter::into_raw_parts`, when the underlying
/// writer has previously panicked. Contains the (possibly partly written) buffered data.
///
/// # Example
///
/// ```
/// #![feature(bufwriter_into_raw_parts)]
/// use std::io::{self, BufWriter, Write};
/// use std::panic::{catch_unwind, AssertUnwindSafe};
///
/// struct PanickingWriter;
/// impl Write for PanickingWriter {
///     fn write(&mut self, buf: &[u8]) -> io::Result<usize> { panic!() }
///     fn flush(&mut self) -> io::Result<()> { panic!() }
/// }
///
/// let mut stream = BufWriter::new(PanickingWriter);
/// write!(stream, "some data").unwrap();
/// let result = catch_unwind(AssertUnwindSafe(|| {
///     stream.flush().unwrap()
/// }));
/// assert!(result.is_err());
/// let (recovered_writer, buffered_data) = stream.into_raw_parts();
/// assert!(matches!(recovered_writer, PanickingWriter));
/// assert_eq!(buffered_data.unwrap_err().into_inner(), b"some data");
/// ```
pub struct WriterPanicked {
    // The buffered bytes at the time of the panic; recoverable via `into_inner`.
    buf: Vec<u8>,
}
477
impl WriterPanicked {
    /// Returns the perhaps-unwritten data. Some of this data may have been written by the
    /// panicking call(s) to the underlying writer, so simply writing it again is not a good idea.
    #[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
    pub fn into_inner(self) -> Vec<u8> {
        self.buf
    }

    // Shared message used by the `Display` and (deprecated) `Error::description` impls.
    const DESCRIPTION: &'static str =
        "BufWriter inner writer panicked, what data remains unwritten is not known";
}
489
#[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
impl error::Error for WriterPanicked {
    // `description` is deprecated, but is still provided here so legacy
    // callers get the same message as `Display`.
    #[allow(deprecated, deprecated_in_future)]
    fn description(&self) -> &str {
        Self::DESCRIPTION
    }
}
497
498#[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
499impl fmt::Display for WriterPanicked {
500 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
501 write!(f, "{}", Self::DESCRIPTION)
502 }
503}
504
#[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
impl fmt::Debug for WriterPanicked {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Show only the buffer's fill level ("len/capacity"), not its
        // contents, which may be large and/or sensitive.
        f.debug_struct("WriterPanicked")
            .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
            .finish()
    }
}
513
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: Write> Write for BufWriter<W> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Use < instead of <= to avoid a needless trip through the buffer in some cases.
        // See `write_cold` for details.
        if buf.len() < self.spare_capacity() {
            // SAFETY: safe by above conditional.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(buf.len())
        } else {
            // Slow path (flush and/or bypass) kept out-of-line so this
            // function stays small enough to inline.
            self.write_cold(buf)
        }
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        // Use < instead of <= to avoid a needless trip through the buffer in some cases.
        // See `write_all_cold` for details.
        if buf.len() < self.spare_capacity() {
            // SAFETY: safe by above conditional.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(())
        } else {
            self.write_all_cold(buf)
        }
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        // FIXME: Consider applying `#[inline]` / `#[inline(never)]` optimizations already applied
        // to `write` and `write_all`. The performance benefits can be significant. See #79930.
        if self.get_ref().is_write_vectored() {
            // We have to handle the possibility that the total length of the buffers overflows
            // `usize` (even though this can only happen if multiple `IoSlice`s reference the
            // same underlying buffer, as otherwise the buffers wouldn't fit in memory). If the
            // computation overflows, then surely the input cannot fit in our buffer, so we forward
            // to the inner writer's `write_vectored` method to let it handle it appropriately.
            let saturated_total_len =
                bufs.iter().fold(0usize, |acc, b| acc.saturating_add(b.len()));

            if saturated_total_len > self.spare_capacity() {
                // Flush if the total length of the input exceeds our buffer's spare capacity.
                // If we would have overflowed, this condition also holds, and we need to flush.
                self.flush_buf()?;
            }

            if saturated_total_len >= self.buf.capacity() {
                // Forward to our inner writer if the total length of the input is greater than or
                // equal to our buffer capacity. If we would have overflowed, this condition also
                // holds, and we punt to the inner writer.
                self.panicked = true;
                let r = self.get_mut().write_vectored(bufs);
                self.panicked = false;
                r
            } else {
                // `saturated_total_len < self.buf.capacity()` implies that we did not saturate.

                // SAFETY: We checked whether or not the spare capacity was large enough above. If
                // it was, then we're safe already. If it wasn't, we flushed, making sufficient
                // room for any input <= the buffer size, which includes this input.
                unsafe {
                    bufs.iter().for_each(|b| self.write_to_buffer_unchecked(b));
                };

                Ok(saturated_total_len)
            }
        } else {
            // The inner writer gains nothing from vectored writes, so emulate
            // them through the buffer, one slice at a time.
            let mut iter = bufs.iter();
            let mut total_written = if let Some(buf) = iter.by_ref().find(|&buf| !buf.is_empty()) {
                // This is the first non-empty slice to write, so if it does
                // not fit in the buffer, we still get to flush and proceed.
                if buf.len() > self.spare_capacity() {
                    self.flush_buf()?;
                }
                if buf.len() >= self.buf.capacity() {
                    // The slice is at least as large as the buffering capacity,
                    // so it's better to write it directly, bypassing the buffer.
                    self.panicked = true;
                    let r = self.get_mut().write(buf);
                    self.panicked = false;
                    return r;
                } else {
                    // SAFETY: We checked whether or not the spare capacity was large enough above.
                    // If it was, then we're safe already. If it wasn't, we flushed, making
                    // sufficient room for any input <= the buffer size, which includes this input.
                    unsafe {
                        self.write_to_buffer_unchecked(buf);
                    }

                    buf.len()
                }
            } else {
                // All slices were empty; nothing to write.
                return Ok(0);
            };
            debug_assert!(total_written != 0);
            for buf in iter {
                if buf.len() <= self.spare_capacity() {
                    // SAFETY: safe by above conditional.
                    unsafe {
                        self.write_to_buffer_unchecked(buf);
                    }

                    // This cannot overflow `usize`. If we are here, we've written all of the bytes
                    // so far to our buffer, and we've ensured that we never exceed the buffer's
                    // capacity. Therefore, `total_written` <= `self.buf.capacity()` <= `usize::MAX`.
                    total_written += buf.len();
                } else {
                    // Next slice doesn't fit; report what we buffered so far
                    // rather than flushing mid-call.
                    break;
                }
            }
            Ok(total_written)
        }
    }

    // Vectored writes are always "supported" here because the buffer can
    // absorb multiple slices regardless of the inner writer's support.
    fn is_write_vectored(&self) -> bool {
        true
    }

    fn flush(&mut self) -> io::Result<()> {
        // Drain our buffer into the inner writer, then flush the inner writer
        // itself so the data reaches its final destination.
        self.flush_buf().and_then(|()| self.get_mut().flush())
    }
}
642
643#[stable(feature = "rust1", since = "1.0.0")]
644impl<W: Write> fmt::Debug for BufWriter<W>
645where
646 W: fmt::Debug,
647{
648 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
649 fmt.debug_struct("BufWriter")
136023e0 650 .field("writer", &self.inner)
29967ef6
XL
651 .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
652 .finish()
653 }
654}
655
656#[stable(feature = "rust1", since = "1.0.0")]
657impl<W: Write + Seek> Seek for BufWriter<W> {
658 /// Seek to the offset, in bytes, in the underlying writer.
659 ///
660 /// Seeking always writes out the internal buffer before seeking.
661 fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
662 self.flush_buf()?;
663 self.get_mut().seek(pos)
664 }
665}
666
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: Write> Drop for BufWriter<W> {
    fn drop(&mut self) {
        // Skip the flush entirely if the inner writer panicked during a write
        // (`panicked` flag, see #30888) to avoid writing the data twice.
        if !self.panicked {
            // dtors should not panic, so we ignore a failed flush
            let _r = self.flush_buf();
        }
    }
}