]> git.proxmox.com Git - rustc.git/blob - library/std/src/io/buffered/bufwriter.rs
New upstream version 1.54.0+dfsg1
[rustc.git] / library / std / src / io / buffered / bufwriter.rs
1 use crate::error;
2 use crate::fmt;
3 use crate::io::{
4 self, Error, ErrorKind, IntoInnerError, IoSlice, Seek, SeekFrom, Write, DEFAULT_BUF_SIZE,
5 };
6 use crate::mem;
7 use crate::ptr;
8
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`Write`]. For example, every call to
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
/// writer in large, infrequent batches.
///
/// `BufWriter<W>` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a [`Vec`]`<u8>`.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
/// # Examples
///
/// Let's write the numbers one through ten to a [`TcpStream`]:
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::net::TcpStream;
///
/// let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
///
/// for i in 0..10 {
///     stream.write(&[i+1]).unwrap();
/// }
/// ```
///
/// Because we're not buffering, we write each one in turn, incurring the
/// overhead of a system call per byte written. We can fix this with a
/// `BufWriter<W>`:
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::BufWriter;
/// use std::net::TcpStream;
///
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
///
/// for i in 0..10 {
///     stream.write(&[i+1]).unwrap();
/// }
/// stream.flush().unwrap();
/// ```
///
/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
/// together by the buffer and will all be written out in one system call when
/// the `stream` is flushed.
///
// HACK(#78696): can't use `crate` for associated items
/// [`TcpStream::write`]: super::super::super::net::TcpStream::write
/// [`TcpStream`]: crate::net::TcpStream
/// [`flush`]: BufWriter::flush
#[stable(feature = "rust1", since = "1.0.0")]
pub struct BufWriter<W: Write> {
    // The inner writer. This is `None` only after `into_inner` or
    // `into_raw_parts` has `take`n it; every other code path may `unwrap` it.
    inner: Option<W>,
    // The buffer. Avoid using this like a normal `Vec` in common code paths.
    // That is, don't use `buf.push`, `buf.extend_from_slice`, or any other
    // methods that require bounds checking or the like. This makes an enormous
    // difference to performance (we may want to stop using a `Vec` entirely).
    buf: Vec<u8>,
    // #30888: If the inner writer panics in a call to write, we don't want to
    // write the buffered data a second time in BufWriter's destructor. This
    // flag tells the Drop impl if it should skip the flush.
    panicked: bool,
}
82
impl<W: Write> BufWriter<W> {
    /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
    /// but may change in the future.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(inner: W) -> BufWriter<W> {
        BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
    }

    /// Creates a new `BufWriter<W>` with the specified buffer capacity.
    ///
    /// # Examples
    ///
    /// Creating a buffer with a buffer of a hundred bytes.
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let stream = TcpStream::connect("127.0.0.1:34254").unwrap();
    /// let mut buffer = BufWriter::with_capacity(100, stream);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
        BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false }
    }

    /// Send data in our local buffer into the inner writer, looping as
    /// necessary until either it's all been sent or an error occurs.
    ///
    /// Because all the data in the buffer has been reported to our owner as
    /// "successfully written" (by returning nonzero success values from
    /// `write`), any 0-length writes from `inner` must be reported as i/o
    /// errors from this method.
    pub(in crate::io) fn flush_buf(&mut self) -> io::Result<()> {
        /// Helper struct to ensure the buffer is updated after all the writes
        /// are complete. It tracks the number of written bytes and drains them
        /// all from the front of the buffer when dropped.
        struct BufGuard<'a> {
            buffer: &'a mut Vec<u8>,
            written: usize,
        }

        impl<'a> BufGuard<'a> {
            fn new(buffer: &'a mut Vec<u8>) -> Self {
                Self { buffer, written: 0 }
            }

            /// The unwritten part of the buffer
            fn remaining(&self) -> &[u8] {
                &self.buffer[self.written..]
            }

            /// Flag some bytes as removed from the front of the buffer
            fn consume(&mut self, amt: usize) {
                self.written += amt;
            }

            /// true if all of the bytes have been written
            fn done(&self) -> bool {
                self.written >= self.buffer.len()
            }
        }

        impl Drop for BufGuard<'_> {
            fn drop(&mut self) {
                // Runs on every exit path (success, error return, or unwind),
                // so bytes already accepted by `inner` are never resent.
                if self.written > 0 {
                    self.buffer.drain(..self.written);
                }
            }
        }

        let mut guard = BufGuard::new(&mut self.buf);
        let inner = self.inner.as_mut().unwrap();
        while !guard.done() {
            // Set `panicked` around the inner write so that, should `write`
            // unwind, our Drop impl skips flushing the data again (#30888).
            self.panicked = true;
            let r = inner.write(guard.remaining());
            self.panicked = false;

            match r {
                Ok(0) => {
                    return Err(Error::new_const(
                        ErrorKind::WriteZero,
                        &"failed to write the buffered data",
                    ));
                }
                Ok(n) => guard.consume(n),
                // Interrupted writes are transient; retry the same bytes.
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => return Err(e),
            }
        }
        Ok(())
    }

    /// Buffer some data without flushing it, regardless of the size of the
    /// data. Writes as much as possible without exceeding capacity. Returns
    /// the number of bytes written.
    pub(super) fn write_to_buf(&mut self, buf: &[u8]) -> usize {
        let available = self.spare_capacity();
        let amt_to_buffer = available.min(buf.len());

        // SAFETY: `amt_to_buffer` is <= buffer's spare capacity by construction.
        unsafe {
            self.write_to_buffer_unchecked(&buf[..amt_to_buffer]);
        }

        amt_to_buffer
    }

    /// Gets a reference to the underlying writer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // we can use reference just like buffer
    /// let reference = buffer.get_ref();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_ref(&self) -> &W {
        self.inner.as_ref().unwrap()
    }

    /// Gets a mutable reference to the underlying writer.
    ///
    /// It is inadvisable to directly write to the underlying writer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // we can use reference just like buffer
    /// let reference = buffer.get_mut();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self) -> &mut W {
        self.inner.as_mut().unwrap()
    }

    /// Returns a reference to the internally buffered data.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // See how many bytes are currently buffered
    /// let bytes_buffered = buf_writer.buffer().len();
    /// ```
    #[stable(feature = "bufreader_buffer", since = "1.37.0")]
    pub fn buffer(&self) -> &[u8] {
        &self.buf
    }

    /// Returns a mutable reference to the internal buffer.
    ///
    /// This can be used to write data directly into the buffer without triggering writers
    /// to the underlying writer.
    ///
    /// That the buffer is a `Vec` is an implementation detail.
    /// Callers should not modify the capacity as there currently is no public API to do so
    /// and thus any capacity changes would be unexpected by the user.
    pub(in crate::io) fn buffer_mut(&mut self) -> &mut Vec<u8> {
        &mut self.buf
    }

    /// Returns the number of bytes the internal buffer can hold without flushing.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // Check the capacity of the inner buffer
    /// let capacity = buf_writer.capacity();
    /// // Calculate how many bytes can be written without flushing
    /// let without_flush = capacity - buf_writer.buffer().len();
    /// ```
    #[stable(feature = "buffered_io_capacity", since = "1.46.0")]
    pub fn capacity(&self) -> usize {
        self.buf.capacity()
    }

    /// Unwraps this `BufWriter<W>`, returning the underlying writer.
    ///
    /// The buffer is written out before returning the writer.
    ///
    /// # Errors
    ///
    /// An [`Err`] will be returned if an error occurs while flushing the buffer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // unwrap the TcpStream and flush the buffer
    /// let stream = buffer.into_inner().unwrap();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
        match self.flush_buf() {
            // On flush failure, hand the whole `BufWriter` back inside the
            // error so the caller can retry or recover the data.
            Err(e) => Err(IntoInnerError::new(self, e)),
            // Taking `inner` leaves `None` behind, which makes the Drop impl
            // skip any further flushing.
            Ok(()) => Ok(self.inner.take().unwrap()),
        }
    }

    /// Disassembles this `BufWriter<W>`, returning the underlying writer, and any buffered but
    /// unwritten data.
    ///
    /// If the underlying writer panicked, it is not known what portion of the data was written.
    /// In this case, we return `WriterPanicked` for the buffered data (from which the buffer
    /// contents can still be recovered).
    ///
    /// `into_raw_parts` makes no attempt to flush data and cannot fail.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(bufwriter_into_raw_parts)]
    /// use std::io::{BufWriter, Write};
    ///
    /// let mut buffer = [0u8; 10];
    /// let mut stream = BufWriter::new(buffer.as_mut());
    /// write!(stream, "too much data").unwrap();
    /// stream.flush().expect_err("it doesn't fit");
    /// let (recovered_writer, buffered_data) = stream.into_raw_parts();
    /// assert_eq!(recovered_writer.len(), 0);
    /// assert_eq!(&buffered_data.unwrap(), b"ata");
    /// ```
    #[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
    pub fn into_raw_parts(mut self) -> (W, Result<Vec<u8>, WriterPanicked>) {
        let buf = mem::take(&mut self.buf);
        let buf = if !self.panicked { Ok(buf) } else { Err(WriterPanicked { buf }) };
        (self.inner.take().unwrap(), buf)
    }

    // Ensure this function does not get inlined into `write`, so that it
    // remains inlineable and its common path remains as short as possible.
    // If this function ends up being called frequently relative to `write`,
    // it's likely a sign that the client is using an improperly sized buffer
    // or their write patterns are somewhat pathological.
    #[cold]
    #[inline(never)]
    fn write_cold(&mut self, buf: &[u8]) -> io::Result<usize> {
        if buf.len() > self.spare_capacity() {
            self.flush_buf()?;
        }

        // Why not len > capacity? To avoid a needless trip through the buffer when the input
        // exactly fills it. We'd just need to flush it to the underlying writer anyway.
        if buf.len() >= self.buf.capacity() {
            // Guard the direct inner write with `panicked` so Drop won't
            // re-flush stale buffered data if `write` unwinds (#30888).
            self.panicked = true;
            let r = self.get_mut().write(buf);
            self.panicked = false;
            r
        } else {
            // Write to the buffer. In this case, we write to the buffer even if it fills it
            // exactly. Doing otherwise would mean flushing the buffer, then writing this
            // input to the inner writer, which in many cases would be a worse strategy.

            // SAFETY: There was either enough spare capacity already, or there wasn't and we
            // flushed the buffer to ensure that there is. In the latter case, we know that there
            // is because flushing ensured that our entire buffer is spare capacity, and we entered
            // this block because the input buffer length is less than that capacity. In either
            // case, it's safe to write the input buffer to our buffer.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(buf.len())
        }
    }

    // Ensure this function does not get inlined into `write_all`, so that it
    // remains inlineable and its common path remains as short as possible.
    // If this function ends up being called frequently relative to `write_all`,
    // it's likely a sign that the client is using an improperly sized buffer
    // or their write patterns are somewhat pathological.
    #[cold]
    #[inline(never)]
    fn write_all_cold(&mut self, buf: &[u8]) -> io::Result<()> {
        // Normally, `write_all` just calls `write` in a loop. We can do better
        // by calling `self.get_mut().write_all()` directly, which avoids
        // round trips through the buffer in the event of a series of partial
        // writes in some circumstances.

        if buf.len() > self.spare_capacity() {
            self.flush_buf()?;
        }

        // Why not len > capacity? To avoid a needless trip through the buffer when the input
        // exactly fills it. We'd just need to flush it to the underlying writer anyway.
        if buf.len() >= self.buf.capacity() {
            // Same `panicked` protocol as `write_cold` above (#30888).
            self.panicked = true;
            let r = self.get_mut().write_all(buf);
            self.panicked = false;
            r
        } else {
            // Write to the buffer. In this case, we write to the buffer even if it fills it
            // exactly. Doing otherwise would mean flushing the buffer, then writing this
            // input to the inner writer, which in many cases would be a worse strategy.

            // SAFETY: There was either enough spare capacity already, or there wasn't and we
            // flushed the buffer to ensure that there is. In the latter case, we know that there
            // is because flushing ensured that our entire buffer is spare capacity, and we entered
            // this block because the input buffer length is less than that capacity. In either
            // case, it's safe to write the input buffer to our buffer.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(())
        }
    }

    // SAFETY: Requires `buf.len() <= self.buf.capacity() - self.buf.len()`,
    // i.e., that input buffer length is less than or equal to spare capacity.
    #[inline]
    unsafe fn write_to_buffer_unchecked(&mut self, buf: &[u8]) {
        debug_assert!(buf.len() <= self.spare_capacity());
        let old_len = self.buf.len();
        let buf_len = buf.len();
        let src = buf.as_ptr();
        let dst = self.buf.as_mut_ptr().add(old_len);
        // `src` comes from a borrowed slice and `dst` points into our own
        // spare capacity, so the two regions cannot overlap.
        ptr::copy_nonoverlapping(src, dst, buf_len);
        // The appended bytes were just initialized by the copy above, so
        // extending the length is sound.
        self.buf.set_len(old_len + buf_len);
    }

    /// Number of additional bytes the buffer can hold without flushing
    /// (`capacity - len`); cannot underflow since `len <= capacity`.
    #[inline]
    fn spare_capacity(&self) -> usize {
        self.buf.capacity() - self.buf.len()
    }
}
442
#[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
/// Error returned for the buffered data from `BufWriter::into_raw_parts`, when the underlying
/// writer has previously panicked. Contains the (possibly partly written) buffered data.
///
/// # Example
///
/// ```
/// #![feature(bufwriter_into_raw_parts)]
/// use std::io::{self, BufWriter, Write};
/// use std::panic::{catch_unwind, AssertUnwindSafe};
///
/// struct PanickingWriter;
/// impl Write for PanickingWriter {
///     fn write(&mut self, buf: &[u8]) -> io::Result<usize> { panic!() }
///     fn flush(&mut self) -> io::Result<()> { panic!() }
/// }
///
/// let mut stream = BufWriter::new(PanickingWriter);
/// write!(stream, "some data").unwrap();
/// let result = catch_unwind(AssertUnwindSafe(|| {
///     stream.flush().unwrap()
/// }));
/// assert!(result.is_err());
/// let (recovered_writer, buffered_data) = stream.into_raw_parts();
/// assert!(matches!(recovered_writer, PanickingWriter));
/// assert_eq!(buffered_data.unwrap_err().into_inner(), b"some data");
/// ```
pub struct WriterPanicked {
    // The bytes that were buffered when the panic happened; an unknown prefix
    // may already have reached the inner writer. Recoverable via `into_inner`.
    buf: Vec<u8>,
}
473
474 impl WriterPanicked {
475 /// Returns the perhaps-unwritten data. Some of this data may have been written by the
476 /// panicking call(s) to the underlying writer, so simply writing it again is not a good idea.
477 #[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
478 pub fn into_inner(self) -> Vec<u8> {
479 self.buf
480 }
481
482 const DESCRIPTION: &'static str =
483 "BufWriter inner writer panicked, what data remains unwritten is not known";
484 }
485
486 #[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
487 impl error::Error for WriterPanicked {
488 #[allow(deprecated, deprecated_in_future)]
489 fn description(&self) -> &str {
490 Self::DESCRIPTION
491 }
492 }
493
494 #[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
495 impl fmt::Display for WriterPanicked {
496 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
497 write!(f, "{}", Self::DESCRIPTION)
498 }
499 }
500
501 #[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
502 impl fmt::Debug for WriterPanicked {
503 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
504 f.debug_struct("WriterPanicked")
505 .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
506 .finish()
507 }
508 }
509
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: Write> Write for BufWriter<W> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Use < instead of <= to avoid a needless trip through the buffer in some cases.
        // See `write_cold` for details.
        if buf.len() < self.spare_capacity() {
            // Hot path: the input fits in the buffer with room to spare.
            // SAFETY: safe by above conditional.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(buf.len())
        } else {
            // Cold path: may need to flush and/or bypass the buffer entirely.
            self.write_cold(buf)
        }
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        // Use < instead of <= to avoid a needless trip through the buffer in some cases.
        // See `write_all_cold` for details.
        if buf.len() < self.spare_capacity() {
            // Hot path: the input fits in the buffer with room to spare.
            // SAFETY: safe by above conditional.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(())
        } else {
            // Cold path: may need to flush and/or bypass the buffer entirely.
            self.write_all_cold(buf)
        }
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        // FIXME: Consider applying `#[inline]` / `#[inline(never)]` optimizations already applied
        // to `write` and `write_all`. The performance benefits can be significant. See #79930.
        if self.get_ref().is_write_vectored() {
            // We have to handle the possibility that the total length of the buffers overflows
            // `usize` (even though this can only happen if multiple `IoSlice`s reference the
            // same underlying buffer, as otherwise the buffers wouldn't fit in memory). If the
            // computation overflows, then surely the input cannot fit in our buffer, so we forward
            // to the inner writer's `write_vectored` method to let it handle it appropriately.
            let saturated_total_len =
                bufs.iter().fold(0usize, |acc, b| acc.saturating_add(b.len()));

            if saturated_total_len > self.spare_capacity() {
                // Flush if the total length of the input exceeds our buffer's spare capacity.
                // If we would have overflowed, this condition also holds, and we need to flush.
                self.flush_buf()?;
            }

            if saturated_total_len >= self.buf.capacity() {
                // Forward to our inner writer if the total length of the input is greater than or
                // equal to our buffer capacity. If we would have overflowed, this condition also
                // holds, and we punt to the inner writer.
                // `panicked` guards Drop against re-flushing if this unwinds (#30888).
                self.panicked = true;
                let r = self.get_mut().write_vectored(bufs);
                self.panicked = false;
                r
            } else {
                // `saturated_total_len < self.buf.capacity()` implies that we did not saturate.

                // SAFETY: We checked whether or not the spare capacity was large enough above. If
                // it was, then we're safe already. If it wasn't, we flushed, making sufficient
                // room for any input <= the buffer size, which includes this input.
                unsafe {
                    bufs.iter().for_each(|b| self.write_to_buffer_unchecked(b));
                };

                Ok(saturated_total_len)
            }
        } else {
            // The inner writer has no efficient vectored support: buffer as many
            // whole slices as fit, reporting how many bytes were consumed.
            let mut iter = bufs.iter();
            let mut total_written = if let Some(buf) = iter.by_ref().find(|&buf| !buf.is_empty()) {
                // This is the first non-empty slice to write, so if it does
                // not fit in the buffer, we still get to flush and proceed.
                if buf.len() > self.spare_capacity() {
                    self.flush_buf()?;
                }
                if buf.len() >= self.buf.capacity() {
                    // The slice is at least as large as the buffering capacity,
                    // so it's better to write it directly, bypassing the buffer.
                    // `panicked` guards Drop against re-flushing if this unwinds (#30888).
                    self.panicked = true;
                    let r = self.get_mut().write(buf);
                    self.panicked = false;
                    return r;
                } else {
                    // SAFETY: We checked whether or not the spare capacity was large enough above.
                    // If it was, then we're safe already. If it wasn't, we flushed, making
                    // sufficient room for any input <= the buffer size, which includes this input.
                    unsafe {
                        self.write_to_buffer_unchecked(buf);
                    }

                    buf.len()
                }
            } else {
                // All slices were empty; nothing to report as written.
                return Ok(0);
            };
            // The first slice written above was non-empty, so this is nonzero.
            debug_assert!(total_written != 0);
            for buf in iter {
                if buf.len() <= self.spare_capacity() {
                    // SAFETY: safe by above conditional.
                    unsafe {
                        self.write_to_buffer_unchecked(buf);
                    }

                    // This cannot overflow `usize`. If we are here, we've written all of the bytes
                    // so far to our buffer, and we've ensured that we never exceed the buffer's
                    // capacity. Therefore, `total_written` <= `self.buf.capacity()` <= `usize::MAX`.
                    total_written += buf.len();
                } else {
                    // Stop at the first slice that no longer fits; the caller
                    // will retry the remainder per the `write_vectored` contract.
                    break;
                }
            }
            Ok(total_written)
        }
    }

    fn is_write_vectored(&self) -> bool {
        // `write_vectored` above always handles multiple buffers usefully,
        // either by forwarding or by coalescing into our buffer.
        true
    }

    fn flush(&mut self) -> io::Result<()> {
        // Drain our buffer into the inner writer, then flush it in turn.
        self.flush_buf().and_then(|()| self.get_mut().flush())
    }
}
638
639 #[stable(feature = "rust1", since = "1.0.0")]
640 impl<W: Write> fmt::Debug for BufWriter<W>
641 where
642 W: fmt::Debug,
643 {
644 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
645 fmt.debug_struct("BufWriter")
646 .field("writer", &self.inner.as_ref().unwrap())
647 .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
648 .finish()
649 }
650 }
651
652 #[stable(feature = "rust1", since = "1.0.0")]
653 impl<W: Write + Seek> Seek for BufWriter<W> {
654 /// Seek to the offset, in bytes, in the underlying writer.
655 ///
656 /// Seeking always writes out the internal buffer before seeking.
657 fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
658 self.flush_buf()?;
659 self.get_mut().seek(pos)
660 }
661 }
662
663 #[stable(feature = "rust1", since = "1.0.0")]
664 impl<W: Write> Drop for BufWriter<W> {
665 fn drop(&mut self) {
666 if self.inner.is_some() && !self.panicked {
667 // dtors should not panic, so we ignore a failed flush
668 let _r = self.flush_buf();
669 }
670 }
671 }