1 //! A priority queue implemented with a binary heap.
3 //! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
4 //! Checking the largest element is *O*(1). Converting a vector to a binary heap
5 //! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
6 //! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*))
11 //! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
12 //! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
13 //! It shows how to use [`BinaryHeap`] with custom types.
15 //! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
16 //! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
17 //! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
20 //! use std::cmp::Ordering;
21 //! use std::collections::BinaryHeap;
23 //! #[derive(Copy, Clone, Eq, PartialEq)]
29 //! // The priority queue depends on `Ord`.
30 //! // Explicitly implement the trait so the queue becomes a min-heap
31 //! // instead of a max-heap.
32 //! impl Ord for State {
33 //! fn cmp(&self, other: &Self) -> Ordering {
34 //! // Notice that we flip the ordering on costs.
35 //! // In case of a tie we compare positions - this step is necessary
36 //! // to make implementations of `PartialEq` and `Ord` consistent.
37 //! other.cost.cmp(&self.cost)
38 //! .then_with(|| self.position.cmp(&other.position))
42 //! // `PartialOrd` needs to be implemented as well.
43 //! impl PartialOrd for State {
44 //! fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
45 //! Some(self.cmp(other))
49 //! // Each node is represented as a `usize`, for a shorter implementation.
55 //! // Dijkstra's shortest path algorithm.
57 //! // Start at `start` and use `dist` to track the current shortest distance
58 //! // to each node. This implementation isn't memory-efficient as it may leave duplicate
59 //! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
60 //! // for a simpler implementation.
61 //! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
62 //! // dist[node] = current shortest distance from `start` to `node`
63 //! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
65 //! let mut heap = BinaryHeap::new();
67 //! // We're at `start`, with a zero cost
69 //! heap.push(State { cost: 0, position: start });
71 //! // Examine the frontier with lower cost nodes first (min-heap)
72 //! while let Some(State { cost, position }) = heap.pop() {
73 //! // Alternatively we could have continued to find all shortest paths
74 //! if position == goal { return Some(cost); }
76 //! // Important as we may have already found a better way
77 //! if cost > dist[position] { continue; }
79 //! // For each node we can reach, see if we can find a way with
80 //! // a lower cost going through this node
81 //! for edge in &adj_list[position] {
82 //! let next = State { cost: cost + edge.cost, position: edge.node };
84 //! // If so, add it to the frontier and continue
85 //! if next.cost < dist[next.position] {
87 //! // Relaxation, we have now found a better way
88 //! dist[next.position] = next.cost;
93 //! // Goal not reachable
98 //! // This is the directed graph we're going to use.
99 //! // The node numbers correspond to the different states,
100 //! // and the edge weights symbolize the cost of moving
101 //! // from one node to another.
102 //! // Note that the edges are one-way.
105 //! // +-----------------+
108 //! // 0 -----> 1 -----> 3 ---> 4
112 //! // +------> 2 -------+ |
114 //! // +---------------+
116 //! // The graph is represented as an adjacency list where each index,
117 //! // corresponding to a node value, has a list of outgoing edges.
118 //! // Chosen for its efficiency.
119 //! let graph = vec![
121 //! vec![Edge { node: 2, cost: 10 },
122 //! Edge { node: 1, cost: 1 }],
124 //! vec![Edge { node: 3, cost: 2 }],
126 //! vec![Edge { node: 1, cost: 1 },
127 //! Edge { node: 3, cost: 3 },
128 //! Edge { node: 4, cost: 1 }],
130 //! vec![Edge { node: 0, cost: 7 },
131 //! Edge { node: 4, cost: 2 }],
135 //! assert_eq!(shortest_path(&graph, 0, 1), Some(1));
136 //! assert_eq!(shortest_path(&graph, 0, 3), Some(3));
137 //! assert_eq!(shortest_path(&graph, 3, 0), Some(7));
138 //! assert_eq!(shortest_path(&graph, 0, 4), Some(5));
139 //! assert_eq!(shortest_path(&graph, 4, 0), None);
143 #![allow(missing_docs)]
144 #![stable(feature = "rust1", since = "1.0.0")]
147 use core
::iter
::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen}
;
148 use core
::mem
::{self, swap, ManuallyDrop}
;
149 use core
::ops
::{Deref, DerefMut}
;
152 use crate::collections
::TryReserveError
;
154 use crate::vec
::{self, AsIntoIter, Vec}
;
156 use super::SpecExtend
;
158 /// A priority queue implemented with a binary heap.
160 /// This will be a max-heap.
162 /// It is a logic error for an item to be modified in such a way that the
163 /// item's ordering relative to any other item, as determined by the [`Ord`]
164 /// trait, changes while it is in the heap. This is normally only possible
165 /// through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. The
166 /// behavior resulting from such a logic error is not specified (it
167 /// could include panics, incorrect results, aborts, memory leaks, or
168 /// non-termination) but will not be undefined behavior.
173 /// use std::collections::BinaryHeap;
175 /// // Type inference lets us omit an explicit type signature (which
176 /// // would be `BinaryHeap<i32>` in this example).
177 /// let mut heap = BinaryHeap::new();
179 /// // We can use peek to look at the next item in the heap. In this case,
180 /// // there are no items in there yet so we get None.
181 /// assert_eq!(heap.peek(), None);
183 /// // Let's add some scores...
188 /// // Now peek shows the most important item in the heap.
189 /// assert_eq!(heap.peek(), Some(&5));
191 /// // We can check the length of a heap.
192 /// assert_eq!(heap.len(), 3);
194 /// // We can iterate over the items in the heap, although they are returned in
195 /// // a random order.
197 /// println!("{}", x);
200 /// // If we instead pop these scores, they should come back in order.
201 /// assert_eq!(heap.pop(), Some(5));
202 /// assert_eq!(heap.pop(), Some(2));
203 /// assert_eq!(heap.pop(), Some(1));
204 /// assert_eq!(heap.pop(), None);
206 /// // We can clear the heap of any remaining items.
209 /// // The heap should now be empty.
210 /// assert!(heap.is_empty())
213 /// A `BinaryHeap` with a known list of items can be initialized from an array:
216 /// use std::collections::BinaryHeap;
218 /// let heap = BinaryHeap::from([1, 5, 2]);
223 /// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
224 /// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
225 /// value instead of the greatest one.
228 /// use std::collections::BinaryHeap;
229 /// use std::cmp::Reverse;
231 /// let mut heap = BinaryHeap::new();
233 /// // Wrap values in `Reverse`
234 /// heap.push(Reverse(1));
235 /// heap.push(Reverse(5));
236 /// heap.push(Reverse(2));
238 /// // If we pop these scores now, they should come back in the reverse order.
239 /// assert_eq!(heap.pop(), Some(Reverse(1)));
240 /// assert_eq!(heap.pop(), Some(Reverse(2)));
241 /// assert_eq!(heap.pop(), Some(Reverse(5)));
242 /// assert_eq!(heap.pop(), None);
245 /// # Time complexity
247 /// | [push] | [pop] | [peek]/[peek\_mut] |
248 /// |---------|---------------|--------------------|
249 /// | *O*(1)~ | *O*(log(*n*)) | *O*(1) |
251 /// The value for `push` is an expected cost; the method documentation gives a
252 /// more detailed analysis.
254 /// [`core::cmp::Reverse`]: core::cmp::Reverse
255 /// [`Ord`]: core::cmp::Ord
256 /// [`Cell`]: core::cell::Cell
257 /// [`RefCell`]: core::cell::RefCell
258 /// [push]: BinaryHeap::push
259 /// [pop]: BinaryHeap::pop
260 /// [peek]: BinaryHeap::peek
261 /// [peek\_mut]: BinaryHeap::peek_mut
262 #[stable(feature = "rust1", since = "1.0.0")]
263 #[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
264 pub struct BinaryHeap
<T
> {
268 /// Structure wrapping a mutable reference to the greatest item on a
271 /// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
272 /// its documentation for more.
274 /// [`peek_mut`]: BinaryHeap::peek_mut
275 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
276 pub struct PeekMut
<'a
, T
: 'a
+ Ord
> {
277 heap
: &'a
mut BinaryHeap
<T
>,
281 #[stable(feature = "collection_debug", since = "1.17.0")]
282 impl<T
: Ord
+ fmt
::Debug
> fmt
::Debug
for PeekMut
<'_
, T
> {
283 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
284 f
.debug_tuple("PeekMut").field(&self.heap
.data
[0]).finish()
288 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
289 impl<T
: Ord
> Drop
for PeekMut
<'_
, T
> {
292 // SAFETY: PeekMut is only instantiated for non-empty heaps.
293 unsafe { self.heap.sift_down(0) }
;
298 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
299 impl<T
: Ord
> Deref
for PeekMut
<'_
, T
> {
301 fn deref(&self) -> &T
{
302 debug_assert
!(!self.heap
.is_empty());
303 // SAFE: PeekMut is only instantiated for non-empty heaps
304 unsafe { self.heap.data.get_unchecked(0) }
308 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
309 impl<T
: Ord
> DerefMut
for PeekMut
<'_
, T
> {
310 fn deref_mut(&mut self) -> &mut T
{
311 debug_assert
!(!self.heap
.is_empty());
313 // SAFE: PeekMut is only instantiated for non-empty heaps
314 unsafe { self.heap.data.get_unchecked_mut(0) }
318 impl<'a
, T
: Ord
> PeekMut
<'a
, T
> {
319 /// Removes the peeked value from the heap and returns it.
320 #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
321 pub fn pop(mut this
: PeekMut
<'a
, T
>) -> T
{
322 let value
= this
.heap
.pop().unwrap();
328 #[stable(feature = "rust1", since = "1.0.0")]
329 impl<T
: Clone
> Clone
for BinaryHeap
<T
> {
330 fn clone(&self) -> Self {
331 BinaryHeap { data: self.data.clone() }
334 fn clone_from(&mut self, source
: &Self) {
335 self.data
.clone_from(&source
.data
);
339 #[stable(feature = "rust1", since = "1.0.0")]
340 impl<T
: Ord
> Default
for BinaryHeap
<T
> {
341 /// Creates an empty `BinaryHeap<T>`.
343 fn default() -> BinaryHeap
<T
> {
348 #[stable(feature = "binaryheap_debug", since = "1.4.0")]
349 impl<T
: fmt
::Debug
> fmt
::Debug
for BinaryHeap
<T
> {
350 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
351 f
.debug_list().entries(self.iter()).finish()
355 impl<T
: Ord
> BinaryHeap
<T
> {
356 /// Creates an empty `BinaryHeap` as a max-heap.
363 /// use std::collections::BinaryHeap;
364 /// let mut heap = BinaryHeap::new();
367 #[stable(feature = "rust1", since = "1.0.0")]
369 pub fn new() -> BinaryHeap
<T
> {
370 BinaryHeap { data: vec![] }
373 /// Creates an empty `BinaryHeap` with a specific capacity.
374 /// This preallocates enough memory for `capacity` elements,
375 /// so that the `BinaryHeap` does not have to be reallocated
376 /// until it contains at least that many values.
383 /// use std::collections::BinaryHeap;
384 /// let mut heap = BinaryHeap::with_capacity(10);
387 #[stable(feature = "rust1", since = "1.0.0")]
389 pub fn with_capacity(capacity
: usize) -> BinaryHeap
<T
> {
390 BinaryHeap { data: Vec::with_capacity(capacity) }
393 /// Returns a mutable reference to the greatest item in the binary heap, or
394 /// `None` if it is empty.
396 /// Note: If the `PeekMut` value is leaked, the heap may be in an
397 /// inconsistent state.
404 /// use std::collections::BinaryHeap;
405 /// let mut heap = BinaryHeap::new();
406 /// assert!(heap.peek_mut().is_none());
412 /// let mut val = heap.peek_mut().unwrap();
415 /// assert_eq!(heap.peek(), Some(&2));
418 /// # Time complexity
420 /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
421 /// otherwise it's *O*(1).
422 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
423 pub fn peek_mut(&mut self) -> Option
<PeekMut
<'_
, T
>> {
424 if self.is_empty() { None }
else { Some(PeekMut { heap: self, sift: false }
) }
427 /// Removes the greatest item from the binary heap and returns it, or `None` if it
435 /// use std::collections::BinaryHeap;
436 /// let mut heap = BinaryHeap::from(vec![1, 3]);
438 /// assert_eq!(heap.pop(), Some(3));
439 /// assert_eq!(heap.pop(), Some(1));
440 /// assert_eq!(heap.pop(), None);
443 /// # Time complexity
445 /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
446 #[stable(feature = "rust1", since = "1.0.0")]
447 pub fn pop(&mut self) -> Option
<T
> {
448 self.data
.pop().map(|mut item
| {
449 if !self.is_empty() {
450 swap(&mut item
, &mut self.data
[0]);
451 // SAFETY: !self.is_empty() means that self.len() > 0
452 unsafe { self.sift_down_to_bottom(0) }
;
458 /// Pushes an item onto the binary heap.
465 /// use std::collections::BinaryHeap;
466 /// let mut heap = BinaryHeap::new();
471 /// assert_eq!(heap.len(), 3);
472 /// assert_eq!(heap.peek(), Some(&5));
475 /// # Time complexity
477 /// The expected cost of `push`, averaged over every possible ordering of
478 /// the elements being pushed, and over a sufficiently large number of
479 /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
480 /// elements that are *not* already in any sorted pattern.
482 /// The time complexity degrades if elements are pushed in predominantly
483 /// ascending order. In the worst case, elements are pushed in ascending
484 /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
485 /// containing *n* elements.
487 /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
488 /// occurs when capacity is exhausted and needs a resize. The resize cost
489 /// has been amortized in the previous figures.
490 #[stable(feature = "rust1", since = "1.0.0")]
491 pub fn push(&mut self, item
: T
) {
492 let old_len
= self.len();
493 self.data
.push(item
);
494 // SAFETY: Since we pushed a new item it means that
495 // old_len = self.len() - 1 < self.len()
496 unsafe { self.sift_up(0, old_len) }
;
499 /// Consumes the `BinaryHeap` and returns a vector in sorted
500 /// (ascending) order.
507 /// use std::collections::BinaryHeap;
509 /// let mut heap = BinaryHeap::from(vec![1, 2, 4, 5, 7]);
513 /// let vec = heap.into_sorted_vec();
514 /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
516 #[must_use = "`self` will be dropped if the result is not used"]
517 #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
518 pub fn into_sorted_vec(mut self) -> Vec
<T
> {
519 let mut end
= self.len();
522 // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
523 // so it's always a valid index to access.
524 // It is safe to access index 0 (i.e. `ptr`), because
525 // 1 <= end < self.len(), which means self.len() >= 2.
527 let ptr
= self.data
.as_mut_ptr();
528 ptr
::swap(ptr
, ptr
.add(end
));
530 // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
531 // 0 < 1 <= end <= self.len() - 1 < self.len()
532 // Which means 0 < end and end < self.len().
533 unsafe { self.sift_down_range(0, end) }
;
538 // The implementations of sift_up and sift_down use unsafe blocks in
539 // order to move an element out of the vector (leaving behind a
540 // hole), shift along the others and move the removed element back into the
541 // vector at the final location of the hole.
542 // The `Hole` type is used to represent this, and make sure
543 // the hole is filled back at the end of its scope, even on panic.
544 // Using a hole reduces the constant factor compared to using swaps,
545 // which involves twice as many moves.
549 /// The caller must guarantee that `pos < self.len()`.
550 unsafe fn sift_up(&mut self, start
: usize, pos
: usize) -> usize {
551 // Take out the value at `pos` and create a hole.
552 // SAFETY: The caller guarantees that pos < self.len()
553 let mut hole
= unsafe { Hole::new(&mut self.data, pos) }
;
555 while hole
.pos() > start
{
556 let parent
= (hole
.pos() - 1) / 2;
558 // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
559 // and so hole.pos() - 1 can't underflow.
560 // This guarantees that parent < hole.pos() so
561 // it's a valid index and also != hole.pos().
562 if hole
.element() <= unsafe { hole.get(parent) }
{
566 // SAFETY: Same as above
567 unsafe { hole.move_to(parent) }
;
573 /// Take an element at `pos` and move it down the heap,
574 /// while its children are larger.
578 /// The caller must guarantee that `pos < end <= self.len()`.
579 unsafe fn sift_down_range(&mut self, pos
: usize, end
: usize) {
580 // SAFETY: The caller guarantees that pos < end <= self.len().
581 let mut hole
= unsafe { Hole::new(&mut self.data, pos) }
;
582 let mut child
= 2 * hole
.pos() + 1;
584 // Loop invariant: child == 2 * hole.pos() + 1.
585 while child
<= end
.saturating_sub(2) {
586 // compare with the greater of the two children
587 // SAFETY: child < end - 1 < self.len() and
588 // child + 1 < end <= self.len(), so they're valid indexes.
589 // child == 2 * hole.pos() + 1 != hole.pos() and
590 // child + 1 == 2 * hole.pos() + 2 != hole.pos().
591 // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
593 child
+= unsafe { hole.get(child) <= hole.get(child + 1) }
as usize;
595 // if we are already in order, stop.
596 // SAFETY: child is now either the old child or the old child+1
597 // We have already proven that both are < self.len() and != hole.pos()
598 if hole
.element() >= unsafe { hole.get(child) }
{
602 // SAFETY: same as above.
603 unsafe { hole.move_to(child) }
;
604 child
= 2 * hole
.pos() + 1;
607 // SAFETY: && short circuit, which means that in the
608 // second condition it's already true that child == end - 1 < self.len().
609 if child
== end
- 1 && hole
.element() < unsafe { hole.get(child) }
{
610 // SAFETY: child is already proven to be a valid index and
611 // child == 2 * hole.pos() + 1 != hole.pos().
612 unsafe { hole.move_to(child) }
;
618 /// The caller must guarantee that `pos < self.len()`.
619 unsafe fn sift_down(&mut self, pos
: usize) {
620 let len
= self.len();
621 // SAFETY: pos < len is guaranteed by the caller and
622 // obviously len = self.len() <= self.len().
623 unsafe { self.sift_down_range(pos, len) }
;
626 /// Take an element at `pos` and move it all the way down the heap,
627 /// then sift it up to its position.
629 /// Note: This is faster when the element is known to be large / should
630 /// be closer to the bottom.
634 /// The caller must guarantee that `pos < self.len()`.
635 unsafe fn sift_down_to_bottom(&mut self, mut pos
: usize) {
636 let end
= self.len();
639 // SAFETY: The caller guarantees that pos < self.len().
640 let mut hole
= unsafe { Hole::new(&mut self.data, pos) }
;
641 let mut child
= 2 * hole
.pos() + 1;
643 // Loop invariant: child == 2 * hole.pos() + 1.
644 while child
<= end
.saturating_sub(2) {
645 // SAFETY: child < end - 1 < self.len() and
646 // child + 1 < end <= self.len(), so they're valid indexes.
647 // child == 2 * hole.pos() + 1 != hole.pos() and
648 // child + 1 == 2 * hole.pos() + 2 != hole.pos().
649 // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
651 child
+= unsafe { hole.get(child) <= hole.get(child + 1) }
as usize;
653 // SAFETY: Same as above
654 unsafe { hole.move_to(child) }
;
655 child
= 2 * hole
.pos() + 1;
658 if child
== end
- 1 {
659 // SAFETY: child == end - 1 < self.len(), so it's a valid index
660 // and child == 2 * hole.pos() + 1 != hole.pos().
661 unsafe { hole.move_to(child) }
;
666 // SAFETY: pos is the position in the hole and was already proven
667 // to be a valid index.
668 unsafe { self.sift_up(start, pos) }
;
671 /// Rebuild assuming data[0..start] is still a proper heap.
672 fn rebuild_tail(&mut self, start
: usize) {
673 if start
== self.len() {
677 let tail_len
= self.len() - start
;
680 fn log2_fast(x
: usize) -> usize {
681 (usize::BITS
- x
.leading_zeros() - 1) as usize
684 // `rebuild` takes O(self.len()) operations
685 // and about 2 * self.len() comparisons in the worst case
686 // while repeating `sift_up` takes O(tail_len * log(start)) operations
687 // and about 1 * tail_len * log_2(start) comparisons in the worst case,
688 // assuming start >= tail_len. For larger heaps, the crossover point
689 // no longer follows this reasoning and was determined empirically.
690 let better_to_rebuild
= if start
< tail_len
{
692 } else if self.len() <= 2048 {
693 2 * self.len() < tail_len
* log2_fast(start
)
695 2 * self.len() < tail_len
* 11
698 if better_to_rebuild
{
701 for i
in start
..self.len() {
702 // SAFETY: The index `i` is always less than self.len().
703 unsafe { self.sift_up(0, i) }
;
708 fn rebuild(&mut self) {
709 let mut n
= self.len() / 2;
712 // SAFETY: n starts from self.len() / 2 and goes down to 0.
713 // The only case when !(n < self.len()) is if
714 // self.len() == 0, but it's ruled out by the loop condition.
715 unsafe { self.sift_down(n) }
;
719 /// Moves all the elements of `other` into `self`, leaving `other` empty.
726 /// use std::collections::BinaryHeap;
728 /// let v = vec![-10, 1, 2, 3, 3];
729 /// let mut a = BinaryHeap::from(v);
731 /// let v = vec![-20, 5, 43];
732 /// let mut b = BinaryHeap::from(v);
734 /// a.append(&mut b);
736 /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
737 /// assert!(b.is_empty());
739 #[stable(feature = "binary_heap_append", since = "1.11.0")]
740 pub fn append(&mut self, other
: &mut Self) {
741 if self.len() < other
.len() {
745 let start
= self.data
.len();
747 self.data
.append(&mut other
.data
);
749 self.rebuild_tail(start
);
752 /// Returns an iterator which retrieves elements in heap order.
753 /// The retrieved elements are removed from the original heap.
754 /// The remaining elements will be removed on drop in heap order.
757 /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
758 /// You should use the latter for most cases.
765 /// #![feature(binary_heap_drain_sorted)]
766 /// use std::collections::BinaryHeap;
768 /// let mut heap = BinaryHeap::from(vec![1, 2, 3, 4, 5]);
769 /// assert_eq!(heap.len(), 5);
771 /// drop(heap.drain_sorted()); // removes all elements in heap order
772 /// assert_eq!(heap.len(), 0);
775 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
776 pub fn drain_sorted(&mut self) -> DrainSorted
<'_
, T
> {
777 DrainSorted { inner: self }
780 /// Retains only the elements specified by the predicate.
782 /// In other words, remove all elements `e` such that `f(&e)` returns
783 /// `false`. The elements are visited in unsorted (and unspecified) order.
790 /// #![feature(binary_heap_retain)]
791 /// use std::collections::BinaryHeap;
793 /// let mut heap = BinaryHeap::from(vec![-10, -5, 1, 2, 4, 13]);
795 /// heap.retain(|x| x % 2 == 0); // only keep even numbers
797 /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
799 #[unstable(feature = "binary_heap_retain", issue = "71503")]
800 pub fn retain
<F
>(&mut self, mut f
: F
)
802 F
: FnMut(&T
) -> bool
,
804 let mut first_removed
= self.len();
806 self.data
.retain(|e
| {
808 if !keep
&& i
< first_removed
{
814 // data[0..first_removed] is untouched, so we only need to rebuild the tail:
815 self.rebuild_tail(first_removed
);
819 impl<T
> BinaryHeap
<T
> {
820 /// Returns an iterator visiting all values in the underlying vector, in
828 /// use std::collections::BinaryHeap;
829 /// let heap = BinaryHeap::from(vec![1, 2, 3, 4]);
831 /// // Print 1, 2, 3, 4 in arbitrary order
832 /// for x in heap.iter() {
833 /// println!("{}", x);
836 #[stable(feature = "rust1", since = "1.0.0")]
837 pub fn iter(&self) -> Iter
<'_
, T
> {
838 Iter { iter: self.data.iter() }
841 /// Returns an iterator which retrieves elements in heap order.
842 /// This method consumes the original heap.
849 /// #![feature(binary_heap_into_iter_sorted)]
850 /// use std::collections::BinaryHeap;
851 /// let heap = BinaryHeap::from(vec![1, 2, 3, 4, 5]);
853 /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), vec![5, 4]);
855 #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
856 pub fn into_iter_sorted(self) -> IntoIterSorted
<T
> {
857 IntoIterSorted { inner: self }
860 /// Returns the greatest item in the binary heap, or `None` if it is empty.
867 /// use std::collections::BinaryHeap;
868 /// let mut heap = BinaryHeap::new();
869 /// assert_eq!(heap.peek(), None);
874 /// assert_eq!(heap.peek(), Some(&5));
878 /// # Time complexity
880 /// Cost is *O*(1) in the worst case.
882 #[stable(feature = "rust1", since = "1.0.0")]
883 pub fn peek(&self) -> Option
<&T
> {
887 /// Returns the number of elements the binary heap can hold without reallocating.
894 /// use std::collections::BinaryHeap;
895 /// let mut heap = BinaryHeap::with_capacity(100);
896 /// assert!(heap.capacity() >= 100);
900 #[stable(feature = "rust1", since = "1.0.0")]
901 pub fn capacity(&self) -> usize {
905 /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
906 /// given `BinaryHeap`. Does nothing if the capacity is already sufficient.
908 /// Note that the allocator may give the collection more space than it requests. Therefore
909 /// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future
910 /// insertions are expected.
914 /// Panics if the new capacity overflows `usize`.
921 /// use std::collections::BinaryHeap;
922 /// let mut heap = BinaryHeap::new();
923 /// heap.reserve_exact(100);
924 /// assert!(heap.capacity() >= 100);
928 /// [`reserve`]: BinaryHeap::reserve
929 #[stable(feature = "rust1", since = "1.0.0")]
930 pub fn reserve_exact(&mut self, additional
: usize) {
931 self.data
.reserve_exact(additional
);
934 /// Reserves capacity for at least `additional` more elements to be inserted in the
935 /// `BinaryHeap`. The collection may reserve more space to avoid frequent reallocations.
939 /// Panics if the new capacity overflows `usize`.
946 /// use std::collections::BinaryHeap;
947 /// let mut heap = BinaryHeap::new();
948 /// heap.reserve(100);
949 /// assert!(heap.capacity() >= 100);
952 #[stable(feature = "rust1", since = "1.0.0")]
953 pub fn reserve(&mut self, additional
: usize) {
954 self.data
.reserve(additional
);
957 /// Tries to reserve the minimum capacity for exactly `additional`
958 /// elements to be inserted in the given `BinaryHeap<T>`. After calling
959 /// `try_reserve_exact`, capacity will be greater than or equal to
960 /// `self.len() + additional` if it returns `Ok(())`.
961 /// Does nothing if the capacity is already sufficient.
963 /// Note that the allocator may give the collection more space than it
964 /// requests. Therefore, capacity can not be relied upon to be precisely
965 /// minimal. Prefer [`try_reserve`] if future insertions are expected.
967 /// [`try_reserve`]: BinaryHeap::try_reserve
971 /// If the capacity overflows, or the allocator reports a failure, then an error
977 /// #![feature(try_reserve_2)]
978 /// use std::collections::BinaryHeap;
979 /// use std::collections::TryReserveError;
981 /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
982 /// let mut heap = BinaryHeap::new();
984 /// // Pre-reserve the memory, exiting if we can't
985 /// heap.try_reserve_exact(data.len())?;
987 /// // Now we know this can't OOM in the middle of our complex work
988 /// heap.extend(data.iter());
992 /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
994 #[unstable(feature = "try_reserve_2", issue = "91789")]
995 pub fn try_reserve_exact(&mut self, additional
: usize) -> Result
<(), TryReserveError
> {
996 self.data
.try_reserve_exact(additional
)
999 /// Tries to reserve capacity for at least `additional` more elements to be inserted
1000 /// in the given `BinaryHeap<T>`. The collection may reserve more space to avoid
1001 /// frequent reallocations. After calling `try_reserve`, capacity will be
1002 /// greater than or equal to `self.len() + additional`. Does nothing if
1003 /// capacity is already sufficient.
1007 /// If the capacity overflows, or the allocator reports a failure, then an error
1013 /// #![feature(try_reserve_2)]
1014 /// use std::collections::BinaryHeap;
1015 /// use std::collections::TryReserveError;
1017 /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
1018 /// let mut heap = BinaryHeap::new();
1020 /// // Pre-reserve the memory, exiting if we can't
1021 /// heap.try_reserve(data.len())?;
1023 /// // Now we know this can't OOM in the middle of our complex work
1024 /// heap.extend(data.iter());
1028 /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
1030 #[unstable(feature = "try_reserve_2", issue = "91789")]
1031 pub fn try_reserve(&mut self, additional
: usize) -> Result
<(), TryReserveError
> {
1032 self.data
.try_reserve(additional
)
1035 /// Discards as much additional capacity as possible.
1042 /// use std::collections::BinaryHeap;
1043 /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
1045 /// assert!(heap.capacity() >= 100);
1046 /// heap.shrink_to_fit();
1047 /// assert!(heap.capacity() == 0);
1049 #[stable(feature = "rust1", since = "1.0.0")]
1050 pub fn shrink_to_fit(&mut self) {
1051 self.data
.shrink_to_fit();
1054 /// Discards capacity with a lower bound.
1056 /// The capacity will remain at least as large as both the length
1057 /// and the supplied value.
1059 /// If the current capacity is less than the lower limit, this is a no-op.
1064 /// use std::collections::BinaryHeap;
1065 /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
1067 /// assert!(heap.capacity() >= 100);
1068 /// heap.shrink_to(10);
1069 /// assert!(heap.capacity() >= 10);
1072 #[stable(feature = "shrink_to", since = "1.56.0")]
1073 pub fn shrink_to(&mut self, min_capacity
: usize) {
1074 self.data
.shrink_to(min_capacity
)
1077 /// Returns a slice of all values in the underlying vector, in arbitrary
1085 /// #![feature(binary_heap_as_slice)]
1086 /// use std::collections::BinaryHeap;
1087 /// use std::io::{self, Write};
1089 /// let heap = BinaryHeap::from(vec![1, 2, 3, 4, 5, 6, 7]);
1091 /// io::sink().write(heap.as_slice()).unwrap();
1094 #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
1095 pub fn as_slice(&self) -> &[T
] {
1096 self.data
.as_slice()
1099 /// Consumes the `BinaryHeap` and returns the underlying vector
1100 /// in arbitrary order.
1107 /// use std::collections::BinaryHeap;
1108 /// let heap = BinaryHeap::from(vec![1, 2, 3, 4, 5, 6, 7]);
1109 /// let vec = heap.into_vec();
1111 /// // Will print in some order
1113 /// println!("{}", x);
1116 #[must_use = "`self` will be dropped if the result is not used"]
1117 #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
1118 pub fn into_vec(self) -> Vec
<T
> {
1122 /// Returns the length of the binary heap.
1129 /// use std::collections::BinaryHeap;
1130 /// let heap = BinaryHeap::from(vec![1, 3]);
1132 /// assert_eq!(heap.len(), 2);
1135 #[stable(feature = "rust1", since = "1.0.0")]
1136 pub fn len(&self) -> usize {
1140 /// Checks if the binary heap is empty.
1147 /// use std::collections::BinaryHeap;
1148 /// let mut heap = BinaryHeap::new();
1150 /// assert!(heap.is_empty());
1156 /// assert!(!heap.is_empty());
1159 #[stable(feature = "rust1", since = "1.0.0")]
1160 pub fn is_empty(&self) -> bool
{
1164 /// Clears the binary heap, returning an iterator over the removed elements.
1166 /// The elements are removed in arbitrary order.
1173 /// use std::collections::BinaryHeap;
1174 /// let mut heap = BinaryHeap::from(vec![1, 3]);
1176 /// assert!(!heap.is_empty());
1178 /// for x in heap.drain() {
1179 /// println!("{}", x);
1182 /// assert!(heap.is_empty());
1185 #[stable(feature = "drain", since = "1.6.0")]
1186 pub fn drain(&mut self) -> Drain
<'_
, T
> {
1187 Drain { iter: self.data.drain(..) }
1190 /// Drops all items from the binary heap.
1197 /// use std::collections::BinaryHeap;
1198 /// let mut heap = BinaryHeap::from(vec![1, 3]);
1200 /// assert!(!heap.is_empty());
1204 /// assert!(heap.is_empty());
1206 #[stable(feature = "rust1", since = "1.0.0")]
1207 pub fn clear(&mut self) {
1212 /// Hole represents a hole in a slice i.e., an index without valid value
1213 /// (because it was moved from or duplicated).
1214 /// In drop, `Hole` will restore the slice by filling the hole
1215 /// position with the value that was originally removed.
1216 struct Hole
<'a
, T
: 'a
> {
1218 elt
: ManuallyDrop
<T
>,
1222 impl<'a
, T
> Hole
<'a
, T
> {
1223 /// Create a new `Hole` at index `pos`.
1225 /// Unsafe because pos must be within the data slice.
1227 unsafe fn new(data
: &'a
mut [T
], pos
: usize) -> Self {
1228 debug_assert
!(pos
< data
.len());
1229 // SAFE: pos should be inside the slice
1230 let elt
= unsafe { ptr::read(data.get_unchecked(pos)) }
;
1231 Hole { data, elt: ManuallyDrop::new(elt), pos }
1235 fn pos(&self) -> usize {
1239 /// Returns a reference to the element removed.
1241 fn element(&self) -> &T
{
1245 /// Returns a reference to the element at `index`.
1247 /// Unsafe because index must be within the data slice and not equal to pos.
1249 unsafe fn get(&self, index
: usize) -> &T
{
1250 debug_assert
!(index
!= self.pos
);
1251 debug_assert
!(index
< self.data
.len());
1252 unsafe { self.data.get_unchecked(index) }
1255 /// Move hole to new location
1257 /// Unsafe because index must be within the data slice and not equal to pos.
1259 unsafe fn move_to(&mut self, index
: usize) {
1260 debug_assert
!(index
!= self.pos
);
1261 debug_assert
!(index
< self.data
.len());
1263 let ptr
= self.data
.as_mut_ptr();
1264 let index_ptr
: *const _
= ptr
.add(index
);
1265 let hole_ptr
= ptr
.add(self.pos
);
1266 ptr
::copy_nonoverlapping(index_ptr
, hole_ptr
, 1);
1272 impl<T
> Drop
for Hole
<'_
, T
> {
1274 fn drop(&mut self) {
1275 // fill the hole again
1278 ptr
::copy_nonoverlapping(&*self.elt
, self.data
.get_unchecked_mut(pos
), 1);
1283 /// An iterator over the elements of a `BinaryHeap`.
1285 /// This `struct` is created by [`BinaryHeap::iter()`]. See its
1286 /// documentation for more.
1288 /// [`iter`]: BinaryHeap::iter
1289 #[must_use = "iterators are lazy and do nothing unless consumed"]
1290 #[stable(feature = "rust1", since = "1.0.0")]
1291 pub struct Iter
<'a
, T
: 'a
> {
1292 iter
: slice
::Iter
<'a
, T
>,
1295 #[stable(feature = "collection_debug", since = "1.17.0")]
1296 impl<T
: fmt
::Debug
> fmt
::Debug
for Iter
<'_
, T
> {
1297 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
1298 f
.debug_tuple("Iter").field(&self.iter
.as_slice()).finish()
1302 // FIXME(#26925) Remove in favor of `#[derive(Clone)]`
1303 #[stable(feature = "rust1", since = "1.0.0")]
1304 impl<T
> Clone
for Iter
<'_
, T
> {
1305 fn clone(&self) -> Self {
1306 Iter { iter: self.iter.clone() }
1310 #[stable(feature = "rust1", since = "1.0.0")]
1311 impl<'a
, T
> Iterator
for Iter
<'a
, T
> {
1315 fn next(&mut self) -> Option
<&'a T
> {
1320 fn size_hint(&self) -> (usize, Option
<usize>) {
1321 self.iter
.size_hint()
1325 fn last(self) -> Option
<&'a T
> {
1330 #[stable(feature = "rust1", since = "1.0.0")]
1331 impl<'a
, T
> DoubleEndedIterator
for Iter
<'a
, T
> {
1333 fn next_back(&mut self) -> Option
<&'a T
> {
1334 self.iter
.next_back()
1338 #[stable(feature = "rust1", since = "1.0.0")]
1339 impl<T
> ExactSizeIterator
for Iter
<'_
, T
> {
1340 fn is_empty(&self) -> bool
{
1341 self.iter
.is_empty()
// `slice::Iter` is fused, so `Iter` inherits the guarantee: once `next`
// returns `None` it keeps returning `None`.
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Iter<'_, T> {}
1348 /// An owning iterator over the elements of a `BinaryHeap`.
1350 /// This `struct` is created by [`BinaryHeap::into_iter()`]
1351 /// (provided by the [`IntoIterator`] trait). See its documentation for more.
1353 /// [`into_iter`]: BinaryHeap::into_iter
1354 /// [`IntoIterator`]: core::iter::IntoIterator
1355 #[stable(feature = "rust1", since = "1.0.0")]
1357 pub struct IntoIter
<T
> {
1358 iter
: vec
::IntoIter
<T
>,
1361 #[stable(feature = "collection_debug", since = "1.17.0")]
1362 impl<T
: fmt
::Debug
> fmt
::Debug
for IntoIter
<T
> {
1363 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
1364 f
.debug_tuple("IntoIter").field(&self.iter
.as_slice()).finish()
1368 #[stable(feature = "rust1", since = "1.0.0")]
1369 impl<T
> Iterator
for IntoIter
<T
> {
1373 fn next(&mut self) -> Option
<T
> {
1378 fn size_hint(&self) -> (usize, Option
<usize>) {
1379 self.iter
.size_hint()
1383 #[stable(feature = "rust1", since = "1.0.0")]
1384 impl<T
> DoubleEndedIterator
for IntoIter
<T
> {
1386 fn next_back(&mut self) -> Option
<T
> {
1387 self.iter
.next_back()
1391 #[stable(feature = "rust1", since = "1.0.0")]
1392 impl<T
> ExactSizeIterator
for IntoIter
<T
> {
1393 fn is_empty(&self) -> bool
{
1394 self.iter
.is_empty()
// `vec::IntoIter` is fused, so `IntoIter` inherits the guarantee.
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}
1401 #[unstable(issue = "none", feature = "inplace_iteration")]
1403 unsafe impl<T
> SourceIter
for IntoIter
<T
> {
1404 type Source
= IntoIter
<T
>;
1407 unsafe fn as_inner(&mut self) -> &mut Self::Source
{
// Marker: opts `IntoIter` into in-place iterator collection
// (see the unstable `inplace_iteration` machinery).
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<I> InPlaceIterable for IntoIter<I> {}
1416 impl<I
> AsIntoIter
for IntoIter
<I
> {
1419 fn as_into_iter(&mut self) -> &mut vec
::IntoIter
<Self::Item
> {
1424 #[must_use = "iterators are lazy and do nothing unless consumed"]
1425 #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
1426 #[derive(Clone, Debug)]
1427 pub struct IntoIterSorted
<T
> {
1428 inner
: BinaryHeap
<T
>,
1431 #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
1432 impl<T
: Ord
> Iterator
for IntoIterSorted
<T
> {
1436 fn next(&mut self) -> Option
<T
> {
1441 fn size_hint(&self) -> (usize, Option
<usize>) {
1442 let exact
= self.inner
.len();
1443 (exact
, Some(exact
))
// `size_hint` is `(len, Some(len))`, so the length is exact.
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}

// Popping an empty heap keeps returning `None`, so the iterator is fused.
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> FusedIterator for IntoIterSorted<T> {}

// SAFETY: the reported `size_hint` is exactly the number of remaining
// elements, as required by the `TrustedLen` contract.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
1456 /// A draining iterator over the elements of a `BinaryHeap`.
1458 /// This `struct` is created by [`BinaryHeap::drain()`]. See its
1459 /// documentation for more.
1461 /// [`drain`]: BinaryHeap::drain
1462 #[stable(feature = "drain", since = "1.6.0")]
1464 pub struct Drain
<'a
, T
: 'a
> {
1465 iter
: vec
::Drain
<'a
, T
>,
1468 #[stable(feature = "drain", since = "1.6.0")]
1469 impl<T
> Iterator
for Drain
<'_
, T
> {
1473 fn next(&mut self) -> Option
<T
> {
1478 fn size_hint(&self) -> (usize, Option
<usize>) {
1479 self.iter
.size_hint()
1483 #[stable(feature = "drain", since = "1.6.0")]
1484 impl<T
> DoubleEndedIterator
for Drain
<'_
, T
> {
1486 fn next_back(&mut self) -> Option
<T
> {
1487 self.iter
.next_back()
1491 #[stable(feature = "drain", since = "1.6.0")]
1492 impl<T
> ExactSizeIterator
for Drain
<'_
, T
> {
1493 fn is_empty(&self) -> bool
{
1494 self.iter
.is_empty()
// `vec::Drain` is fused, so `Drain` inherits the guarantee.
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Drain<'_, T> {}
1501 /// A draining iterator over the elements of a `BinaryHeap`.
1503 /// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
1504 /// documentation for more.
1506 /// [`drain_sorted`]: BinaryHeap::drain_sorted
1507 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1509 pub struct DrainSorted
<'a
, T
: Ord
> {
1510 inner
: &'a
mut BinaryHeap
<T
>,
1513 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1514 impl<'a
, T
: Ord
> Drop
for DrainSorted
<'a
, T
> {
1515 /// Removes heap elements in heap order.
1516 fn drop(&mut self) {
1517 struct DropGuard
<'r
, 'a
, T
: Ord
>(&'r
mut DrainSorted
<'a
, T
>);
1519 impl<'r
, 'a
, T
: Ord
> Drop
for DropGuard
<'r
, 'a
, T
> {
1520 fn drop(&mut self) {
1521 while self.0.inner
.pop().is_some() {}
1525 while let Some(item
) = self.inner
.pop() {
1526 let guard
= DropGuard(self);
1533 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1534 impl<T
: Ord
> Iterator
for DrainSorted
<'_
, T
> {
1538 fn next(&mut self) -> Option
<T
> {
1543 fn size_hint(&self) -> (usize, Option
<usize>) {
1544 let exact
= self.inner
.len();
1545 (exact
, Some(exact
))
// `size_hint` is `(len, Some(len))`, so the length is exact.
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}

// Popping an empty heap keeps returning `None`, so the iterator is fused.
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}

// SAFETY: the reported `size_hint` is exactly the number of remaining
// elements, as required by the `TrustedLen` contract.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
1558 #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
1559 impl<T
: Ord
> From
<Vec
<T
>> for BinaryHeap
<T
> {
1560 /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
1562 /// This conversion happens in-place, and has *O*(*n*) time complexity.
1563 fn from(vec
: Vec
<T
>) -> BinaryHeap
<T
> {
1564 let mut heap
= BinaryHeap { data: vec }
;
1570 #[stable(feature = "std_collections_from_array", since = "1.56.0")]
1571 impl<T
: Ord
, const N
: usize> From
<[T
; N
]> for BinaryHeap
<T
> {
1573 /// use std::collections::BinaryHeap;
1575 /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
1576 /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
1577 /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
1578 /// assert_eq!(a, b);
1581 fn from(arr
: [T
; N
]) -> Self {
1582 Self::from_iter(arr
)
1586 #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
1587 impl<T
> From
<BinaryHeap
<T
>> for Vec
<T
> {
1588 /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
1590 /// This conversion requires no data movement or allocation, and has
1591 /// constant time complexity.
1592 fn from(heap
: BinaryHeap
<T
>) -> Vec
<T
> {
1597 #[stable(feature = "rust1", since = "1.0.0")]
1598 impl<T
: Ord
> FromIterator
<T
> for BinaryHeap
<T
> {
1599 fn from_iter
<I
: IntoIterator
<Item
= T
>>(iter
: I
) -> BinaryHeap
<T
> {
1600 BinaryHeap
::from(iter
.into_iter().collect
::<Vec
<_
>>())
1604 #[stable(feature = "rust1", since = "1.0.0")]
1605 impl<T
> IntoIterator
for BinaryHeap
<T
> {
1607 type IntoIter
= IntoIter
<T
>;
1609 /// Creates a consuming iterator, that is, one that moves each value out of
1610 /// the binary heap in arbitrary order. The binary heap cannot be used
1611 /// after calling this.
1618 /// use std::collections::BinaryHeap;
1619 /// let heap = BinaryHeap::from(vec![1, 2, 3, 4]);
1621 /// // Print 1, 2, 3, 4 in arbitrary order
1622 /// for x in heap.into_iter() {
1623 /// // x has type i32, not &i32
1624 /// println!("{}", x);
1627 fn into_iter(self) -> IntoIter
<T
> {
1628 IntoIter { iter: self.data.into_iter() }
1632 #[stable(feature = "rust1", since = "1.0.0")]
1633 impl<'a
, T
> IntoIterator
for &'a BinaryHeap
<T
> {
1635 type IntoIter
= Iter
<'a
, T
>;
1637 fn into_iter(self) -> Iter
<'a
, T
> {
1642 #[stable(feature = "rust1", since = "1.0.0")]
1643 impl<T
: Ord
> Extend
<T
> for BinaryHeap
<T
> {
1645 fn extend
<I
: IntoIterator
<Item
= T
>>(&mut self, iter
: I
) {
1646 <Self as SpecExtend
<I
>>::spec_extend(self, iter
);
1650 fn extend_one(&mut self, item
: T
) {
1655 fn extend_reserve(&mut self, additional
: usize) {
1656 self.reserve(additional
);
1660 impl<T
: Ord
, I
: IntoIterator
<Item
= T
>> SpecExtend
<I
> for BinaryHeap
<T
> {
1661 default fn spec_extend(&mut self, iter
: I
) {
1662 self.extend_desugared(iter
.into_iter());
1666 impl<T
: Ord
> SpecExtend
<Vec
<T
>> for BinaryHeap
<T
> {
1667 fn spec_extend(&mut self, ref mut other
: Vec
<T
>) {
1668 let start
= self.data
.len();
1669 self.data
.append(other
);
1670 self.rebuild_tail(start
);
1674 impl<T
: Ord
> SpecExtend
<BinaryHeap
<T
>> for BinaryHeap
<T
> {
1675 fn spec_extend(&mut self, ref mut other
: BinaryHeap
<T
>) {
1680 impl<T
: Ord
> BinaryHeap
<T
> {
1681 fn extend_desugared
<I
: IntoIterator
<Item
= T
>>(&mut self, iter
: I
) {
1682 let iterator
= iter
.into_iter();
1683 let (lower
, _
) = iterator
.size_hint();
1685 self.reserve(lower
);
1687 iterator
.for_each(move |elem
| self.push(elem
));
1691 #[stable(feature = "extend_ref", since = "1.2.0")]
1692 impl<'a
, T
: 'a
+ Ord
+ Copy
> Extend
<&'a T
> for BinaryHeap
<T
> {
1693 fn extend
<I
: IntoIterator
<Item
= &'a T
>>(&mut self, iter
: I
) {
1694 self.extend(iter
.into_iter().cloned());
1698 fn extend_one(&mut self, &item
: &'a T
) {
1703 fn extend_reserve(&mut self, additional
: usize) {
1704 self.reserve(additional
);