]> git.proxmox.com Git - rustc.git/blob - library/alloc/src/collections/binary_heap.rs
New upstream version 1.59.0+dfsg1
[rustc.git] / library / alloc / src / collections / binary_heap.rs
1 //! A priority queue implemented with a binary heap.
2 //!
3 //! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
4 //! Checking the largest element is *O*(1). Converting a vector to a binary heap
5 //! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
6 //! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*))
7 //! in-place heapsort.
8 //!
9 //! # Examples
10 //!
11 //! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
12 //! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
13 //! It shows how to use [`BinaryHeap`] with custom types.
14 //!
15 //! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
16 //! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
17 //! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
18 //!
19 //! ```
20 //! use std::cmp::Ordering;
21 //! use std::collections::BinaryHeap;
22 //!
23 //! #[derive(Copy, Clone, Eq, PartialEq)]
24 //! struct State {
25 //! cost: usize,
26 //! position: usize,
27 //! }
28 //!
29 //! // The priority queue depends on `Ord`.
30 //! // Explicitly implement the trait so the queue becomes a min-heap
31 //! // instead of a max-heap.
32 //! impl Ord for State {
33 //! fn cmp(&self, other: &Self) -> Ordering {
//! // Notice that we flip the ordering on costs.
35 //! // In case of a tie we compare positions - this step is necessary
36 //! // to make implementations of `PartialEq` and `Ord` consistent.
37 //! other.cost.cmp(&self.cost)
38 //! .then_with(|| self.position.cmp(&other.position))
39 //! }
40 //! }
41 //!
42 //! // `PartialOrd` needs to be implemented as well.
43 //! impl PartialOrd for State {
44 //! fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
45 //! Some(self.cmp(other))
46 //! }
47 //! }
48 //!
49 //! // Each node is represented as a `usize`, for a shorter implementation.
50 //! struct Edge {
51 //! node: usize,
52 //! cost: usize,
53 //! }
54 //!
55 //! // Dijkstra's shortest path algorithm.
56 //!
57 //! // Start at `start` and use `dist` to track the current shortest distance
58 //! // to each node. This implementation isn't memory-efficient as it may leave duplicate
59 //! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
60 //! // for a simpler implementation.
61 //! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
62 //! // dist[node] = current shortest distance from `start` to `node`
63 //! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
64 //!
65 //! let mut heap = BinaryHeap::new();
66 //!
67 //! // We're at `start`, with a zero cost
68 //! dist[start] = 0;
69 //! heap.push(State { cost: 0, position: start });
70 //!
71 //! // Examine the frontier with lower cost nodes first (min-heap)
72 //! while let Some(State { cost, position }) = heap.pop() {
73 //! // Alternatively we could have continued to find all shortest paths
74 //! if position == goal { return Some(cost); }
75 //!
76 //! // Important as we may have already found a better way
77 //! if cost > dist[position] { continue; }
78 //!
79 //! // For each node we can reach, see if we can find a way with
80 //! // a lower cost going through this node
81 //! for edge in &adj_list[position] {
82 //! let next = State { cost: cost + edge.cost, position: edge.node };
83 //!
84 //! // If so, add it to the frontier and continue
85 //! if next.cost < dist[next.position] {
86 //! heap.push(next);
87 //! // Relaxation, we have now found a better way
88 //! dist[next.position] = next.cost;
89 //! }
90 //! }
91 //! }
92 //!
93 //! // Goal not reachable
94 //! None
95 //! }
96 //!
97 //! fn main() {
98 //! // This is the directed graph we're going to use.
99 //! // The node numbers correspond to the different states,
100 //! // and the edge weights symbolize the cost of moving
101 //! // from one node to another.
102 //! // Note that the edges are one-way.
103 //! //
104 //! // 7
105 //! // +-----------------+
106 //! // | |
107 //! // v 1 2 | 2
108 //! // 0 -----> 1 -----> 3 ---> 4
109 //! // | ^ ^ ^
110 //! // | | 1 | |
111 //! // | | | 3 | 1
112 //! // +------> 2 -------+ |
113 //! // 10 | |
114 //! // +---------------+
115 //! //
116 //! // The graph is represented as an adjacency list where each index,
117 //! // corresponding to a node value, has a list of outgoing edges.
118 //! // Chosen for its efficiency.
119 //! let graph = vec![
120 //! // Node 0
121 //! vec![Edge { node: 2, cost: 10 },
122 //! Edge { node: 1, cost: 1 }],
123 //! // Node 1
124 //! vec![Edge { node: 3, cost: 2 }],
125 //! // Node 2
126 //! vec![Edge { node: 1, cost: 1 },
127 //! Edge { node: 3, cost: 3 },
128 //! Edge { node: 4, cost: 1 }],
129 //! // Node 3
130 //! vec![Edge { node: 0, cost: 7 },
131 //! Edge { node: 4, cost: 2 }],
132 //! // Node 4
133 //! vec![]];
134 //!
135 //! assert_eq!(shortest_path(&graph, 0, 1), Some(1));
136 //! assert_eq!(shortest_path(&graph, 0, 3), Some(3));
137 //! assert_eq!(shortest_path(&graph, 3, 0), Some(7));
138 //! assert_eq!(shortest_path(&graph, 0, 4), Some(5));
139 //! assert_eq!(shortest_path(&graph, 4, 0), None);
140 //! }
141 //! ```
142
143 #![allow(missing_docs)]
144 #![stable(feature = "rust1", since = "1.0.0")]
145
146 use core::fmt;
147 use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
148 use core::mem::{self, swap, ManuallyDrop};
149 use core::ops::{Deref, DerefMut};
150 use core::ptr;
151
152 use crate::collections::TryReserveError;
153 use crate::slice;
154 use crate::vec::{self, AsIntoIter, Vec};
155
156 use super::SpecExtend;
157
158 /// A priority queue implemented with a binary heap.
159 ///
160 /// This will be a max-heap.
161 ///
162 /// It is a logic error for an item to be modified in such a way that the
163 /// item's ordering relative to any other item, as determined by the [`Ord`]
164 /// trait, changes while it is in the heap. This is normally only possible
165 /// through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. The
166 /// behavior resulting from such a logic error is not specified (it
167 /// could include panics, incorrect results, aborts, memory leaks, or
168 /// non-termination) but will not be undefined behavior.
169 ///
170 /// # Examples
171 ///
172 /// ```
173 /// use std::collections::BinaryHeap;
174 ///
175 /// // Type inference lets us omit an explicit type signature (which
176 /// // would be `BinaryHeap<i32>` in this example).
177 /// let mut heap = BinaryHeap::new();
178 ///
179 /// // We can use peek to look at the next item in the heap. In this case,
180 /// // there's no items in there yet so we get None.
181 /// assert_eq!(heap.peek(), None);
182 ///
183 /// // Let's add some scores...
184 /// heap.push(1);
185 /// heap.push(5);
186 /// heap.push(2);
187 ///
188 /// // Now peek shows the most important item in the heap.
189 /// assert_eq!(heap.peek(), Some(&5));
190 ///
191 /// // We can check the length of a heap.
192 /// assert_eq!(heap.len(), 3);
193 ///
194 /// // We can iterate over the items in the heap, although they are returned in
195 /// // a random order.
196 /// for x in &heap {
197 /// println!("{}", x);
198 /// }
199 ///
200 /// // If we instead pop these scores, they should come back in order.
201 /// assert_eq!(heap.pop(), Some(5));
202 /// assert_eq!(heap.pop(), Some(2));
203 /// assert_eq!(heap.pop(), Some(1));
204 /// assert_eq!(heap.pop(), None);
205 ///
206 /// // We can clear the heap of any remaining items.
207 /// heap.clear();
208 ///
209 /// // The heap should now be empty.
210 /// assert!(heap.is_empty())
211 /// ```
212 ///
213 /// A `BinaryHeap` with a known list of items can be initialized from an array:
214 ///
215 /// ```
216 /// use std::collections::BinaryHeap;
217 ///
218 /// let heap = BinaryHeap::from([1, 5, 2]);
219 /// ```
220 ///
221 /// ## Min-heap
222 ///
223 /// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
224 /// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
225 /// value instead of the greatest one.
226 ///
227 /// ```
228 /// use std::collections::BinaryHeap;
229 /// use std::cmp::Reverse;
230 ///
231 /// let mut heap = BinaryHeap::new();
232 ///
233 /// // Wrap values in `Reverse`
234 /// heap.push(Reverse(1));
235 /// heap.push(Reverse(5));
236 /// heap.push(Reverse(2));
237 ///
238 /// // If we pop these scores now, they should come back in the reverse order.
239 /// assert_eq!(heap.pop(), Some(Reverse(1)));
240 /// assert_eq!(heap.pop(), Some(Reverse(2)));
241 /// assert_eq!(heap.pop(), Some(Reverse(5)));
242 /// assert_eq!(heap.pop(), None);
243 /// ```
244 ///
245 /// # Time complexity
246 ///
247 /// | [push] | [pop] | [peek]/[peek\_mut] |
248 /// |---------|---------------|--------------------|
249 /// | *O*(1)~ | *O*(log(*n*)) | *O*(1) |
250 ///
251 /// The value for `push` is an expected cost; the method documentation gives a
252 /// more detailed analysis.
253 ///
254 /// [`core::cmp::Reverse`]: core::cmp::Reverse
255 /// [`Ord`]: core::cmp::Ord
256 /// [`Cell`]: core::cell::Cell
257 /// [`RefCell`]: core::cell::RefCell
258 /// [push]: BinaryHeap::push
259 /// [pop]: BinaryHeap::pop
260 /// [peek]: BinaryHeap::peek
261 /// [peek\_mut]: BinaryHeap::peek_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
pub struct BinaryHeap<T> {
    // Backing storage. Invariant: elements form a max-heap as ordered by
    // `T: Ord` — for every index `i`, `data[i] >= data[2 * i + 1]` and
    // `data[i] >= data[2 * i + 2]` whenever those children exist
    // (see `sift_up` / `sift_down_range`, which maintain this).
    data: Vec<T>,
}
267
268 /// Structure wrapping a mutable reference to the greatest item on a
269 /// `BinaryHeap`.
270 ///
271 /// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
272 /// its documentation for more.
273 ///
274 /// [`peek_mut`]: BinaryHeap::peek_mut
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub struct PeekMut<'a, T: 'a + Ord> {
    // The heap whose greatest element is being borrowed.
    // Only constructed for non-empty heaps (see `BinaryHeap::peek_mut`).
    heap: &'a mut BinaryHeap<T>,
    // Set to `true` by `DerefMut` once the root may have been mutated;
    // tells `Drop` it must sift the root down to restore the heap invariant.
    sift: bool,
}
280
281 #[stable(feature = "collection_debug", since = "1.17.0")]
282 impl<T: Ord + fmt::Debug> fmt::Debug for PeekMut<'_, T> {
283 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
284 f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
285 }
286 }
287
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord> Drop for PeekMut<'_, T> {
    fn drop(&mut self) {
        // Only restore the heap property if the root may have been mutated
        // (`DerefMut` sets `sift`); a read-only peek leaves the heap intact.
        if self.sift {
            // SAFETY: PeekMut is only instantiated for non-empty heaps.
            unsafe { self.heap.sift_down(0) };
        }
    }
}
297
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord> Deref for PeekMut<'_, T> {
    type Target = T;
    // Shared access to the root cannot break the heap invariant,
    // so `sift` is deliberately left untouched here.
    fn deref(&self) -> &T {
        debug_assert!(!self.heap.is_empty());
        // SAFE: PeekMut is only instantiated for non-empty heaps
        unsafe { self.heap.data.get_unchecked(0) }
    }
}
307
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord> DerefMut for PeekMut<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        debug_assert!(!self.heap.is_empty());
        // Handing out `&mut T` means the caller may change the root's
        // ordering, so flag the heap for a sift-down on drop.
        self.sift = true;
        // SAFE: PeekMut is only instantiated for non-empty heaps
        unsafe { self.heap.data.get_unchecked_mut(0) }
    }
}
317
impl<'a, T: Ord> PeekMut<'a, T> {
    /// Removes the peeked value from the heap and returns it.
    #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
    pub fn pop(mut this: PeekMut<'a, T>) -> T {
        // `unwrap` cannot fail: a `PeekMut` only exists for non-empty heaps.
        let value = this.heap.pop().unwrap();
        // `BinaryHeap::pop` has already restored the heap invariant, so the
        // sift-down that `Drop` would perform is redundant — skip it.
        this.sift = false;
        value
    }
}
327
328 #[stable(feature = "rust1", since = "1.0.0")]
329 impl<T: Clone> Clone for BinaryHeap<T> {
330 fn clone(&self) -> Self {
331 BinaryHeap { data: self.data.clone() }
332 }
333
334 fn clone_from(&mut self, source: &Self) {
335 self.data.clone_from(&source.data);
336 }
337 }
338
339 #[stable(feature = "rust1", since = "1.0.0")]
340 impl<T: Ord> Default for BinaryHeap<T> {
341 /// Creates an empty `BinaryHeap<T>`.
342 #[inline]
343 fn default() -> BinaryHeap<T> {
344 BinaryHeap::new()
345 }
346 }
347
348 #[stable(feature = "binaryheap_debug", since = "1.4.0")]
349 impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
350 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
351 f.debug_list().entries(self.iter()).finish()
352 }
353 }
354
impl<T: Ord> BinaryHeap<T> {
    /// Creates an empty `BinaryHeap` as a max-heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn new() -> BinaryHeap<T> {
        BinaryHeap { data: vec![] }
    }

    /// Creates an empty `BinaryHeap` with a specific capacity.
    /// This preallocates enough memory for `capacity` elements,
    /// so that the `BinaryHeap` does not have to be reallocated
    /// until it contains at least that many values.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity(10);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
        BinaryHeap { data: Vec::with_capacity(capacity) }
    }

    /// Returns a mutable reference to the greatest item in the binary heap, or
    /// `None` if it is empty.
    ///
    /// Note: If the `PeekMut` value is leaked, the heap may be in an
    /// inconsistent state.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// assert!(heap.peek_mut().is_none());
    ///
    /// heap.push(1);
    /// heap.push(5);
    /// heap.push(2);
    /// {
    ///     let mut val = heap.peek_mut().unwrap();
    ///     *val = 0;
    /// }
    /// assert_eq!(heap.peek(), Some(&2));
    /// ```
    ///
    /// # Time complexity
    ///
    /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
    /// otherwise it's *O*(1).
    #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
    pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
        // `sift: false` — the guard only restores the invariant on drop if
        // the caller actually took a mutable reference to the root.
        if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: false }) }
    }

    /// Removes the greatest item from the binary heap and returns it, or `None` if it
    /// is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from(vec![1, 3]);
    ///
    /// assert_eq!(heap.pop(), Some(3));
    /// assert_eq!(heap.pop(), Some(1));
    /// assert_eq!(heap.pop(), None);
    /// ```
    ///
    /// # Time complexity
    ///
    /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn pop(&mut self) -> Option<T> {
        // Detach the last element, swap it with the root (the greatest item),
        // and sift the new root down to restore the heap invariant.
        self.data.pop().map(|mut item| {
            if !self.is_empty() {
                swap(&mut item, &mut self.data[0]);
                // SAFETY: !self.is_empty() means that self.len() > 0
                unsafe { self.sift_down_to_bottom(0) };
            }
            item
        })
    }

    /// Pushes an item onto the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.push(3);
    /// heap.push(5);
    /// heap.push(1);
    ///
    /// assert_eq!(heap.len(), 3);
    /// assert_eq!(heap.peek(), Some(&5));
    /// ```
    ///
    /// # Time complexity
    ///
    /// The expected cost of `push`, averaged over every possible ordering of
    /// the elements being pushed, and over a sufficiently large number of
    /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
    /// elements that are *not* already in any sorted pattern.
    ///
    /// The time complexity degrades if elements are pushed in predominantly
    /// ascending order. In the worst case, elements are pushed in ascending
    /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
    /// containing *n* elements.
    ///
    /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
    /// occurs when capacity is exhausted and needs a resize. The resize cost
    /// has been amortized in the previous figures.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn push(&mut self, item: T) {
        // Append at the end, then bubble the new element up to its place.
        let old_len = self.len();
        self.data.push(item);
        // SAFETY: Since we pushed a new item it means that
        //  old_len = self.len() - 1 < self.len()
        unsafe { self.sift_up(0, old_len) };
    }

    /// Consumes the `BinaryHeap` and returns a vector in sorted
    /// (ascending) order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from(vec![1, 2, 4, 5, 7]);
    /// heap.push(6);
    /// heap.push(3);
    ///
    /// let vec = heap.into_sorted_vec();
    /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
    /// ```
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
    pub fn into_sorted_vec(mut self) -> Vec<T> {
        // Classic in-place heapsort: repeatedly move the current maximum
        // (the root) to the end of the shrinking unsorted prefix.
        let mut end = self.len();
        while end > 1 {
            end -= 1;
            // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
            //  so it's always a valid index to access.
            //  It is safe to access index 0 (i.e. `ptr`), because
            //  1 <= end < self.len(), which means self.len() >= 2.
            unsafe {
                let ptr = self.data.as_mut_ptr();
                ptr::swap(ptr, ptr.add(end));
            }
            // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
            //  0 < 1 <= end <= self.len() - 1 < self.len()
            //  Which means 0 < end and end < self.len().
            unsafe { self.sift_down_range(0, end) };
        }
        self.into_vec()
    }

    // The implementations of sift_up and sift_down use unsafe blocks in
    // order to move an element out of the vector (leaving behind a
    // hole), shift along the others and move the removed element back into the
    // vector at the final location of the hole.
    // The `Hole` type is used to represent this, and make sure
    // the hole is filled back at the end of its scope, even on panic.
    // Using a hole reduces the constant factor compared to using swaps,
    // which involves twice as many moves.

    /// Moves the element at `pos` up toward the root until its parent is
    /// not smaller, and returns the element's final position.
    ///
    /// `start` is the index above which the element will not be moved
    /// (normally 0, the root).
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
        // Take out the value at `pos` and create a hole.
        // SAFETY: The caller guarantees that pos < self.len()
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };

        while hole.pos() > start {
            let parent = (hole.pos() - 1) / 2;

            // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
            //  and so hole.pos() - 1 can't underflow.
            //  This guarantees that parent < hole.pos() so
            //  it's a valid index and also != hole.pos().
            if hole.element() <= unsafe { hole.get(parent) } {
                break;
            }

            // SAFETY: Same as above
            unsafe { hole.move_to(parent) };
        }

        hole.pos()
    }

    /// Take an element at `pos` and move it down the heap,
    /// while its children are larger.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < end <= self.len()`.
    unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
        // SAFETY: The caller guarantees that pos < end <= self.len().
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };
        let mut child = 2 * hole.pos() + 1;

        // Loop invariant: child == 2 * hole.pos() + 1.
        // The loop body only runs while the hole has *two* children in range;
        // the single-child case is handled after the loop.
        while child <= end.saturating_sub(2) {
            // compare with the greater of the two children
            // SAFETY: child < end - 1 < self.len() and
            //  child + 1 < end <= self.len(), so they're valid indexes.
            //  child == 2 * hole.pos() + 1 != hole.pos() and
            //  child + 1 == 2 * hole.pos() + 2 != hole.pos().
            // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
            //  if T is a ZST
            child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

            // if we are already in order, stop.
            // SAFETY: child is now either the old child or the old child+1
            //  We already proven that both are < self.len() and != hole.pos()
            if hole.element() >= unsafe { hole.get(child) } {
                return;
            }

            // SAFETY: same as above.
            unsafe { hole.move_to(child) };
            child = 2 * hole.pos() + 1;
        }

        // SAFETY: && short circuit, which means that in the
        //  second condition it's already true that child == end - 1 < self.len().
        if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
            // SAFETY: child is already proven to be a valid index and
            //  child == 2 * hole.pos() + 1 != hole.pos().
            unsafe { hole.move_to(child) };
        }
    }

    /// Sifts the element at `pos` down over the whole heap.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_down(&mut self, pos: usize) {
        let len = self.len();
        // SAFETY: pos < len is guaranteed by the caller and
        //  obviously len = self.len() <= self.len().
        unsafe { self.sift_down_range(pos, len) };
    }

    /// Take an element at `pos` and move it all the way down the heap,
    /// then sift it up to its position.
    ///
    /// Note: This is faster when the element is known to be large / should
    /// be closer to the bottom.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
        let end = self.len();
        let start = pos;

        // SAFETY: The caller guarantees that pos < self.len().
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };
        let mut child = 2 * hole.pos() + 1;

        // Unconditionally walk the hole down to a leaf, always following the
        // greater child; the correct resting place is found by the final
        // `sift_up` below. This saves one comparison per level versus
        // `sift_down_range` when the element belongs near the bottom.
        // Loop invariant: child == 2 * hole.pos() + 1.
        while child <= end.saturating_sub(2) {
            // SAFETY: child < end - 1 < self.len() and
            //  child + 1 < end <= self.len(), so they're valid indexes.
            //  child == 2 * hole.pos() + 1 != hole.pos() and
            //  child + 1 == 2 * hole.pos() + 2 != hole.pos().
            // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
            //  if T is a ZST
            child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

            // SAFETY: Same as above
            unsafe { hole.move_to(child) };
            child = 2 * hole.pos() + 1;
        }

        if child == end - 1 {
            // SAFETY: child == end - 1 < self.len(), so it's a valid index
            //  and child == 2 * hole.pos() + 1 != hole.pos().
            unsafe { hole.move_to(child) };
        }
        pos = hole.pos();
        drop(hole);

        // SAFETY: pos is the position in the hole and was already proven
        //  to be a valid index.
        unsafe { self.sift_up(start, pos) };
    }

    /// Rebuild assuming data[0..start] is still a proper heap.
    fn rebuild_tail(&mut self, start: usize) {
        if start == self.len() {
            return;
        }

        let tail_len = self.len() - start;

        // Floor of log2(x); x must be non-zero (guaranteed below because
        // it is only called with `start >= tail_len >= 1`).
        #[inline(always)]
        fn log2_fast(x: usize) -> usize {
            (usize::BITS - x.leading_zeros() - 1) as usize
        }

        // `rebuild` takes O(self.len()) operations
        // and about 2 * self.len() comparisons in the worst case
        // while repeating `sift_up` takes O(tail_len * log(start)) operations
        // and about 1 * tail_len * log_2(start) comparisons in the worst case,
        // assuming start >= tail_len. For larger heaps, the crossover point
        // no longer follows this reasoning and was determined empirically.
        let better_to_rebuild = if start < tail_len {
            true
        } else if self.len() <= 2048 {
            2 * self.len() < tail_len * log2_fast(start)
        } else {
            2 * self.len() < tail_len * 11
        };

        if better_to_rebuild {
            self.rebuild();
        } else {
            for i in start..self.len() {
                // SAFETY: The index `i` is always less than self.len().
                unsafe { self.sift_up(0, i) };
            }
        }
    }

    /// Rebuilds the whole heap from scratch (bottom-up heapify): sift down
    /// every non-leaf node, starting from the last one.
    fn rebuild(&mut self) {
        let mut n = self.len() / 2;
        while n > 0 {
            n -= 1;
            // SAFETY: n starts from self.len() / 2 and goes down to 0.
            //  The only case when !(n < self.len()) is if
            //  self.len() == 0, but it's ruled out by the loop condition.
            unsafe { self.sift_down(n) };
        }
    }

    /// Moves all the elements of `other` into `self`, leaving `other` empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let v = vec![-10, 1, 2, 3, 3];
    /// let mut a = BinaryHeap::from(v);
    ///
    /// let v = vec![-20, 5, 43];
    /// let mut b = BinaryHeap::from(v);
    ///
    /// a.append(&mut b);
    ///
    /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
    /// assert!(b.is_empty());
    /// ```
    #[stable(feature = "binary_heap_append", since = "1.11.0")]
    pub fn append(&mut self, other: &mut Self) {
        // Append the smaller heap onto the larger one so that less of the
        // result needs re-heapifying.
        if self.len() < other.len() {
            swap(self, other);
        }

        let start = self.data.len();

        self.data.append(&mut other.data);

        self.rebuild_tail(start);
    }

    /// Returns an iterator which retrieves elements in heap order.
    /// The retrieved elements are removed from the original heap.
    /// The remaining elements will be removed on drop in heap order.
    ///
    /// Note:
    /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
    ///   You should use the latter for most cases.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_drain_sorted)]
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from(vec![1, 2, 3, 4, 5]);
    /// assert_eq!(heap.len(), 5);
    ///
    /// drop(heap.drain_sorted()); // removes all elements in heap order
    /// assert_eq!(heap.len(), 0);
    /// ```
    #[inline]
    #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
    pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
        DrainSorted { inner: self }
    }

    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, remove all elements `e` such that `f(&e)` returns
    /// `false`. The elements are visited in unsorted (and unspecified) order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_retain)]
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from(vec![-10, -5, 1, 2, 4, 13]);
    ///
    /// heap.retain(|x| x % 2 == 0); // only keep even numbers
    ///
    /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
    /// ```
    #[unstable(feature = "binary_heap_retain", issue = "71503")]
    pub fn retain<F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> bool,
    {
        // Track the index of the first removed element: everything before it
        // is untouched by `Vec::retain` and therefore still a valid heap.
        let mut first_removed = self.len();
        let mut i = 0;
        self.data.retain(|e| {
            let keep = f(e);
            if !keep && i < first_removed {
                first_removed = i;
            }
            i += 1;
            keep
        });
        // data[0..first_removed] is untouched, so we only need to rebuild the tail:
        self.rebuild_tail(first_removed);
    }
}
818
819 impl<T> BinaryHeap<T> {
820 /// Returns an iterator visiting all values in the underlying vector, in
821 /// arbitrary order.
822 ///
823 /// # Examples
824 ///
825 /// Basic usage:
826 ///
827 /// ```
828 /// use std::collections::BinaryHeap;
829 /// let heap = BinaryHeap::from(vec![1, 2, 3, 4]);
830 ///
831 /// // Print 1, 2, 3, 4 in arbitrary order
832 /// for x in heap.iter() {
833 /// println!("{}", x);
834 /// }
835 /// ```
836 #[stable(feature = "rust1", since = "1.0.0")]
837 pub fn iter(&self) -> Iter<'_, T> {
838 Iter { iter: self.data.iter() }
839 }
840
841 /// Returns an iterator which retrieves elements in heap order.
842 /// This method consumes the original heap.
843 ///
844 /// # Examples
845 ///
846 /// Basic usage:
847 ///
848 /// ```
849 /// #![feature(binary_heap_into_iter_sorted)]
850 /// use std::collections::BinaryHeap;
851 /// let heap = BinaryHeap::from(vec![1, 2, 3, 4, 5]);
852 ///
853 /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), vec![5, 4]);
854 /// ```
855 #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
856 pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
857 IntoIterSorted { inner: self }
858 }
859
860 /// Returns the greatest item in the binary heap, or `None` if it is empty.
861 ///
862 /// # Examples
863 ///
864 /// Basic usage:
865 ///
866 /// ```
867 /// use std::collections::BinaryHeap;
868 /// let mut heap = BinaryHeap::new();
869 /// assert_eq!(heap.peek(), None);
870 ///
871 /// heap.push(1);
872 /// heap.push(5);
873 /// heap.push(2);
874 /// assert_eq!(heap.peek(), Some(&5));
875 ///
876 /// ```
877 ///
878 /// # Time complexity
879 ///
880 /// Cost is *O*(1) in the worst case.
881 #[must_use]
882 #[stable(feature = "rust1", since = "1.0.0")]
883 pub fn peek(&self) -> Option<&T> {
884 self.data.get(0)
885 }
886
    /// Returns the number of elements the binary heap can hold without reallocating.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn capacity(&self) -> usize {
        // Delegates directly to the backing `Vec`.
        self.data.capacity()
    }
904
    /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
    /// given `BinaryHeap`. Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it requests. Therefore
    /// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future
    /// insertions are expected.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.reserve_exact(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    ///
    /// [`reserve`]: BinaryHeap::reserve
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve_exact(&mut self, additional: usize) {
        // Delegates directly to the backing `Vec`.
        self.data.reserve_exact(additional);
    }
933
    /// Reserves capacity for at least `additional` more elements to be inserted in the
    /// `BinaryHeap`. The collection may reserve more space to avoid frequent reallocations.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.reserve(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve(&mut self, additional: usize) {
        // Delegates directly to the backing `Vec`.
        self.data.reserve(additional);
    }
956
    /// Tries to reserve the minimum capacity for exactly `additional`
    /// elements to be inserted in the given `BinaryHeap<T>`. After calling
    /// `try_reserve_exact`, capacity will be greater than or equal to
    /// `self.len() + additional` if it returns `Ok(())`.
    /// Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it
    /// requests. Therefore, capacity can not be relied upon to be precisely
    /// minimal. Prefer [`try_reserve`] if future insertions are expected.
    ///
    /// [`try_reserve`]: BinaryHeap::try_reserve
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(try_reserve_2)]
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve_exact(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[unstable(feature = "try_reserve_2", issue = "91789")]
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Delegates directly to the backing `Vec`; fallible variant of
        // `reserve_exact`.
        self.data.try_reserve_exact(additional)
    }
998
    /// Tries to reserve capacity for at least `additional` more elements to be inserted
    /// in the given `BinaryHeap<T>`. The collection may reserve more space to avoid
    /// frequent reallocations. After calling `try_reserve`, capacity will be
    /// greater than or equal to `self.len() + additional` if it returns
    /// `Ok(())`. Does nothing if capacity is already sufficient.
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(try_reserve_2)]
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[unstable(feature = "try_reserve_2", issue = "91789")]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Forward directly to the backing vector's fallible reservation.
        self.data.try_reserve(additional)
    }
1034
    /// Discards as much additional capacity as possible.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to_fit();
    /// assert!(heap.capacity() == 0);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn shrink_to_fit(&mut self) {
        // Forward directly to the backing vector.
        self.data.shrink_to_fit();
    }
1053
    /// Discards capacity with a lower bound.
    ///
    /// The capacity will remain at least as large as both the length
    /// and the supplied value.
    ///
    /// If the current capacity is less than the lower limit, this is a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to(10);
    /// assert!(heap.capacity() >= 10);
    /// ```
    #[inline]
    #[stable(feature = "shrink_to", since = "1.56.0")]
    pub fn shrink_to(&mut self, min_capacity: usize) {
        // Forward directly to the backing vector.
        self.data.shrink_to(min_capacity)
    }
1076
    /// Returns a slice of all values in the underlying vector, in arbitrary
    /// order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_as_slice)]
    /// use std::collections::BinaryHeap;
    /// use std::io::{self, Write};
    ///
    /// let heap = BinaryHeap::from(vec![1, 2, 3, 4, 5, 6, 7]);
    ///
    /// io::sink().write(heap.as_slice()).unwrap();
    /// ```
    #[must_use]
    #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
    pub fn as_slice(&self) -> &[T] {
        // Expose the backing vector's storage order, not heap order.
        self.data.as_slice()
    }
1098
    /// Consumes the `BinaryHeap` and returns the underlying vector
    /// in arbitrary order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from(vec![1, 2, 3, 4, 5, 6, 7]);
    /// let vec = heap.into_vec();
    ///
    /// // Will print in some order
    /// for x in vec {
    ///     println!("{}", x);
    /// }
    /// ```
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
    pub fn into_vec(self) -> Vec<T> {
        // Uses the `From<BinaryHeap<T>> for Vec<T>` impl: just moves the
        // backing vector out, with no copying or reallocation.
        self.into()
    }
1121
    /// Returns the length of the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from(vec![1, 3]);
    ///
    /// assert_eq!(heap.len(), 2);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len(&self) -> usize {
        // The heap contains exactly the elements of the backing vector.
        self.data.len()
    }
1139
1140 /// Checks if the binary heap is empty.
1141 ///
1142 /// # Examples
1143 ///
1144 /// Basic usage:
1145 ///
1146 /// ```
1147 /// use std::collections::BinaryHeap;
1148 /// let mut heap = BinaryHeap::new();
1149 ///
1150 /// assert!(heap.is_empty());
1151 ///
1152 /// heap.push(3);
1153 /// heap.push(5);
1154 /// heap.push(1);
1155 ///
1156 /// assert!(!heap.is_empty());
1157 /// ```
1158 #[must_use]
1159 #[stable(feature = "rust1", since = "1.0.0")]
1160 pub fn is_empty(&self) -> bool {
1161 self.len() == 0
1162 }
1163
    /// Clears the binary heap, returning an iterator over the removed elements.
    ///
    /// The elements are removed in arbitrary order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from(vec![1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// for x in heap.drain() {
    ///     println!("{}", x);
    /// }
    ///
    /// assert!(heap.is_empty());
    /// ```
    #[inline]
    #[stable(feature = "drain", since = "1.6.0")]
    pub fn drain(&mut self) -> Drain<'_, T> {
        // Drain the whole backing vector; heap order does not matter once the
        // elements are being removed wholesale.
        Drain { iter: self.data.drain(..) }
    }
1189
1190 /// Drops all items from the binary heap.
1191 ///
1192 /// # Examples
1193 ///
1194 /// Basic usage:
1195 ///
1196 /// ```
1197 /// use std::collections::BinaryHeap;
1198 /// let mut heap = BinaryHeap::from(vec![1, 3]);
1199 ///
1200 /// assert!(!heap.is_empty());
1201 ///
1202 /// heap.clear();
1203 ///
1204 /// assert!(heap.is_empty());
1205 /// ```
1206 #[stable(feature = "rust1", since = "1.0.0")]
1207 pub fn clear(&mut self) {
1208 self.drain();
1209 }
1210 }
1211
/// Hole represents a hole in a slice i.e., an index without valid value
/// (because it was moved from or duplicated).
/// In drop, `Hole` will restore the slice by filling the hole
/// position with the value that was originally removed.
struct Hole<'a, T: 'a> {
    /// The slice containing the hole.
    data: &'a mut [T],
    /// The value read out of `data[pos]`; written back on drop.
    elt: ManuallyDrop<T>,
    /// Index of the hole within `data`.
    pos: usize,
}
1221
impl<'a, T> Hole<'a, T> {
    /// Create a new `Hole` at index `pos`.
    ///
    /// Unsafe because pos must be within the data slice.
    #[inline]
    unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
        debug_assert!(pos < data.len());
        // SAFETY: the caller guarantees pos is inside the slice.
        let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
        Hole { data, elt: ManuallyDrop::new(elt), pos }
    }

    /// Returns the current index of the hole.
    #[inline]
    fn pos(&self) -> usize {
        self.pos
    }

    /// Returns a reference to the element removed.
    #[inline]
    fn element(&self) -> &T {
        &self.elt
    }

    /// Returns a reference to the element at `index`.
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    #[inline]
    unsafe fn get(&self, index: usize) -> &T {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        // SAFETY: the caller guarantees `index` is in bounds (and is not the
        // hole position, which holds no valid value).
        unsafe { self.data.get_unchecked(index) }
    }

    /// Move hole to new location
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    #[inline]
    unsafe fn move_to(&mut self, index: usize) {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        // SAFETY: the caller guarantees `index` is in bounds, and
        // `index != self.pos` means source and destination cannot overlap.
        unsafe {
            let ptr = self.data.as_mut_ptr();
            let index_ptr: *const _ = ptr.add(index);
            let hole_ptr = ptr.add(self.pos);
            ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
        }
        self.pos = index;
    }
}
1271
impl<T> Drop for Hole<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // fill the hole again
        // SAFETY: `pos` was in bounds at construction and `move_to` keeps it
        // in bounds, so writing one element back at `pos` is valid. The value
        // is wrapped in `ManuallyDrop`, so it is not dropped a second time.
        unsafe {
            let pos = self.pos;
            ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
        }
    }
}
1282
/// An iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::iter()`]. See its
/// documentation for more.
///
/// [`iter`]: BinaryHeap::iter
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
    // Borrowing iterator over the backing vector; yields elements in storage
    // (arbitrary) order, not heap order.
    iter: slice::Iter<'a, T>,
}

#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
    }
}

// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
    fn clone(&self) -> Self {
        Iter { iter: self.iter.clone() }
    }
}

// All iterator behavior is forwarded to the underlying slice iterator.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;

    #[inline]
    fn next(&mut self) -> Option<&'a T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }

    #[inline]
    fn last(self) -> Option<&'a T> {
        self.iter.last()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a T> {
        self.iter.next_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Iter<'_, T> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Iter<'_, T> {}
1347
/// An owning iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::into_iter()`]
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BinaryHeap::into_iter
/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct IntoIter<T> {
    // Owning iterator over the backing vector; yields elements in storage
    // (arbitrary) order, not heap order.
    iter: vec::IntoIter<T>,
}

#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
    }
}

// All iterator behavior is forwarded to the underlying `vec::IntoIter`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}

// Plumbing for the unstable `inplace_iteration` specialization: the iterator
// is its own source.
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T> SourceIter for IntoIter<T> {
    type Source = IntoIter<T>;

    #[inline]
    unsafe fn as_inner(&mut self) -> &mut Self::Source {
        self
    }
}

#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<I> InPlaceIterable for IntoIter<I> {}

// Exposes the inner `vec::IntoIter` so in-place collection can reuse the
// original allocation.
impl<I> AsIntoIter for IntoIter<I> {
    type Item = I;

    fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
        &mut self.iter
    }
}
1423
/// An owning iterator over the elements of a `BinaryHeap`, yielded in the
/// order produced by repeatedly calling [`BinaryHeap::pop`].
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
#[derive(Clone, Debug)]
pub struct IntoIterSorted<T> {
    // The remaining heap; each `next` pops from it.
    inner: BinaryHeap<T>,
}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> Iterator for IntoIterSorted<T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // `pop` yields every remaining element exactly once, so the heap's
        // length is an exact bound.
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> FusedIterator for IntoIterSorted<T> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
1455
/// A draining iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::drain()`]. See its
/// documentation for more.
///
/// [`drain`]: BinaryHeap::drain
#[stable(feature = "drain", since = "1.6.0")]
#[derive(Debug)]
pub struct Drain<'a, T: 'a> {
    // Drain of the backing vector; yields elements in storage (arbitrary)
    // order, not heap order.
    iter: vec::Drain<'a, T>,
}

// All iterator behavior is forwarded to the underlying `vec::Drain`.
#[stable(feature = "drain", since = "1.6.0")]
impl<T> Iterator for Drain<'_, T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T> DoubleEndedIterator for Drain<'_, T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T> ExactSizeIterator for Drain<'_, T> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Drain<'_, T> {}
1500
/// A draining iterator over the elements of a `BinaryHeap`. Unlike [`Drain`],
/// the elements are removed in heap order, as produced by
/// [`BinaryHeap::pop`].
///
/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
/// documentation for more.
///
/// [`drain_sorted`]: BinaryHeap::drain_sorted
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
#[derive(Debug)]
pub struct DrainSorted<'a, T: Ord> {
    // The heap being drained; each `next` pops from it.
    inner: &'a mut BinaryHeap<T>,
}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
    /// Removes heap elements in heap order.
    fn drop(&mut self) {
        // Panic guard: if dropping an element panics, this guard's destructor
        // still pops (and drops) every remaining element, so the heap is left
        // empty rather than partially drained.
        struct DropGuard<'r, 'a, T: Ord>(&'r mut DrainSorted<'a, T>);

        impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
            fn drop(&mut self) {
                while self.0.inner.pop().is_some() {}
            }
        }

        while let Some(item) = self.inner.pop() {
            // Arm the guard before dropping `item`; on the normal (non-panic)
            // path the guard is forgotten so it runs no cleanup of its own.
            let guard = DropGuard(self);
            drop(item);
            mem::forget(guard);
        }
    }
}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> Iterator for DrainSorted<'_, T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Every remaining element is yielded exactly once, so the heap's
        // length is an exact bound.
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
1557
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
    /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
    ///
    /// This conversion happens in-place, and has *O*(*n*) time complexity.
    fn from(vec: Vec<T>) -> BinaryHeap<T> {
        // Take ownership of the vector, then restore the heap invariant over
        // its (arbitrarily ordered) contents.
        let mut heap = BinaryHeap { data: vec };
        heap.rebuild();
        heap
    }
}
1569
#[stable(feature = "std_collections_from_array", since = "1.56.0")]
impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
    /// Converts an array `[T; N]` into a `BinaryHeap<T>`.
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
    /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
    /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
    ///     assert_eq!(a, b);
    /// }
    /// ```
    fn from(arr: [T; N]) -> Self {
        Self::from_iter(arr)
    }
}
1585
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T> From<BinaryHeap<T>> for Vec<T> {
    /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
    ///
    /// This conversion requires no data movement or allocation, and has
    /// constant time complexity.
    fn from(heap: BinaryHeap<T>) -> Vec<T> {
        // The backing vector already owns the elements; simply move it out.
        heap.data
    }
}
1596
1597 #[stable(feature = "rust1", since = "1.0.0")]
1598 impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
1599 fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
1600 BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
1601 }
1602 }
1603
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for BinaryHeap<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    /// Creates a consuming iterator, that is, one that moves each value out of
    /// the binary heap in arbitrary order. The binary heap cannot be used
    /// after calling this.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from(vec![1, 2, 3, 4]);
    ///
    /// // Print 1, 2, 3, 4 in arbitrary order
    /// for x in heap.into_iter() {
    ///     // x has type i32, not &i32
    ///     println!("{}", x);
    /// }
    /// ```
    fn into_iter(self) -> IntoIter<T> {
        // Consume the backing vector directly; no heap order is imposed.
        IntoIter { iter: self.data.into_iter() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    fn into_iter(self) -> Iter<'a, T> {
        // Borrowing iteration delegates to `BinaryHeap::iter`.
        self.iter()
    }
}
1641
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Extend<T> for BinaryHeap<T> {
    #[inline]
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        // Dispatch through `SpecExtend` so `Vec<T>` and `BinaryHeap<T>`
        // sources can take specialized bulk paths.
        <Self as SpecExtend<I>>::spec_extend(self, iter);
    }

    #[inline]
    fn extend_one(&mut self, item: T) {
        self.push(item);
    }

    #[inline]
    fn extend_reserve(&mut self, additional: usize) {
        self.reserve(additional);
    }
}
1659
// Fallback specialization: push the items one at a time.
impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
    default fn spec_extend(&mut self, iter: I) {
        self.extend_desugared(iter.into_iter());
    }
}

// Specialization for `Vec<T>`: move the elements over in bulk, then restore
// the heap invariant only for the appended tail.
impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
    fn spec_extend(&mut self, ref mut other: Vec<T>) {
        let start = self.data.len();
        self.data.append(other);
        self.rebuild_tail(start);
    }
}

// Specialization for another `BinaryHeap<T>`: reuse `append`.
impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
    fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
        self.append(other);
    }
}

impl<T: Ord> BinaryHeap<T> {
    /// Extends the heap by pushing each item from `iter`, after reserving
    /// capacity for the iterator's lower size bound up front.
    fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        let iterator = iter.into_iter();
        let (lower, _) = iterator.size_hint();

        self.reserve(lower);

        iterator.for_each(move |elem| self.push(elem));
    }
}
1690
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
        // `T: Copy`, so `cloned` is a cheap copy of each referenced element.
        self.extend(iter.into_iter().cloned());
    }

    #[inline]
    fn extend_one(&mut self, &item: &'a T) {
        self.push(item);
    }

    #[inline]
    fn extend_reserve(&mut self, additional: usize) {
        self.reserve(additional);
    }
}