]> git.proxmox.com Git - rustc.git/blob - library/alloc/src/collections/binary_heap.rs
Update unsuspicious file list
[rustc.git] / library / alloc / src / collections / binary_heap.rs
1 //! A priority queue implemented with a binary heap.
2 //!
3 //! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
4 //! Checking the largest element is *O*(1). Converting a vector to a binary heap
5 //! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
6 //! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*))
7 //! in-place heapsort.
8 //!
9 //! # Examples
10 //!
11 //! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
12 //! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
13 //! It shows how to use [`BinaryHeap`] with custom types.
14 //!
15 //! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
16 //! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
17 //! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
18 //!
19 //! ```
20 //! use std::cmp::Ordering;
21 //! use std::collections::BinaryHeap;
22 //!
23 //! #[derive(Copy, Clone, Eq, PartialEq)]
24 //! struct State {
25 //! cost: usize,
26 //! position: usize,
27 //! }
28 //!
29 //! // The priority queue depends on `Ord`.
30 //! // Explicitly implement the trait so the queue becomes a min-heap
31 //! // instead of a max-heap.
32 //! impl Ord for State {
33 //! fn cmp(&self, other: &Self) -> Ordering {
//! // Notice that we flip the ordering on costs.
35 //! // In case of a tie we compare positions - this step is necessary
36 //! // to make implementations of `PartialEq` and `Ord` consistent.
37 //! other.cost.cmp(&self.cost)
38 //! .then_with(|| self.position.cmp(&other.position))
39 //! }
40 //! }
41 //!
42 //! // `PartialOrd` needs to be implemented as well.
43 //! impl PartialOrd for State {
44 //! fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
45 //! Some(self.cmp(other))
46 //! }
47 //! }
48 //!
49 //! // Each node is represented as a `usize`, for a shorter implementation.
50 //! struct Edge {
51 //! node: usize,
52 //! cost: usize,
53 //! }
54 //!
55 //! // Dijkstra's shortest path algorithm.
56 //!
57 //! // Start at `start` and use `dist` to track the current shortest distance
58 //! // to each node. This implementation isn't memory-efficient as it may leave duplicate
59 //! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
60 //! // for a simpler implementation.
61 //! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
62 //! // dist[node] = current shortest distance from `start` to `node`
63 //! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
64 //!
65 //! let mut heap = BinaryHeap::new();
66 //!
67 //! // We're at `start`, with a zero cost
68 //! dist[start] = 0;
69 //! heap.push(State { cost: 0, position: start });
70 //!
71 //! // Examine the frontier with lower cost nodes first (min-heap)
72 //! while let Some(State { cost, position }) = heap.pop() {
73 //! // Alternatively we could have continued to find all shortest paths
74 //! if position == goal { return Some(cost); }
75 //!
76 //! // Important as we may have already found a better way
77 //! if cost > dist[position] { continue; }
78 //!
79 //! // For each node we can reach, see if we can find a way with
80 //! // a lower cost going through this node
81 //! for edge in &adj_list[position] {
82 //! let next = State { cost: cost + edge.cost, position: edge.node };
83 //!
84 //! // If so, add it to the frontier and continue
85 //! if next.cost < dist[next.position] {
86 //! heap.push(next);
87 //! // Relaxation, we have now found a better way
88 //! dist[next.position] = next.cost;
89 //! }
90 //! }
91 //! }
92 //!
93 //! // Goal not reachable
94 //! None
95 //! }
96 //!
97 //! fn main() {
98 //! // This is the directed graph we're going to use.
99 //! // The node numbers correspond to the different states,
100 //! // and the edge weights symbolize the cost of moving
101 //! // from one node to another.
102 //! // Note that the edges are one-way.
103 //! //
104 //! // 7
105 //! // +-----------------+
106 //! // | |
107 //! // v 1 2 | 2
108 //! // 0 -----> 1 -----> 3 ---> 4
109 //! // | ^ ^ ^
110 //! // | | 1 | |
111 //! // | | | 3 | 1
112 //! // +------> 2 -------+ |
113 //! // 10 | |
114 //! // +---------------+
115 //! //
116 //! // The graph is represented as an adjacency list where each index,
117 //! // corresponding to a node value, has a list of outgoing edges.
118 //! // Chosen for its efficiency.
119 //! let graph = vec![
120 //! // Node 0
121 //! vec![Edge { node: 2, cost: 10 },
122 //! Edge { node: 1, cost: 1 }],
123 //! // Node 1
124 //! vec![Edge { node: 3, cost: 2 }],
125 //! // Node 2
126 //! vec![Edge { node: 1, cost: 1 },
127 //! Edge { node: 3, cost: 3 },
128 //! Edge { node: 4, cost: 1 }],
129 //! // Node 3
130 //! vec![Edge { node: 0, cost: 7 },
131 //! Edge { node: 4, cost: 2 }],
132 //! // Node 4
133 //! vec![]];
134 //!
135 //! assert_eq!(shortest_path(&graph, 0, 1), Some(1));
136 //! assert_eq!(shortest_path(&graph, 0, 3), Some(3));
137 //! assert_eq!(shortest_path(&graph, 3, 0), Some(7));
138 //! assert_eq!(shortest_path(&graph, 0, 4), Some(5));
139 //! assert_eq!(shortest_path(&graph, 4, 0), None);
140 //! }
141 //! ```
142
143 #![allow(missing_docs)]
144 #![stable(feature = "rust1", since = "1.0.0")]
145
146 use core::fmt;
147 use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
148 use core::mem::{self, swap, ManuallyDrop};
149 use core::ops::{Deref, DerefMut};
150 use core::ptr;
151
152 use crate::collections::TryReserveError;
153 use crate::slice;
154 use crate::vec::{self, AsVecIntoIter, Vec};
155
156 use super::SpecExtend;
157
158 #[cfg(test)]
159 mod tests;
160
161 /// A priority queue implemented with a binary heap.
162 ///
163 /// This will be a max-heap.
164 ///
165 /// It is a logic error for an item to be modified in such a way that the
166 /// item's ordering relative to any other item, as determined by the [`Ord`]
167 /// trait, changes while it is in the heap. This is normally only possible
168 /// through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. The
169 /// behavior resulting from such a logic error is not specified, but will
170 /// be encapsulated to the `BinaryHeap` that observed the logic error and not
171 /// result in undefined behavior. This could include panics, incorrect results,
172 /// aborts, memory leaks, and non-termination.
173 ///
174 /// # Examples
175 ///
176 /// ```
177 /// use std::collections::BinaryHeap;
178 ///
179 /// // Type inference lets us omit an explicit type signature (which
180 /// // would be `BinaryHeap<i32>` in this example).
181 /// let mut heap = BinaryHeap::new();
182 ///
183 /// // We can use peek to look at the next item in the heap. In this case,
184 /// // there's no items in there yet so we get None.
185 /// assert_eq!(heap.peek(), None);
186 ///
187 /// // Let's add some scores...
188 /// heap.push(1);
189 /// heap.push(5);
190 /// heap.push(2);
191 ///
192 /// // Now peek shows the most important item in the heap.
193 /// assert_eq!(heap.peek(), Some(&5));
194 ///
195 /// // We can check the length of a heap.
196 /// assert_eq!(heap.len(), 3);
197 ///
198 /// // We can iterate over the items in the heap, although they are returned in
199 /// // a random order.
200 /// for x in &heap {
201 /// println!("{x}");
202 /// }
203 ///
204 /// // If we instead pop these scores, they should come back in order.
205 /// assert_eq!(heap.pop(), Some(5));
206 /// assert_eq!(heap.pop(), Some(2));
207 /// assert_eq!(heap.pop(), Some(1));
208 /// assert_eq!(heap.pop(), None);
209 ///
210 /// // We can clear the heap of any remaining items.
211 /// heap.clear();
212 ///
213 /// // The heap should now be empty.
214 /// assert!(heap.is_empty())
215 /// ```
216 ///
217 /// A `BinaryHeap` with a known list of items can be initialized from an array:
218 ///
219 /// ```
220 /// use std::collections::BinaryHeap;
221 ///
222 /// let heap = BinaryHeap::from([1, 5, 2]);
223 /// ```
224 ///
225 /// ## Min-heap
226 ///
227 /// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
228 /// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
229 /// value instead of the greatest one.
230 ///
231 /// ```
232 /// use std::collections::BinaryHeap;
233 /// use std::cmp::Reverse;
234 ///
235 /// let mut heap = BinaryHeap::new();
236 ///
237 /// // Wrap values in `Reverse`
238 /// heap.push(Reverse(1));
239 /// heap.push(Reverse(5));
240 /// heap.push(Reverse(2));
241 ///
242 /// // If we pop these scores now, they should come back in the reverse order.
243 /// assert_eq!(heap.pop(), Some(Reverse(1)));
244 /// assert_eq!(heap.pop(), Some(Reverse(2)));
245 /// assert_eq!(heap.pop(), Some(Reverse(5)));
246 /// assert_eq!(heap.pop(), None);
247 /// ```
248 ///
249 /// # Time complexity
250 ///
251 /// | [push] | [pop] | [peek]/[peek\_mut] |
252 /// |---------|---------------|--------------------|
253 /// | *O*(1)~ | *O*(log(*n*)) | *O*(1) |
254 ///
255 /// The value for `push` is an expected cost; the method documentation gives a
256 /// more detailed analysis.
257 ///
258 /// [`core::cmp::Reverse`]: core::cmp::Reverse
259 /// [`Ord`]: core::cmp::Ord
260 /// [`Cell`]: core::cell::Cell
261 /// [`RefCell`]: core::cell::RefCell
262 /// [push]: BinaryHeap::push
263 /// [pop]: BinaryHeap::pop
264 /// [peek]: BinaryHeap::peek
265 /// [peek\_mut]: BinaryHeap::peek_mut
266 #[stable(feature = "rust1", since = "1.0.0")]
267 #[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
268 pub struct BinaryHeap<T> {
269 data: Vec<T>,
270 }
271
272 /// Structure wrapping a mutable reference to the greatest item on a
273 /// `BinaryHeap`.
274 ///
275 /// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
276 /// its documentation for more.
277 ///
278 /// [`peek_mut`]: BinaryHeap::peek_mut
279 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub struct PeekMut<'a, T: 'a + Ord> {
    heap: &'a mut BinaryHeap<T>,
    // Set to `true` by `deref_mut` once a `&mut T` to the root has been
    // handed out: the root may then be out of place, so `Drop` must sift
    // it down to restore the heap invariant.
    sift: bool,
}
284
285 #[stable(feature = "collection_debug", since = "1.17.0")]
286 impl<T: Ord + fmt::Debug> fmt::Debug for PeekMut<'_, T> {
287 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
288 f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
289 }
290 }
291
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord> Drop for PeekMut<'_, T> {
    fn drop(&mut self) {
        // `sift` is only set by `deref_mut`: a read-only peek cannot have
        // invalidated the heap property, so nothing needs to be done then.
        if self.sift {
            // SAFETY: PeekMut is only instantiated for non-empty heaps.
            unsafe { self.heap.sift_down(0) };
        }
    }
}
301
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord> Deref for PeekMut<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        debug_assert!(!self.heap.is_empty());
        // SAFETY: PeekMut is only instantiated for non-empty heaps
        unsafe { self.heap.data.get_unchecked(0) }
    }
}
311
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord> DerefMut for PeekMut<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        debug_assert!(!self.heap.is_empty());
        // The caller may mutate the root through the returned reference,
        // so record that `Drop` has to sift it back down afterwards.
        self.sift = true;
        // SAFETY: PeekMut is only instantiated for non-empty heaps
        unsafe { self.heap.data.get_unchecked_mut(0) }
    }
}
321
impl<'a, T: Ord> PeekMut<'a, T> {
    /// Removes the peeked value from the heap and returns it.
    #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
    pub fn pop(mut this: PeekMut<'a, T>) -> T {
        let value = this.heap.pop().unwrap();
        // `BinaryHeap::pop` already restored the heap invariant, so the
        // `Drop` impl must not sift down again.
        this.sift = false;
        value
    }
}
331
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for BinaryHeap<T> {
    fn clone(&self) -> Self {
        BinaryHeap { data: self.data.clone() }
    }

    fn clone_from(&mut self, source: &Self) {
        // Delegate to `Vec::clone_from` so an existing allocation can be
        // reused where possible instead of always reallocating.
        self.data.clone_from(&source.data);
    }
}
342
343 #[stable(feature = "rust1", since = "1.0.0")]
344 impl<T: Ord> Default for BinaryHeap<T> {
345 /// Creates an empty `BinaryHeap<T>`.
346 #[inline]
347 fn default() -> BinaryHeap<T> {
348 BinaryHeap::new()
349 }
350 }
351
352 #[stable(feature = "binaryheap_debug", since = "1.4.0")]
353 impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
354 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
355 f.debug_list().entries(self.iter()).finish()
356 }
357 }
358
359 impl<T: Ord> BinaryHeap<T> {
360 /// Creates an empty `BinaryHeap` as a max-heap.
361 ///
362 /// # Examples
363 ///
364 /// Basic usage:
365 ///
366 /// ```
367 /// use std::collections::BinaryHeap;
368 /// let mut heap = BinaryHeap::new();
369 /// heap.push(4);
370 /// ```
371 #[stable(feature = "rust1", since = "1.0.0")]
372 #[must_use]
373 pub fn new() -> BinaryHeap<T> {
374 BinaryHeap { data: vec![] }
375 }
376
377 /// Creates an empty `BinaryHeap` with at least the specified capacity.
378 ///
379 /// The binary heap will be able to hold at least `capacity` elements without
380 /// reallocating. This method is allowed to allocate for more elements than
381 /// `capacity`. If `capacity` is 0, the binary heap will not allocate.
382 ///
383 /// # Examples
384 ///
385 /// Basic usage:
386 ///
387 /// ```
388 /// use std::collections::BinaryHeap;
389 /// let mut heap = BinaryHeap::with_capacity(10);
390 /// heap.push(4);
391 /// ```
392 #[stable(feature = "rust1", since = "1.0.0")]
393 #[must_use]
394 pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
395 BinaryHeap { data: Vec::with_capacity(capacity) }
396 }
397
398 /// Returns a mutable reference to the greatest item in the binary heap, or
399 /// `None` if it is empty.
400 ///
401 /// Note: If the `PeekMut` value is leaked, the heap may be in an
402 /// inconsistent state.
403 ///
404 /// # Examples
405 ///
406 /// Basic usage:
407 ///
408 /// ```
409 /// use std::collections::BinaryHeap;
410 /// let mut heap = BinaryHeap::new();
411 /// assert!(heap.peek_mut().is_none());
412 ///
413 /// heap.push(1);
414 /// heap.push(5);
415 /// heap.push(2);
416 /// {
417 /// let mut val = heap.peek_mut().unwrap();
418 /// *val = 0;
419 /// }
420 /// assert_eq!(heap.peek(), Some(&2));
421 /// ```
422 ///
423 /// # Time complexity
424 ///
425 /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
426 /// otherwise it's *O*(1).
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
    // `sift` starts out false; it is only set if the caller actually
    // mutates the root through `DerefMut`, in which case `PeekMut`'s
    // `Drop` restores the heap invariant.
    if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: false }) }
}
431
432 /// Removes the greatest item from the binary heap and returns it, or `None` if it
433 /// is empty.
434 ///
435 /// # Examples
436 ///
437 /// Basic usage:
438 ///
439 /// ```
440 /// use std::collections::BinaryHeap;
441 /// let mut heap = BinaryHeap::from([1, 3]);
442 ///
443 /// assert_eq!(heap.pop(), Some(3));
444 /// assert_eq!(heap.pop(), Some(1));
445 /// assert_eq!(heap.pop(), None);
446 /// ```
447 ///
448 /// # Time complexity
449 ///
450 /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop(&mut self) -> Option<T> {
    self.data.pop().map(|mut item| {
        if !self.is_empty() {
            // The popped element was the *last* one; exchange it with the
            // root (the maximum) and restore the heap property.
            swap(&mut item, &mut self.data[0]);
            // SAFETY: !self.is_empty() means that self.len() > 0
            unsafe { self.sift_down_to_bottom(0) };
        }
        item
    })
}
462
463 /// Pushes an item onto the binary heap.
464 ///
465 /// # Examples
466 ///
467 /// Basic usage:
468 ///
469 /// ```
470 /// use std::collections::BinaryHeap;
471 /// let mut heap = BinaryHeap::new();
472 /// heap.push(3);
473 /// heap.push(5);
474 /// heap.push(1);
475 ///
476 /// assert_eq!(heap.len(), 3);
477 /// assert_eq!(heap.peek(), Some(&5));
478 /// ```
479 ///
480 /// # Time complexity
481 ///
482 /// The expected cost of `push`, averaged over every possible ordering of
483 /// the elements being pushed, and over a sufficiently large number of
484 /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
485 /// elements that are *not* already in any sorted pattern.
486 ///
487 /// The time complexity degrades if elements are pushed in predominantly
488 /// ascending order. In the worst case, elements are pushed in ascending
489 /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
490 /// containing *n* elements.
491 ///
492 /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
493 /// occurs when capacity is exhausted and needs a resize. The resize cost
494 /// has been amortized in the previous figures.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push(&mut self, item: T) {
    // Append at the end, then bubble the new element up to its position.
    let old_len = self.len();
    self.data.push(item);
    // SAFETY: Since we pushed a new item it means that
    // old_len = self.len() - 1 < self.len()
    unsafe { self.sift_up(0, old_len) };
}
503
504 /// Consumes the `BinaryHeap` and returns a vector in sorted
505 /// (ascending) order.
506 ///
507 /// # Examples
508 ///
509 /// Basic usage:
510 ///
511 /// ```
512 /// use std::collections::BinaryHeap;
513 ///
514 /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
515 /// heap.push(6);
516 /// heap.push(3);
517 ///
518 /// let vec = heap.into_sorted_vec();
519 /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
520 /// ```
#[must_use = "`self` will be dropped if the result is not used"]
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
pub fn into_sorted_vec(mut self) -> Vec<T> {
    // Classic in-place heapsort: repeatedly swap the maximum (index 0) to
    // the shrinking end of the vector, then re-heapify the prefix.
    let mut end = self.len();
    while end > 1 {
        end -= 1;
        // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
        // so it's always a valid index to access.
        // It is safe to access index 0 (i.e. `ptr`), because
        // 1 <= end < self.len(), which means self.len() >= 2.
        unsafe {
            let ptr = self.data.as_mut_ptr();
            ptr::swap(ptr, ptr.add(end));
        }
        // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
        // 0 < 1 <= end <= self.len() - 1 < self.len()
        // Which means 0 < end and end < self.len().
        unsafe { self.sift_down_range(0, end) };
    }
    self.into_vec()
}
542
543 // The implementations of sift_up and sift_down use unsafe blocks in
544 // order to move an element out of the vector (leaving behind a
545 // hole), shift along the others and move the removed element back into the
546 // vector at the final location of the hole.
547 // The `Hole` type is used to represent this, and make sure
548 // the hole is filled back at the end of its scope, even on panic.
549 // Using a hole reduces the constant factor compared to using swaps,
550 // which involves twice as many moves.
551
552 /// # Safety
553 ///
554 /// The caller must guarantee that `pos < self.len()`.
unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
    // Take out the value at `pos` and create a hole.
    // SAFETY: The caller guarantees that pos < self.len()
    let mut hole = unsafe { Hole::new(&mut self.data, pos) };

    // Walk the hole towards the root (never above `start`), moving each
    // smaller parent down into it.
    while hole.pos() > start {
        let parent = (hole.pos() - 1) / 2;

        // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
        // and so hole.pos() - 1 can't underflow.
        // This guarantees that parent < hole.pos() so
        // it's a valid index and also != hole.pos().
        if hole.element() <= unsafe { hole.get(parent) } {
            break;
        }

        // SAFETY: Same as above
        unsafe { hole.move_to(parent) };
    }

    // The hole's final position; `Hole`'s destructor moves the element
    // back into the vector at this index.
    hole.pos()
}
577
578 /// Take an element at `pos` and move it down the heap,
579 /// while its children are larger.
580 ///
581 /// # Safety
582 ///
583 /// The caller must guarantee that `pos < end <= self.len()`.
unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
    // SAFETY: The caller guarantees that pos < end <= self.len().
    let mut hole = unsafe { Hole::new(&mut self.data, pos) };
    let mut child = 2 * hole.pos() + 1;

    // Loop invariant: child == 2 * hole.pos() + 1.
    // `child <= end.saturating_sub(2)` means both children are in range.
    while child <= end.saturating_sub(2) {
        // compare with the greater of the two children
        // SAFETY: child < end - 1 < self.len() and
        // child + 1 < end <= self.len(), so they're valid indexes.
        // child == 2 * hole.pos() + 1 != hole.pos() and
        // child + 1 == 2 * hole.pos() + 2 != hole.pos().
        // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
        // if T is a ZST
        child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

        // if we are already in order, stop.
        // SAFETY: child is now either the old child or the old child+1
        // We already proven that both are < self.len() and != hole.pos()
        if hole.element() >= unsafe { hole.get(child) } {
            return;
        }

        // SAFETY: same as above.
        unsafe { hole.move_to(child) };
        child = 2 * hole.pos() + 1;
    }

    // Trailing case: only a left child remains at the edge of the range.
    // SAFETY: && short circuit, which means that in the
    // second condition it's already true that child == end - 1 < self.len().
    if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
        // SAFETY: child is already proven to be a valid index and
        // child == 2 * hole.pos() + 1 != hole.pos().
        unsafe { hole.move_to(child) };
    }
}
620
621 /// # Safety
622 ///
623 /// The caller must guarantee that `pos < self.len()`.
unsafe fn sift_down(&mut self, pos: usize) {
    // Convenience wrapper: sift down within the entire heap.
    let len = self.len();
    // SAFETY: pos < len is guaranteed by the caller and
    // obviously len = self.len() <= self.len().
    unsafe { self.sift_down_range(pos, len) };
}
630
631 /// Take an element at `pos` and move it all the way down the heap,
632 /// then sift it up to its position.
633 ///
634 /// Note: This is faster when the element is known to be large / should
635 /// be closer to the bottom.
636 ///
637 /// # Safety
638 ///
639 /// The caller must guarantee that `pos < self.len()`.
unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
    let end = self.len();
    let start = pos;

    // SAFETY: The caller guarantees that pos < self.len().
    let mut hole = unsafe { Hole::new(&mut self.data, pos) };
    let mut child = 2 * hole.pos() + 1;

    // Phase 1: walk the hole all the way down, always following the
    // greater child, without comparing against the moved element.
    // Loop invariant: child == 2 * hole.pos() + 1.
    while child <= end.saturating_sub(2) {
        // SAFETY: child < end - 1 < self.len() and
        // child + 1 < end <= self.len(), so they're valid indexes.
        // child == 2 * hole.pos() + 1 != hole.pos() and
        // child + 1 == 2 * hole.pos() + 2 != hole.pos().
        // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
        // if T is a ZST
        child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

        // SAFETY: Same as above
        unsafe { hole.move_to(child) };
        child = 2 * hole.pos() + 1;
    }

    if child == end - 1 {
        // SAFETY: child == end - 1 < self.len(), so it's a valid index
        // and child == 2 * hole.pos() + 1 != hole.pos().
        unsafe { hole.move_to(child) };
    }
    // Phase 2: sift the element back up from the bottom; `drop(hole)`
    // first moves it into place at the hole's final position.
    pos = hole.pos();
    drop(hole);

    // SAFETY: pos is the position in the hole and was already proven
    // to be a valid index.
    unsafe { self.sift_up(start, pos) };
}
675
/// Rebuild assuming data[0..start] is still a proper heap.
fn rebuild_tail(&mut self, start: usize) {
    if start == self.len() {
        return;
    }

    let tail_len = self.len() - start;

    // floor(log2(x)) for x > 0. Never called with x == 0 here: the
    // `start < tail_len` branch below catches start == 0 (tail_len >= 1
    // since start != self.len()).
    #[inline(always)]
    fn log2_fast(x: usize) -> usize {
        (usize::BITS - x.leading_zeros() - 1) as usize
    }

    // `rebuild` takes O(self.len()) operations
    // and about 2 * self.len() comparisons in the worst case
    // while repeating `sift_up` takes O(tail_len * log(start)) operations
    // and about 1 * tail_len * log_2(start) comparisons in the worst case,
    // assuming start >= tail_len. For larger heaps, the crossover point
    // no longer follows this reasoning and was determined empirically.
    let better_to_rebuild = if start < tail_len {
        true
    } else if self.len() <= 2048 {
        2 * self.len() < tail_len * log2_fast(start)
    } else {
        2 * self.len() < tail_len * 11
    };

    if better_to_rebuild {
        self.rebuild();
    } else {
        for i in start..self.len() {
            // SAFETY: The index `i` is always less than self.len().
            unsafe { self.sift_up(0, i) };
        }
    }
}
712
fn rebuild(&mut self) {
    // Bottom-up heap construction: sift down every non-leaf node, starting
    // from the last parent (index self.len() / 2 - 1) towards the root.
    let mut n = self.len() / 2;
    while n > 0 {
        n -= 1;
        // SAFETY: n starts from self.len() / 2 and goes down to 0.
        // The only case when !(n < self.len()) is if
        // self.len() == 0, but it's ruled out by the loop condition.
        unsafe { self.sift_down(n) };
    }
}
723
724 /// Moves all the elements of `other` into `self`, leaving `other` empty.
725 ///
726 /// # Examples
727 ///
728 /// Basic usage:
729 ///
730 /// ```
731 /// use std::collections::BinaryHeap;
732 ///
733 /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
734 /// let mut b = BinaryHeap::from([-20, 5, 43]);
735 ///
736 /// a.append(&mut b);
737 ///
738 /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
739 /// assert!(b.is_empty());
740 /// ```
#[stable(feature = "binary_heap_append", since = "1.11.0")]
pub fn append(&mut self, other: &mut Self) {
    // Always append the smaller heap onto the larger one, so the rebuild
    // below only has to fix up the shorter tail.
    if self.len() < other.len() {
        swap(self, other);
    }

    let start = self.data.len();

    self.data.append(&mut other.data);

    // Elements data[0..start] still form a heap; only the tail needs work.
    self.rebuild_tail(start);
}
753
754 /// Clears the binary heap, returning an iterator over the removed elements
755 /// in heap order. If the iterator is dropped before being fully consumed,
756 /// it drops the remaining elements in heap order.
757 ///
758 /// The returned iterator keeps a mutable borrow on the heap to optimize
759 /// its implementation.
760 ///
761 /// Note:
762 /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
763 /// You should use the latter for most cases.
764 ///
765 /// # Examples
766 ///
767 /// Basic usage:
768 ///
769 /// ```
770 /// #![feature(binary_heap_drain_sorted)]
771 /// use std::collections::BinaryHeap;
772 ///
773 /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
774 /// assert_eq!(heap.len(), 5);
775 ///
776 /// drop(heap.drain_sorted()); // removes all elements in heap order
777 /// assert_eq!(heap.len(), 0);
778 /// ```
#[inline]
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
    // Hand the heap to the adapter; see `DrainSorted` for the iteration
    // and drop semantics described in the doc comment above.
    DrainSorted { inner: self }
}
784
785 /// Retains only the elements specified by the predicate.
786 ///
787 /// In other words, remove all elements `e` for which `f(&e)` returns
788 /// `false`. The elements are visited in unsorted (and unspecified) order.
789 ///
790 /// # Examples
791 ///
792 /// Basic usage:
793 ///
794 /// ```
795 /// #![feature(binary_heap_retain)]
796 /// use std::collections::BinaryHeap;
797 ///
798 /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
799 ///
800 /// heap.retain(|x| x % 2 == 0); // only keep even numbers
801 ///
802 /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
803 /// ```
#[unstable(feature = "binary_heap_retain", issue = "71503")]
pub fn retain<F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> bool,
{
    // Track the first index whose element gets removed: everything before
    // it keeps its position, so the heap property still holds there.
    let mut first_removed = self.len();
    let mut i = 0;
    self.data.retain(|e| {
        let keep = f(e);
        if !keep && i < first_removed {
            first_removed = i;
        }
        i += 1;
        keep
    });
    // data[0..first_removed] is untouched, so we only need to rebuild the tail:
    self.rebuild_tail(first_removed);
}
822 }
823
824 impl<T> BinaryHeap<T> {
825 /// Returns an iterator visiting all values in the underlying vector, in
826 /// arbitrary order.
827 ///
828 /// # Examples
829 ///
830 /// Basic usage:
831 ///
832 /// ```
833 /// use std::collections::BinaryHeap;
834 /// let heap = BinaryHeap::from([1, 2, 3, 4]);
835 ///
836 /// // Print 1, 2, 3, 4 in arbitrary order
837 /// for x in heap.iter() {
838 /// println!("{x}");
839 /// }
840 /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<'_, T> {
    // Wraps the backing vector's iterator: yields elements in the heap's
    // internal layout order, which is arbitrary from the caller's view.
    Iter { iter: self.data.iter() }
}
845
846 /// Returns an iterator which retrieves elements in heap order.
847 /// This method consumes the original heap.
848 ///
849 /// # Examples
850 ///
851 /// Basic usage:
852 ///
853 /// ```
854 /// #![feature(binary_heap_into_iter_sorted)]
855 /// use std::collections::BinaryHeap;
856 /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
857 ///
858 /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
859 /// ```
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
    // Consumes the heap; the adapter yields elements in heap order, as
    // documented above (see `IntoIterSorted`'s Iterator impl).
    IntoIterSorted { inner: self }
}
864
865 /// Returns the greatest item in the binary heap, or `None` if it is empty.
866 ///
867 /// # Examples
868 ///
869 /// Basic usage:
870 ///
871 /// ```
872 /// use std::collections::BinaryHeap;
873 /// let mut heap = BinaryHeap::new();
874 /// assert_eq!(heap.peek(), None);
875 ///
876 /// heap.push(1);
877 /// heap.push(5);
878 /// heap.push(2);
879 /// assert_eq!(heap.peek(), Some(&5));
880 ///
881 /// ```
882 ///
883 /// # Time complexity
884 ///
885 /// Cost is *O*(1) in the worst case.
886 #[must_use]
887 #[stable(feature = "rust1", since = "1.0.0")]
888 pub fn peek(&self) -> Option<&T> {
889 self.data.get(0)
890 }
891
892 /// Returns the number of elements the binary heap can hold without reallocating.
893 ///
894 /// # Examples
895 ///
896 /// Basic usage:
897 ///
898 /// ```
899 /// use std::collections::BinaryHeap;
900 /// let mut heap = BinaryHeap::with_capacity(100);
901 /// assert!(heap.capacity() >= 100);
902 /// heap.push(4);
903 /// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
    // Delegates to the backing `Vec`.
    self.data.capacity()
}
909
910 /// Reserves the minimum capacity for at least `additional` elements more than
911 /// the current length. Unlike [`reserve`], this will not
912 /// deliberately over-allocate to speculatively avoid frequent allocations.
913 /// After calling `reserve_exact`, capacity will be greater than or equal to
914 /// `self.len() + additional`. Does nothing if the capacity is already
915 /// sufficient.
916 ///
917 /// [`reserve`]: BinaryHeap::reserve
918 ///
919 /// # Panics
920 ///
921 /// Panics if the new capacity overflows [`usize`].
922 ///
923 /// # Examples
924 ///
925 /// Basic usage:
926 ///
927 /// ```
928 /// use std::collections::BinaryHeap;
929 /// let mut heap = BinaryHeap::new();
930 /// heap.reserve_exact(100);
931 /// assert!(heap.capacity() >= 100);
932 /// heap.push(4);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
    // Delegates to the backing `Vec`; element order is unaffected.
    self.data.reserve_exact(additional);
}
940
941 /// Reserves capacity for at least `additional` elements more than the
942 /// current length. The allocator may reserve more space to speculatively
943 /// avoid frequent allocations. After calling `reserve`,
944 /// capacity will be greater than or equal to `self.len() + additional`.
945 /// Does nothing if capacity is already sufficient.
946 ///
947 /// # Panics
948 ///
949 /// Panics if the new capacity overflows [`usize`].
950 ///
951 /// # Examples
952 ///
953 /// Basic usage:
954 ///
955 /// ```
956 /// use std::collections::BinaryHeap;
957 /// let mut heap = BinaryHeap::new();
958 /// heap.reserve(100);
959 /// assert!(heap.capacity() >= 100);
960 /// heap.push(4);
961 /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
    // Delegates to the backing `Vec`; element order is unaffected.
    self.data.reserve(additional);
}
966
    /// Tries to reserve the minimum capacity for at least `additional` elements
    /// more than the current length. Unlike [`try_reserve`], this will not
    /// deliberately over-allocate to speculatively avoid frequent allocations.
    /// After calling `try_reserve_exact`, capacity will be greater than or
    /// equal to `self.len() + additional` if it returns `Ok(())`.
    /// Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it
    /// requests. Therefore, capacity can not be relied upon to be precisely
    /// minimal. Prefer [`try_reserve`] if future insertions are expected.
    ///
    /// [`try_reserve`]: BinaryHeap::try_reserve
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve_exact(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[stable(feature = "try_reserve_2", since = "1.63.0")]
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Fallible counterpart of `reserve_exact`; errors come straight from `Vec`.
        self.data.try_reserve_exact(additional)
    }
1008
    /// Tries to reserve capacity for at least `additional` elements more than the
    /// current length. The allocator may reserve more space to speculatively
    /// avoid frequent allocations. After calling `try_reserve`, capacity will be
    /// greater than or equal to `self.len() + additional` if it returns
    /// `Ok(())`. Does nothing if capacity is already sufficient. This method
    /// preserves the contents even if an error occurs.
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[stable(feature = "try_reserve_2", since = "1.63.0")]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Fallible counterpart of `reserve`; errors come straight from `Vec`.
        self.data.try_reserve(additional)
    }
1044
    /// Discards as much additional capacity as possible.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to_fit();
    /// assert!(heap.capacity() == 0);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn shrink_to_fit(&mut self) {
        // Only capacity changes; element order and the heap invariant are untouched.
        self.data.shrink_to_fit();
    }
1063
    /// Discards capacity with a lower bound.
    ///
    /// The capacity will remain at least as large as both the length
    /// and the supplied value.
    ///
    /// If the current capacity is less than the lower limit, this is a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to(10);
    /// assert!(heap.capacity() >= 10);
    /// ```
    #[inline]
    #[stable(feature = "shrink_to", since = "1.56.0")]
    pub fn shrink_to(&mut self, min_capacity: usize) {
        // Bounded shrink; semantics (including the no-op case) are `Vec`'s.
        self.data.shrink_to(min_capacity)
    }
1086
    /// Returns a slice of all values in the underlying vector, in arbitrary
    /// order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_as_slice)]
    /// use std::collections::BinaryHeap;
    /// use std::io::{self, Write};
    ///
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
    ///
    /// io::sink().write(heap.as_slice()).unwrap();
    /// ```
    #[must_use]
    #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
    pub fn as_slice(&self) -> &[T] {
        // The slice exposes the heap's internal layout, hence "arbitrary order".
        self.data.as_slice()
    }
1108
1109 /// Consumes the `BinaryHeap` and returns the underlying vector
1110 /// in arbitrary order.
1111 ///
1112 /// # Examples
1113 ///
1114 /// Basic usage:
1115 ///
1116 /// ```
1117 /// use std::collections::BinaryHeap;
1118 /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
1119 /// let vec = heap.into_vec();
1120 ///
1121 /// // Will print in some order
1122 /// for x in vec {
1123 /// println!("{x}");
1124 /// }
1125 /// ```
1126 #[must_use = "`self` will be dropped if the result is not used"]
1127 #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
1128 pub fn into_vec(self) -> Vec<T> {
1129 self.into()
1130 }
1131
    /// Returns the length of the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 3]);
    ///
    /// assert_eq!(heap.len(), 2);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len(&self) -> usize {
        // Every element lives in `data`, so the vector length is the heap length.
        self.data.len()
    }
1149
    /// Checks if the binary heap is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    ///
    /// assert!(heap.is_empty());
    ///
    /// heap.push(3);
    /// heap.push(5);
    /// heap.push(1);
    ///
    /// assert!(!heap.is_empty());
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_empty(&self) -> bool {
        // Defined in terms of `len` so the two can never disagree.
        self.len() == 0
    }
1173
    /// Clears the binary heap, returning an iterator over the removed elements
    /// in arbitrary order. If the iterator is dropped before being fully
    /// consumed, it drops the remaining elements in arbitrary order.
    ///
    /// The returned iterator keeps a mutable borrow on the heap to optimize
    /// its implementation.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// for x in heap.drain() {
    ///     println!("{x}");
    /// }
    ///
    /// assert!(heap.is_empty());
    /// ```
    #[inline]
    #[stable(feature = "drain", since = "1.6.0")]
    pub fn drain(&mut self) -> Drain<'_, T> {
        // Wraps `Vec::drain` over the full range; elements come out in the
        // heap's internal (arbitrary) order, not sorted order.
        Drain { iter: self.data.drain(..) }
    }
1202
    /// Drops all items from the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// heap.clear();
    ///
    /// assert!(heap.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn clear(&mut self) {
        // The `Drain` returned here is dropped immediately, which drops every
        // remaining element and leaves the heap empty (capacity retained).
        self.drain();
    }
1223 }
1224
/// Hole represents a hole in a slice i.e., an index without valid value
/// (because it was moved from or duplicated).
/// In drop, `Hole` will restore the slice by filling the hole
/// position with the value that was originally removed.
struct Hole<'a, T: 'a> {
    // The slice containing the hole; `data[pos]` is logically moved-from.
    data: &'a mut [T],
    // The value read out of `data[pos]`; written back on drop.
    elt: ManuallyDrop<T>,
    // Current index of the hole within `data`.
    pos: usize,
}
1234
impl<'a, T> Hole<'a, T> {
    /// Create a new `Hole` at index `pos`.
    ///
    /// Unsafe because pos must be within the data slice.
    #[inline]
    unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
        debug_assert!(pos < data.len());
        // SAFETY: the caller guarantees pos is inside the slice
        let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
        Hole { data, elt: ManuallyDrop::new(elt), pos }
    }

    /// Returns the current position of the hole.
    #[inline]
    fn pos(&self) -> usize {
        self.pos
    }

    /// Returns a reference to the element removed.
    #[inline]
    fn element(&self) -> &T {
        &self.elt
    }

    /// Returns a reference to the element at `index`.
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    #[inline]
    unsafe fn get(&self, index: usize) -> &T {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        // SAFETY: caller guarantees `index` is in bounds and not the hole itself.
        unsafe { self.data.get_unchecked(index) }
    }

    /// Move hole to new location
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    #[inline]
    unsafe fn move_to(&mut self, index: usize) {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        // SAFETY: caller guarantees `index` is in bounds and distinct from
        // `self.pos`, so source and destination never overlap.
        unsafe {
            let ptr = self.data.as_mut_ptr();
            let index_ptr: *const _ = ptr.add(index);
            let hole_ptr = ptr.add(self.pos);
            // Move data[index] into the hole; data[index] becomes the new hole.
            ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
        }
        self.pos = index;
    }
}
1284
impl<T> Drop for Hole<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // fill the hole again
        // SAFETY: `pos` is kept in bounds by `new`/`move_to`, and `elt` is
        // intentionally not dropped (it's moved back into the slice instead).
        unsafe {
            let pos = self.pos;
            ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
        }
    }
}
1295
/// An iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::iter()`]. See its
/// documentation for more.
///
/// [`iter`]: BinaryHeap::iter
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
    // Thin wrapper over the slice iterator of the heap's backing storage.
    iter: slice::Iter<'a, T>,
}
1307
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
    // Shows the not-yet-yielded elements, e.g. `Iter([1, 2, 3])`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
    }
}
1314
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
// (hand-written so `Iter<T>: Clone` doesn't require `T: Clone`).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
    fn clone(&self) -> Self {
        Iter { iter: self.iter.clone() }
    }
}
1322
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;

    // All methods forward to the inner slice iterator; order is the heap's
    // internal (arbitrary) order.
    #[inline]
    fn next(&mut self) -> Option<&'a T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }

    #[inline]
    fn last(self) -> Option<&'a T> {
        self.iter.last()
    }
}
1342
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a T> {
        self.iter.next_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Iter<'_, T> {
    // Delegates so the (unstable) `is_empty` fast path of slices is reused.
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Iter<'_, T> {}
1360
/// An owning iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::into_iter()`]
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BinaryHeap::into_iter
/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct IntoIter<T> {
    // Owns the heap's backing `Vec`; yields in arbitrary (storage) order.
    iter: vec::IntoIter<T>,
}
1373
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
    // Shows the not-yet-yielded elements, e.g. `IntoIter([1, 2, 3])`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
    }
}
1380
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    // Both methods forward to the inner `vec::IntoIter`.
    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
1395
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {
    // Delegates to the inner iterator's (unstable) `is_empty`.
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}
1413
// In addition to the SAFETY invariants of the following three unsafe traits
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T> SourceIter for IntoIter<T> {
    // The source is the iterator itself; together with `AsVecIntoIter` below
    // this lets `collect` reuse the heap's allocation (in-place collection).
    type Source = IntoIter<T>;

    #[inline]
    unsafe fn as_inner(&mut self) -> &mut Self::Source {
        self
    }
}
1426
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<I> InPlaceIterable for IntoIter<I> {}

// SAFETY invariants for this impl are covered by the comment above the
// `SourceIter` impl and the vec::in_place_collect module documentation.
unsafe impl<I> AsVecIntoIter for IntoIter<I> {
    type Item = I;

    // Exposes the wrapped `vec::IntoIter` so in-place collection can reuse
    // the underlying allocation.
    fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
        &mut self.iter
    }
}
1438
/// An owning iterator over the elements of a `BinaryHeap`, yielded in heap
/// order (largest first, since `BinaryHeap` is a max-heap).
///
/// This `struct` is created by `BinaryHeap::into_iter_sorted()`. See its
/// documentation for more.
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
#[derive(Clone, Debug)]
pub struct IntoIterSorted<T> {
    // The remaining heap; each `next()` pops its greatest element.
    inner: BinaryHeap<T>,
}
1445
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> Iterator for IntoIterSorted<T> {
    type Item = T;

    // Popping repeatedly yields the elements in descending order.
    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop()
    }

    // The remaining count is known exactly, so the hint is tight.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}
1461
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> FusedIterator for IntoIterSorted<T> {}

// SAFETY: `size_hint` above reports the exact remaining length.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
1470
/// A draining iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::drain()`]. See its
/// documentation for more.
///
/// [`drain`]: BinaryHeap::drain
#[stable(feature = "drain", since = "1.6.0")]
#[derive(Debug)]
pub struct Drain<'a, T: 'a> {
    // Wraps `Vec::drain(..)` on the heap's storage; yields in arbitrary order.
    iter: vec::Drain<'a, T>,
}
1482
#[stable(feature = "drain", since = "1.6.0")]
impl<T> Iterator for Drain<'_, T> {
    type Item = T;

    // Both methods forward to the inner `vec::Drain`.
    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
1497
#[stable(feature = "drain", since = "1.6.0")]
impl<T> DoubleEndedIterator for Drain<'_, T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T> ExactSizeIterator for Drain<'_, T> {
    // Delegates to the inner iterator's (unstable) `is_empty`.
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Drain<'_, T> {}
1515
/// A draining iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
/// documentation for more.
///
/// [`drain_sorted`]: BinaryHeap::drain_sorted
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
#[derive(Debug)]
pub struct DrainSorted<'a, T: Ord> {
    // Borrows the heap mutably; each `next()` pops its greatest element, and
    // `Drop` empties whatever remains.
    inner: &'a mut BinaryHeap<T>,
}
1527
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
    /// Removes heap elements in heap order.
    fn drop(&mut self) {
        // Panic guard: if dropping an element panics, this guard's destructor
        // still empties the heap so no elements are leaked.
        struct DropGuard<'r, 'a, T: Ord>(&'r mut DrainSorted<'a, T>);

        impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
            fn drop(&mut self) {
                // Runs only when the guard is dropped via unwinding (see
                // `mem::forget` below): pop-and-drop everything left.
                while self.0.inner.pop().is_some() {}
            }
        }

        while let Some(item) = self.inner.pop() {
            // Arm the guard before dropping `item`; if `drop(item)` panics,
            // the guard finishes draining during unwinding.
            let guard = DropGuard(self);
            drop(item);
            // No panic occurred: disarm the guard and keep popping normally.
            mem::forget(guard);
        }
    }
}
1547
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> Iterator for DrainSorted<'_, T> {
    type Item = T;

    // Popping repeatedly yields the elements in descending order.
    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop()
    }

    // The remaining count is known exactly, so the hint is tight.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}
1563
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}

// SAFETY: `size_hint` above reports the exact remaining length.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
1572
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
    /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
    ///
    /// This conversion happens in-place, and has *O*(*n*) time complexity.
    fn from(vec: Vec<T>) -> BinaryHeap<T> {
        // Adopt the vector's storage as-is, then restore the heap invariant.
        let mut heap = BinaryHeap { data: vec };
        heap.rebuild();
        heap
    }
}
1584
#[stable(feature = "std_collections_from_array", since = "1.56.0")]
impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
    /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
    /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
    ///     assert_eq!(a, b);
    /// }
    /// ```
    fn from(arr: [T; N]) -> Self {
        // Route through `FromIterator`, which collects and heapifies in place.
        Self::from_iter(arr)
    }
}
1600
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T> From<BinaryHeap<T>> for Vec<T> {
    /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
    ///
    /// This conversion requires no data movement or allocation, and has
    /// constant time complexity.
    fn from(heap: BinaryHeap<T>) -> Vec<T> {
        // The heap is just a `Vec` plus an invariant; hand the storage back.
        heap.data
    }
}
1611
1612 #[stable(feature = "rust1", since = "1.0.0")]
1613 impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
1614 fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
1615 BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
1616 }
1617 }
1618
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for BinaryHeap<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    /// Creates a consuming iterator, that is, one that moves each value out of
    /// the binary heap in arbitrary order. The binary heap cannot be used
    /// after calling this.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4]);
    ///
    /// // Print 1, 2, 3, 4 in arbitrary order
    /// for x in heap.into_iter() {
    ///     // x has type i32, not &i32
    ///     println!("{x}");
    /// }
    /// ```
    fn into_iter(self) -> IntoIter<T> {
        // Consume the backing `Vec` directly; no sorting is performed.
        IntoIter { iter: self.data.into_iter() }
    }
}
1646
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    // Enables `for x in &heap`, borrowing each element in arbitrary order.
    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}
1656
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Extend<T> for BinaryHeap<T> {
    // Dispatch through `SpecExtend` so `Vec` and `BinaryHeap` sources can use
    // specialized bulk paths (see the impls below).
    #[inline]
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        <Self as SpecExtend<I>>::spec_extend(self, iter);
    }

    #[inline]
    fn extend_one(&mut self, item: T) {
        self.push(item);
    }

    #[inline]
    fn extend_reserve(&mut self, additional: usize) {
        self.reserve(additional);
    }
}
1674
// Generic fallback: push elements one by one (see `extend_desugared`).
impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
    default fn spec_extend(&mut self, iter: I) {
        self.extend_desugared(iter.into_iter());
    }
}

// Specialization for `Vec`: append the whole vector in one move, then
// restore the heap invariant only for the newly added tail.
impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
    fn spec_extend(&mut self, ref mut other: Vec<T>) {
        let start = self.data.len();
        self.data.append(other);
        self.rebuild_tail(start);
    }
}

// Specialization for another `BinaryHeap`: reuse `append`, which drains the
// other heap into this one.
impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
    fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
        self.append(other);
    }
}
1694
1695 impl<T: Ord> BinaryHeap<T> {
1696 fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
1697 let iterator = iter.into_iter();
1698 let (lower, _) = iterator.size_hint();
1699
1700 self.reserve(lower);
1701
1702 iterator.for_each(move |elem| self.push(elem));
1703 }
1704 }
1705
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
    // Extending from references requires `T: Copy`; each element is copied
    // into the heap via the owning `Extend<T>` impl.
    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
        self.extend(iter.into_iter().cloned());
    }

    #[inline]
    fn extend_one(&mut self, &item: &'a T) {
        self.push(item);
    }

    #[inline]
    fn extend_reserve(&mut self, additional: usize) {
        self.reserve(additional);
    }
}