]>
Commit | Line | Data |
---|---|---|
29967ef6 XL |
1 | use super::debug::term_type; |
2 | use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph}; | |
3 | ||
4 | use crate::util::spanview::source_range_no_file; | |
5 | ||
6 | use rustc_data_structures::graph::WithNumNodes; | |
7 | use rustc_index::bit_set::BitSet; | |
8 | use rustc_middle::mir::{ | |
9 | self, AggregateKind, BasicBlock, FakeReadCause, Rvalue, Statement, StatementKind, Terminator, | |
10 | TerminatorKind, | |
11 | }; | |
12 | use rustc_middle::ty::TyCtxt; | |
13 | ||
14 | use rustc_span::source_map::original_sp; | |
15 | use rustc_span::{BytePos, Span, SyntaxContext}; | |
16 | ||
17 | use std::cmp::Ordering; | |
18 | ||
/// Identifies the MIR source (a `Statement` or a `Terminator`) that contributed a `Span` to
/// coverage, so refined `CoverageSpan`s can be traced back to the MIR they came from.
#[derive(Debug, Copy, Clone)]
pub(crate) enum CoverageStatement {
    /// A MIR `Statement`, identified by its `BasicBlock` and statement index, with the `Span`
    /// it contributes to coverage.
    Statement(BasicBlock, Span, usize),
    /// The `Terminator` of the given `BasicBlock`, with the `Span` it contributes to coverage.
    Terminator(BasicBlock, Span),
}
24 | ||
25 | impl CoverageStatement { | |
26 | pub fn format(&self, tcx: TyCtxt<'tcx>, mir_body: &'a mir::Body<'tcx>) -> String { | |
27 | match *self { | |
28 | Self::Statement(bb, span, stmt_index) => { | |
29 | let stmt = &mir_body[bb].statements[stmt_index]; | |
30 | format!( | |
31 | "{}: @{}[{}]: {:?}", | |
32 | source_range_no_file(tcx, &span), | |
33 | bb.index(), | |
34 | stmt_index, | |
35 | stmt | |
36 | ) | |
37 | } | |
38 | Self::Terminator(bb, span) => { | |
39 | let term = mir_body[bb].terminator(); | |
40 | format!( | |
41 | "{}: @{}.{}: {:?}", | |
42 | source_range_no_file(tcx, &span), | |
43 | bb.index(), | |
44 | term_type(&term.kind), | |
45 | term.kind | |
46 | ) | |
47 | } | |
48 | } | |
49 | } | |
50 | ||
51 | pub fn span(&self) -> &Span { | |
52 | match self { | |
53 | Self::Statement(_, span, _) | Self::Terminator(_, span) => span, | |
54 | } | |
55 | } | |
56 | } | |
57 | ||
/// A BCB is deconstructed into one or more `Span`s. Each `Span` maps to a `CoverageSpan` that
/// references the originating BCB and one or more MIR `Statement`s and/or `Terminator`s.
/// Initially, the `Span`s come from the `Statement`s and `Terminator`s, but subsequent
/// transforms can combine adjacent `Span`s and `CoverageSpan` from the same BCB, merging the
/// `CoverageStatement` vectors, and the `Span`s to cover the extent of the combined `Span`s.
///
/// Note: A `CoverageStatement` merged into another CoverageSpan may come from a `BasicBlock` that
/// is not part of the `CoverageSpan` bcb if the statement was included because its `Span` matches
/// or is subsumed by the `Span` associated with this `CoverageSpan`, and its `BasicBlock`
/// `is_dominated_by()` the `BasicBlock`s in this `CoverageSpan`.
#[derive(Debug, Clone)]
pub(crate) struct CoverageSpan {
    /// The source region this coverage span covers (may grow as spans are merged).
    pub span: Span,
    /// The BCB this span was extracted from, and whose execution count it will report.
    pub bcb: BasicCoverageBlock,
    /// The MIR `Statement`s/`Terminator`s whose spans were combined into `span`.
    pub coverage_statements: Vec<CoverageStatement>,
    /// `true` if this span came from a closure-creating `Assign`; such spans are carved out of
    /// the enclosing function's coverage (the closure's own MIR provides its coverage).
    pub is_closure: bool,
}
75 | ||
76 | impl CoverageSpan { | |
77 | pub fn for_statement( | |
78 | statement: &Statement<'tcx>, | |
79 | span: Span, | |
80 | bcb: BasicCoverageBlock, | |
81 | bb: BasicBlock, | |
82 | stmt_index: usize, | |
83 | ) -> Self { | |
84 | let is_closure = match statement.kind { | |
85 | StatementKind::Assign(box ( | |
86 | _, | |
87 | Rvalue::Aggregate(box AggregateKind::Closure(_, _), _), | |
88 | )) => true, | |
89 | _ => false, | |
90 | }; | |
91 | ||
92 | Self { | |
93 | span, | |
94 | bcb, | |
95 | coverage_statements: vec![CoverageStatement::Statement(bb, span, stmt_index)], | |
96 | is_closure, | |
97 | } | |
98 | } | |
99 | ||
100 | pub fn for_terminator(span: Span, bcb: BasicCoverageBlock, bb: BasicBlock) -> Self { | |
101 | Self { | |
102 | span, | |
103 | bcb, | |
104 | coverage_statements: vec![CoverageStatement::Terminator(bb, span)], | |
105 | is_closure: false, | |
106 | } | |
107 | } | |
108 | ||
109 | pub fn merge_from(&mut self, mut other: CoverageSpan) { | |
110 | debug_assert!(self.is_mergeable(&other)); | |
111 | self.span = self.span.to(other.span); | |
112 | if other.is_closure { | |
113 | self.is_closure = true; | |
114 | } | |
115 | self.coverage_statements.append(&mut other.coverage_statements); | |
116 | } | |
117 | ||
118 | pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) { | |
119 | self.coverage_statements.retain(|covstmt| covstmt.span().hi() <= cutoff_pos); | |
120 | if let Some(highest_covstmt) = | |
121 | self.coverage_statements.iter().max_by_key(|covstmt| covstmt.span().hi()) | |
122 | { | |
123 | self.span = self.span.with_hi(highest_covstmt.span().hi()); | |
124 | } | |
125 | } | |
126 | ||
127 | #[inline] | |
128 | pub fn is_mergeable(&self, other: &Self) -> bool { | |
129 | self.is_in_same_bcb(other) && !(self.is_closure || other.is_closure) | |
130 | } | |
131 | ||
132 | #[inline] | |
133 | pub fn is_in_same_bcb(&self, other: &Self) -> bool { | |
134 | self.bcb == other.bcb | |
135 | } | |
136 | ||
137 | pub fn format(&self, tcx: TyCtxt<'tcx>, mir_body: &'a mir::Body<'tcx>) -> String { | |
138 | format!( | |
139 | "{}\n {}", | |
140 | source_range_no_file(tcx, &self.span), | |
141 | self.format_coverage_statements(tcx, mir_body).replace("\n", "\n "), | |
142 | ) | |
143 | } | |
144 | ||
145 | pub fn format_coverage_statements( | |
146 | &self, | |
147 | tcx: TyCtxt<'tcx>, | |
148 | mir_body: &'a mir::Body<'tcx>, | |
149 | ) -> String { | |
150 | let mut sorted_coverage_statements = self.coverage_statements.clone(); | |
151 | sorted_coverage_statements.sort_unstable_by_key(|covstmt| match *covstmt { | |
152 | CoverageStatement::Statement(bb, _, index) => (bb, index), | |
153 | CoverageStatement::Terminator(bb, _) => (bb, usize::MAX), | |
154 | }); | |
155 | sorted_coverage_statements | |
156 | .iter() | |
157 | .map(|covstmt| covstmt.format(tcx, mir_body)) | |
158 | .collect::<Vec<_>>() | |
159 | .join("\n") | |
160 | } | |
161 | } | |
162 | ||
/// Converts the initial set of `CoverageSpan`s (one per MIR `Statement` or `Terminator`) into a
/// minimal set of `CoverageSpan`s, using the BCB CFG to determine where it is safe and useful to:
///
/// * Remove duplicate source code coverage regions
/// * Merge spans that represent continuous (both in source code and control flow), non-branching
///   execution
/// * Carve out (leave uncovered) any span that will be counted by another MIR (notably, closures)
pub struct CoverageSpans<'a, 'tcx> {
    /// The MIR, used to look up `BasicBlockData`.
    mir_body: &'a mir::Body<'tcx>,

    /// A `Span` covering the function body of the MIR (typically from left curly brace to right
    /// curly brace).
    body_span: Span,

    /// The BasicCoverageBlock Control Flow Graph (BCB CFG).
    basic_coverage_blocks: &'a CoverageGraph,

    /// The initial set of `CoverageSpan`s, sorted by `Span` (`lo` and `hi`) and by relative
    /// dominance between the `BasicCoverageBlock`s of equal `Span`s. `None` until
    /// `generate_coverage_spans` installs the sorted iterator.
    sorted_spans_iter: Option<std::vec::IntoIter<CoverageSpan>>,

    /// The current `CoverageSpan` to compare to its `prev`, to possibly merge, discard, force the
    /// discard of the `prev` (and or `pending_dups`), or keep both (with `prev` moved to
    /// `pending_dups`). If `curr` is not discarded or merged, it becomes `prev` for the next
    /// iteration.
    some_curr: Option<CoverageSpan>,

    /// The original `span` for `curr`, in case the `curr` span is modified.
    curr_original_span: Span,

    /// The CoverageSpan from a prior iteration; typically assigned from that iteration's `curr`.
    /// If that `curr` was discarded, `prev` retains its value from the previous iteration.
    some_prev: Option<CoverageSpan>,

    /// Assigned from `curr_original_span` from the previous iteration.
    prev_original_span: Span,

    /// One or more `CoverageSpan`s with the same `Span` but different `BasicCoverageBlock`s, and
    /// no `BasicCoverageBlock` in this list dominates another `BasicCoverageBlock` in the list.
    /// If a new `curr` span also fits this criteria (compared to an existing list of
    /// `pending_dups`), that `curr` `CoverageSpan` moves to `prev` before possibly being added to
    /// the `pending_dups` list, on the next iteration. As a result, if `prev` and `pending_dups`
    /// have the same `Span`, the criteria for `pending_dups` holds for `prev` as well: a `prev`
    /// with a matching `Span` does not dominate any `pending_dup` and no `pending_dup` dominates a
    /// `prev` with a matching `Span`)
    pending_dups: Vec<CoverageSpan>,

    /// The final `CoverageSpan`s to add to the coverage map. A `Counter` or `Expression`
    /// will also be injected into the MIR for each `CoverageSpan`.
    refined_spans: Vec<CoverageSpan>,
}
215 | ||
impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
    /// Entry point: builds the initial sorted spans for the MIR body, then runs the refinement
    /// state machine (`to_refined_spans`) to produce the final set of `CoverageSpan`s.
    pub(crate) fn generate_coverage_spans(
        mir_body: &'a mir::Body<'tcx>,
        body_span: Span,
        basic_coverage_blocks: &'a CoverageGraph,
    ) -> Vec<CoverageSpan> {
        let mut coverage_spans = CoverageSpans {
            mir_body,
            body_span,
            basic_coverage_blocks,
            sorted_spans_iter: None,
            refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
            some_curr: None,
            curr_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
            some_prev: None,
            prev_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
            pending_dups: Vec::new(),
        };

        let sorted_spans = coverage_spans.mir_to_initial_sorted_coverage_spans();

        // Seed `prev` with the first sorted span so the refinement loop always has a `prev`.
        // NOTE(review): the `expect` below panics if there were no initial spans, yet
        // `mir_to_initial_sorted_coverage_spans` can return an empty Vec (e.g. for unreachable
        // functions) — TODO confirm callers never reach here with an empty span set.
        coverage_spans.sorted_spans_iter = Some(sorted_spans.into_iter());
        coverage_spans.some_prev = coverage_spans.sorted_spans_iter.as_mut().unwrap().next();
        coverage_spans.prev_original_span =
            coverage_spans.some_prev.as_ref().expect("at least one span").span;

        coverage_spans.to_refined_spans()
    }

    /// Generate a minimal set of `CoverageSpan`s, each representing a contiguous code region to be
    /// counted.
    ///
    /// The basic steps are:
    ///
    /// 1. Extract an initial set of spans from the `Statement`s and `Terminator`s of each
    ///    `BasicCoverageBlockData`.
    /// 2. Sort the spans by span.lo() (starting position). Spans that start at the same position
    ///    are sorted with longer spans before shorter spans; and equal spans are sorted
    ///    (deterministically) based on "dominator" relationship (if any).
    /// 3. Traverse the spans in sorted order to identify spans that can be dropped (for instance,
    ///    if another span or spans are already counting the same code region), or should be merged
    ///    into a broader combined span (because it represents a contiguous, non-branching, and
    ///    uninterrupted region of source code).
    ///
    ///    Closures are exposed in their enclosing functions as `Assign` `Rvalue`s, and since
    ///    closures have their own MIR, their `Span` in their enclosing function should be left
    ///    "uncovered".
    ///
    /// Note the resulting vector of `CoverageSpan`s may not be fully sorted (and does not need
    /// to be).
    fn mir_to_initial_sorted_coverage_spans(&self) -> Vec<CoverageSpan> {
        let mut initial_spans = Vec::<CoverageSpan>::with_capacity(self.mir_body.num_nodes() * 2);
        for (bcb, bcb_data) in self.basic_coverage_blocks.iter_enumerated() {
            for coverage_span in self.bcb_to_initial_coverage_spans(bcb, bcb_data) {
                initial_spans.push(coverage_span);
            }
        }

        if initial_spans.is_empty() {
            // This can happen if, for example, the function is unreachable (contains only a
            // `BasicBlock`(s) with an `Unreachable` terminator).
            return initial_spans;
        }

        initial_spans.sort_unstable_by(|a, b| {
            if a.span.lo() == b.span.lo() {
                if a.span.hi() == b.span.hi() {
                    if a.is_in_same_bcb(b) {
                        Some(Ordering::Equal)
                    } else {
                        // Sort equal spans by dominator relationship, in reverse order (so
                        // dominators always come after the dominated equal spans). When later
                        // comparing two spans in order, the first will either dominate the second,
                        // or they will have no dominator relationship.
                        self.basic_coverage_blocks.dominators().rank_partial_cmp(b.bcb, a.bcb)
                    }
                } else {
                    // Sort hi() in reverse order so shorter spans are attempted after longer spans.
                    // This guarantees that, if a `prev` span overlaps, and is not equal to, a
                    // `curr` span, the prev span either extends further left of the curr span, or
                    // they start at the same position and the prev span extends further right of
                    // the end of the curr span.
                    b.span.hi().partial_cmp(&a.span.hi())
                }
            } else {
                a.span.lo().partial_cmp(&b.span.lo())
            }
            .unwrap()
        });

        initial_spans
    }

    /// Iterate through the sorted `CoverageSpan`s, and return the refined list of merged and
    /// de-duplicated `CoverageSpan`s.
    fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
        // Each iteration compares `curr` against `prev` and decides whether to merge them, keep
        // one, or defer the decision (via `pending_dups`).
        while self.next_coverage_span() {
            if self.curr().is_mergeable(self.prev()) {
                debug!(" same bcb (and neither is a closure), merge with prev={:?}", self.prev());
                let prev = self.take_prev();
                self.curr_mut().merge_from(prev);
                // Note that curr.span may now differ from curr_original_span
            } else if self.prev_ends_before_curr() {
                debug!(
                    " different bcbs and disjoint spans, so keep curr for next iter, and add \
                    prev={:?}",
                    self.prev()
                );
                let prev = self.take_prev();
                self.refined_spans.push(prev);
            } else if self.prev().is_closure {
                // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
                // next iter
                debug!(
                    " curr overlaps a closure (prev). Drop curr and keep prev for next iter. \
                    prev={:?}",
                    self.prev()
                );
                self.discard_curr();
            } else if self.curr().is_closure {
                self.carve_out_span_for_closure();
            } else if self.prev_original_span == self.curr().span {
                // Note that this compares the new span to `prev_original_span`, which may not
                // be the full `prev.span` (if merged during the previous iteration).
                self.hold_pending_dups_unless_dominated();
            } else {
                self.cutoff_prev_at_overlapping_curr();
            }
        }

        // Flush the trailing `prev` and any still-pending duplicates into the result.
        debug!(" AT END, adding last prev={:?}", self.prev());
        let prev = self.take_prev();
        let CoverageSpans {
            mir_body, basic_coverage_blocks, pending_dups, mut refined_spans, ..
        } = self;
        for dup in pending_dups {
            debug!(" ...adding at least one pending dup={:?}", dup);
            refined_spans.push(dup);
        }
        refined_spans.push(prev);

        // Remove `CoverageSpan`s with empty spans ONLY if the empty `CoverageSpan`s BCB also has at
        // least one other non-empty `CoverageSpan`.
        let mut has_coverage = BitSet::new_empty(basic_coverage_blocks.num_nodes());
        for covspan in &refined_spans {
            if !covspan.span.is_empty() {
                has_coverage.insert(covspan.bcb);
            }
        }
        refined_spans.retain(|covspan| {
            !(covspan.span.is_empty()
                && is_goto(&basic_coverage_blocks[covspan.bcb].terminator(mir_body).kind)
                && has_coverage.contains(covspan.bcb))
        });

        // Remove `CoverageSpan`s derived from closures, originally added to ensure the coverage
        // regions for the current function leave room for the closure's own coverage regions
        // (injected separately, from the closure's own MIR).
        refined_spans.retain(|covspan| !covspan.is_closure);
        refined_spans
    }

    // Generate a set of `CoverageSpan`s from the filtered set of `Statement`s and `Terminator`s of
    // the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One `CoverageSpan` is generated
    // for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
    // merge some `CoverageSpan`s, at which point a `CoverageSpan` may represent multiple
    // `Statement`s and/or `Terminator`s.)
    fn bcb_to_initial_coverage_spans(
        &self,
        bcb: BasicCoverageBlock,
        bcb_data: &'a BasicCoverageBlockData,
    ) -> Vec<CoverageSpan> {
        bcb_data
            .basic_blocks
            .iter()
            .flat_map(|&bb| {
                let data = &self.mir_body[bb];
                data.statements
                    .iter()
                    .enumerate()
                    .filter_map(move |(index, statement)| {
                        filtered_statement_span(statement, self.body_span).map(|span| {
                            CoverageSpan::for_statement(statement, span, bcb, bb, index)
                        })
                    })
                    .chain(
                        filtered_terminator_span(data.terminator(), self.body_span)
                            .map(|span| CoverageSpan::for_terminator(span, bcb, bb)),
                    )
            })
            .collect()
    }

    /// Accessor for the current span; `bug!`s if called outside an iteration (when `some_curr`
    /// is `None`).
    fn curr(&self) -> &CoverageSpan {
        self.some_curr
            .as_ref()
            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
    }

    /// Mutable accessor for the current span; `bug!`s if `some_curr` is `None`.
    fn curr_mut(&mut self) -> &mut CoverageSpan {
        self.some_curr
            .as_mut()
            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
    }

    /// Accessor for the previous span; `bug!`s if `some_prev` is `None`.
    fn prev(&self) -> &CoverageSpan {
        self.some_prev
            .as_ref()
            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
    }

    /// Mutable accessor for the previous span; `bug!`s if `some_prev` is `None`.
    fn prev_mut(&mut self) -> &mut CoverageSpan {
        self.some_prev
            .as_mut()
            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
    }

    /// Moves `prev` out of the state machine; `bug!`s if `some_prev` is `None`.
    fn take_prev(&mut self) -> CoverageSpan {
        self.some_prev.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
    }

    /// If there are `pending_dups` but `prev` is not a matching dup (`prev.span` doesn't match the
    /// `pending_dups` spans), then one of the following two things happened during the previous
    /// iteration:
    ///   * the previous `curr` span (which is now `prev`) was not a duplicate of the pending_dups
    ///     (in which case there should be at least two spans in `pending_dups`); or
    ///   * the `span` of `prev` was modified by `curr_mut().merge_from(prev)` (in which case
    ///     `pending_dups` could have as few as one span)
    /// In either case, no more spans will match the span of `pending_dups`, so
    /// add the `pending_dups` if they don't overlap `curr`, and clear the list.
    fn check_pending_dups(&mut self) {
        if let Some(dup) = self.pending_dups.last() {
            if dup.span != self.prev().span {
                debug!(
                    " SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
                    previous iteration, or prev started a new disjoint span"
                );
                if dup.span.hi() <= self.curr().span.lo() {
                    // The dups are fully to the left of `curr`, so they can never overlap it:
                    // commit them to the refined spans.
                    let pending_dups = self.pending_dups.split_off(0);
                    for dup in pending_dups.into_iter() {
                        debug!(" ...adding at least one pending={:?}", dup);
                        self.refined_spans.push(dup);
                    }
                } else {
                    // The dups overlap `curr`; they can no longer be counted, so drop them.
                    self.pending_dups.clear();
                }
            }
        }
    }

    /// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
    fn next_coverage_span(&mut self) -> bool {
        if let Some(curr) = self.some_curr.take() {
            self.some_prev = Some(curr);
            self.prev_original_span = self.curr_original_span;
        }
        while let Some(curr) = self.sorted_spans_iter.as_mut().unwrap().next() {
            debug!("FOR curr={:?}", curr);
            if self.prev_starts_after_next(&curr) {
                debug!(
                    " prev.span starts after curr.span, so curr will be dropped (skipping past \
                    closure?); prev={:?}",
                    self.prev()
                );
            } else {
                // Save a copy of the original span for `curr` in case the `CoverageSpan` is changed
                // by `self.curr_mut().merge_from(prev)`.
                self.curr_original_span = curr.span;
                self.some_curr.replace(curr);
                self.check_pending_dups();
                return true;
            }
        }
        false
    }

    /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
    /// `curr` coverage span.
    fn discard_curr(&mut self) {
        self.some_curr = None;
    }

    /// Returns true if the curr span should be skipped because prev has already advanced beyond the
    /// end of curr. This can only happen if a prior iteration updated `prev` to skip past a region
    /// of code, such as skipping past a closure.
    fn prev_starts_after_next(&self, next_curr: &CoverageSpan) -> bool {
        self.prev().span.lo() > next_curr.span.lo()
    }

    /// Returns true if the curr span starts past the end of the prev span, which means they don't
    /// overlap, so we now know the prev can be added to the refined coverage spans.
    fn prev_ends_before_curr(&self) -> bool {
        self.prev().span.hi() <= self.curr().span.lo()
    }

    /// If `prev`s span extends left of the closure (`curr`), carve out the closure's
    /// span from `prev`'s span. (The closure's coverage counters will be injected when
    /// processing the closure's own MIR.) Add the portion of the span to the left of the
    /// closure; and if the span extends to the right of the closure, update `prev` to
    /// that portion of the span. For any `pending_dups`, repeat the same process.
    fn carve_out_span_for_closure(&mut self) {
        let curr_span = self.curr().span;
        let left_cutoff = curr_span.lo();
        let right_cutoff = curr_span.hi();
        let has_pre_closure_span = self.prev().span.lo() < right_cutoff;
        let has_post_closure_span = self.prev().span.hi() > right_cutoff;
        let mut pending_dups = self.pending_dups.split_off(0);
        if has_pre_closure_span {
            // Emit the portion of `prev` (and each pending dup) that lies left of the closure.
            let mut pre_closure = self.prev().clone();
            pre_closure.span = pre_closure.span.with_hi(left_cutoff);
            debug!(" prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure);
            if !pending_dups.is_empty() {
                for mut dup in pending_dups.iter().cloned() {
                    dup.span = dup.span.with_hi(left_cutoff);
                    debug!(" ...and at least one pre_closure dup={:?}", dup);
                    self.refined_spans.push(dup);
                }
            }
            self.refined_spans.push(pre_closure);
        }
        if has_post_closure_span {
            // Update prev.span to start after the closure (and discard curr)
            self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
            self.prev_original_span = self.prev().span;
            for dup in pending_dups.iter_mut() {
                dup.span = dup.span.with_lo(right_cutoff);
            }
            self.pending_dups.append(&mut pending_dups);
            self.discard_curr(); // since self.prev() was already updated
        } else {
            pending_dups.clear();
        }
    }

    /// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
    /// `pending_dups` spans, if any); but keep in mind, `prev.span` may start at a `Span.lo()` that
    /// is less than (further left of) `prev_original_span.lo()`.
    ///
    /// When two `CoverageSpan`s have the same `Span`, dominated spans can be discarded; but if
    /// neither `CoverageSpan` dominates the other, both (or possibly more than two) are held,
    /// until their disposition is determined. In this latter case, the `prev` dup is moved into
    /// `pending_dups` so the new `curr` dup can be moved to `prev` for the next iteration.
    fn hold_pending_dups_unless_dominated(&mut self) {
        // Equal coverage spans are ordered by dominators before dominated (if any), so it should be
        // impossible for `curr` to dominate any previous `CoverageSpan`.
        debug_assert!(!self.span_bcb_is_dominated_by(self.prev(), self.curr()));

        let initial_pending_count = self.pending_dups.len();
        if initial_pending_count > 0 {
            let mut pending_dups = self.pending_dups.split_off(0);
            pending_dups.retain(|dup| !self.span_bcb_is_dominated_by(self.curr(), dup));
            self.pending_dups.append(&mut pending_dups);
            if self.pending_dups.len() < initial_pending_count {
                debug!(
                    " discarded {} of {} pending_dups that dominated curr",
                    initial_pending_count - self.pending_dups.len(),
                    initial_pending_count
                );
            }
        }

        if self.span_bcb_is_dominated_by(self.curr(), self.prev()) {
            debug!(
                " different bcbs but SAME spans, and prev dominates curr. Discard prev={:?}",
                self.prev()
            );
            self.cutoff_prev_at_overlapping_curr();
            // If one span dominates the other, associate the span with the code from the dominated
            // block only (`curr`), and discard the overlapping portion of the `prev` span. (Note
            // that if `prev.span` is wider than `prev_original_span`, a `CoverageSpan` will still
            // be created for `prev`s block, for the non-overlapping portion, left of `curr.span`.)
            //
            // For example:
            //     match somenum {
            //         x if x < 1 => { ... }
            //     }...
            //
            // The span for the first `x` is referenced by both the pattern block (every time it is
            // evaluated) and the arm code (only when matched). The counter will be applied only to
            // the dominated block. This allows coverage to track and highlight things like the
            // assignment of `x` above, if the branch is matched, making `x` available to the arm
            // code; and to track and highlight the question mark `?` "try" operator at the end of
            // a function call returning a `Result`, so the `?` is covered when the function returns
            // an `Err`, and not counted as covered if the function always returns `Ok`.
        } else {
            // Save `prev` in `pending_dups`. (`curr` will become `prev` in the next iteration.)
            // If the `curr` CoverageSpan is later discarded, `pending_dups` can be discarded as
            // well; but if `curr` is added to refined_spans, the `pending_dups` will also be added.
            debug!(
                " different bcbs but SAME spans, and neither dominates, so keep curr for \
                next iter, and, pending upcoming spans (unless overlapping) add prev={:?}",
                self.prev()
            );
            let prev = self.take_prev();
            self.pending_dups.push(prev);
        }
    }

    /// `curr` overlaps `prev`. If `prev`s span extends left of `curr`s span, keep _only_
    /// statements that end before `curr.lo()` (if any), and add the portion of the
    /// combined span for those statements. Any other statements have overlapping spans
    /// that can be ignored because `curr` and/or other upcoming statements/spans inside
    /// the overlap area will produce their own counters. This disambiguation process
    /// avoids injecting multiple counters for overlapping spans, and the potential for
    /// double-counting.
    fn cutoff_prev_at_overlapping_curr(&mut self) {
        debug!(
            " different bcbs, overlapping spans, so ignore/drop pending and only add prev \
            if it has statements that end before curr; prev={:?}",
            self.prev()
        );
        if self.pending_dups.is_empty() {
            let curr_span = self.curr().span;
            self.prev_mut().cutoff_statements_at(curr_span.lo());
            if self.prev().coverage_statements.is_empty() {
                debug!(" ... no non-overlapping statements to add");
            } else {
                debug!(" ... adding modified prev={:?}", self.prev());
                let prev = self.take_prev();
                self.refined_spans.push(prev);
            }
        } else {
            // with `pending_dups`, `prev` cannot have any statements that don't overlap
            self.pending_dups.clear();
        }
    }

    /// Returns `true` if `covspan`'s BCB is dominated by `dom_covspan`'s BCB in the BCB CFG.
    fn span_bcb_is_dominated_by(&self, covspan: &CoverageSpan, dom_covspan: &CoverageSpan) -> bool {
        self.basic_coverage_blocks.is_dominated_by(covspan.bcb, dom_covspan.bcb)
    }
}
647 | ||
648 | fn filtered_statement_span(statement: &'a Statement<'tcx>, body_span: Span) -> Option<Span> { | |
649 | match statement.kind { | |
650 | // These statements have spans that are often outside the scope of the executed source code | |
651 | // for their parent `BasicBlock`. | |
652 | StatementKind::StorageLive(_) | |
653 | | StatementKind::StorageDead(_) | |
654 | // Coverage should not be encountered, but don't inject coverage coverage | |
655 | | StatementKind::Coverage(_) | |
656 | // Ignore `Nop`s | |
657 | | StatementKind::Nop => None, | |
658 | ||
659 | // FIXME(#78546): MIR InstrumentCoverage - Can the source_info.span for `FakeRead` | |
660 | // statements be more consistent? | |
661 | // | |
662 | // FakeReadCause::ForGuardBinding, in this example: | |
663 | // match somenum { | |
664 | // x if x < 1 => { ... } | |
665 | // }... | |
666 | // The BasicBlock within the match arm code included one of these statements, but the span | |
667 | // for it covered the `1` in this source. The actual statements have nothing to do with that | |
668 | // source span: | |
669 | // FakeRead(ForGuardBinding, _4); | |
670 | // where `_4` is: | |
671 | // _4 = &_1; (at the span for the first `x`) | |
672 | // and `_1` is the `Place` for `somenum`. | |
673 | // | |
674 | // If and when the Issue is resolved, remove this special case match pattern: | |
675 | StatementKind::FakeRead(cause, _) if cause == FakeReadCause::ForGuardBinding => None, | |
676 | ||
677 | // Retain spans from all other statements | |
678 | StatementKind::FakeRead(_, _) // Not including `ForGuardBinding` | |
679 | | StatementKind::Assign(_) | |
680 | | StatementKind::SetDiscriminant { .. } | |
681 | | StatementKind::LlvmInlineAsm(_) | |
682 | | StatementKind::Retag(_, _) | |
683 | | StatementKind::AscribeUserType(_, _) => { | |
684 | Some(function_source_span(statement.source_info.span, body_span)) | |
685 | } | |
686 | } | |
687 | } | |
688 | ||
/// Returns the coverage-relevant `Span` for `terminator`, clamped to `body_span`, or `None` for
/// terminator kinds whose spans do not correspond to executed source code.
fn filtered_terminator_span(terminator: &'a Terminator<'tcx>, body_span: Span) -> Option<Span> {
    match terminator.kind {
        // These terminators have spans that don't positively contribute to computing a reasonable
        // span of actually executed source code. (For example, SwitchInt terminators extracted from
        // an `if condition { block }` has a span that includes the executed block, if true,
        // but for coverage, the code region executed, up to *and* through the SwitchInt,
        // actually stops before the if's block.)
        TerminatorKind::Unreachable // Unreachable blocks are not connected to the MIR CFG
        | TerminatorKind::Assert { .. }
        | TerminatorKind::Drop { .. }
        | TerminatorKind::DropAndReplace { .. }
        | TerminatorKind::SwitchInt { .. }
        // For `FalseEdge`, only the `real` branch is taken, so it is similar to a `Goto`.
        // FIXME(richkadel): Note that `Goto` was moved to its own match arm, for the reasons
        // described below. Add tests to confirm whether or not similar cases also apply to
        // `FalseEdge`.
        | TerminatorKind::FalseEdge { .. } => None,

        // FIXME(#78542): Can spans for `TerminatorKind::Goto` be improved to avoid special cases?
        //
        // `Goto`s are often the targets of `SwitchInt` branches, and certain important
        // optimizations to replace some `Counter`s with `Expression`s require a separate
        // `BasicCoverageBlock` for each branch, to support the `Counter`, when needed.
        //
        // Also, some test cases showed that `Goto` terminators, and to some degree their `Span`s,
        // provided useful context for coverage, such as to count and show when `if` blocks
        // _without_ `else` blocks execute the `false` case (counting when the body of the `if`
        // was _not_ taken). In these cases, the `Goto` span is ultimately given a `CoverageSpan`
        // of 1 character, at the end of its original `Span`.
        //
        // However, in other cases, a visible `CoverageSpan` is not wanted, but the `Goto`
        // block must still be counted (for example, to contribute its count to an `Expression`
        // that reports the execution count for some other block). In these cases, the code region
        // is set to `None`. (See `Instrumentor::is_code_region_redundant()`.)
        TerminatorKind::Goto { .. } => {
            // `shrink_to_hi()` yields the zero-length span at the end of the `Goto`'s span.
            Some(function_source_span(terminator.source_info.span.shrink_to_hi(), body_span))
        }

        // Retain spans from all other terminators
        TerminatorKind::Resume
        | TerminatorKind::Abort
        | TerminatorKind::Return
        | TerminatorKind::Call { .. }
        | TerminatorKind::Yield { .. }
        | TerminatorKind::GeneratorDrop
        | TerminatorKind::FalseUnwind { .. }
        | TerminatorKind::InlineAsm { .. } => {
            Some(function_source_span(terminator.source_info.span, body_span))
        }
    }
}
740 | ||
741 | #[inline] | |
742 | fn function_source_span(span: Span, body_span: Span) -> Span { | |
743 | let span = original_sp(span, body_span).with_ctxt(SyntaxContext::root()); | |
744 | if body_span.contains(span) { span } else { body_span } | |
745 | } | |
746 | ||
747 | #[inline(always)] | |
748 | fn is_goto(term_kind: &TerminatorKind<'tcx>) -> bool { | |
749 | match term_kind { | |
750 | TerminatorKind::Goto { .. } => true, | |
751 | _ => false, | |
752 | } | |
753 | } |