]> git.proxmox.com Git - rustc.git/blob - src/librustc_data_structures/profiling.rs
New upstream version 1.43.0+dfsg1
[rustc.git] / src / librustc_data_structures / profiling.rs
1 //! # Rust Compiler Self-Profiling
2 //!
3 //! This module implements the basic framework for the compiler's self-
4 //! profiling support. It provides the `SelfProfiler` type which enables
5 //! recording "events". An event is something that starts and ends at a given
6 //! point in time and has an ID and a kind attached to it. This allows for
7 //! tracing the compiler's activity.
8 //!
9 //! Internally this module uses the custom tailored [measureme][mm] crate for
10 //! efficiently recording events to disk in a compact format that can be
11 //! post-processed and analyzed by the suite of tools in the `measureme`
12 //! project. The highest priority for the tracing framework is on incurring as
13 //! little overhead as possible.
14 //!
15 //!
16 //! ## Event Overview
17 //!
18 //! Events have a few properties:
19 //!
20 //! - The `event_kind` designates the broad category of an event (e.g. does it
21 //! correspond to the execution of a query provider or to loading something
22 //! from the incr. comp. on-disk cache, etc).
23 //! - The `event_id` designates the query invocation or function call it
24 //! corresponds to, possibly including the query key or function arguments.
25 //! - Each event stores the ID of the thread it was recorded on.
26 //! - The timestamp stores beginning and end of the event, or the single point
27 //! in time it occurred at for "instant" events.
28 //!
29 //!
30 //! ## Event Filtering
31 //!
32 //! Event generation can be filtered by event kind. Recording all possible
33 //! events generates a lot of data, much of which is not needed for most kinds
34 //! of analysis. So, in order to keep overhead as low as possible for a given
35 //! use case, the `SelfProfiler` will only record the kinds of events that
36 //! pass the filter specified as a command line argument to the compiler.
37 //!
38 //!
39 //! ## `event_id` Assignment
40 //!
41 //! As far as `measureme` is concerned, `event_id`s are just strings. However,
42 //! it would incur too much overhead to generate and persist each `event_id`
43 //! string at the point where the event is recorded. In order to make this more
44 //! efficient `measureme` has two features:
45 //!
46 //! - Strings can share their content, so that re-occurring parts don't have to
47 //! be copied over and over again. One allocates a string in `measureme` and
48 //! gets back a `StringId`. This `StringId` is then used to refer to that
49 //! string. `measureme` strings are actually DAGs of string components so that
50 //! arbitrary sharing of substrings can be done efficiently. This is useful
51 //! because `event_id`s contain lots of redundant text like query names or
52 //! def-path components.
53 //!
54 //! - `StringId`s can be "virtual" which means that the client picks a numeric
55 //! ID according to some application-specific scheme and can later make that
56 //! ID be mapped to an actual string. This is used to cheaply generate
57 //! `event_id`s while the events actually occur, causing little timing
58 //! distortion, and then later map those `StringId`s, in bulk, to actual
59 //! `event_id` strings. This way the largest part of the tracing overhead is
60 //! localized to one contiguous chunk of time.
61 //!
62 //! How are these `event_id`s generated in the compiler? For things that occur
63 //! infrequently (e.g. "generic activities"), we just allocate the string the
64 //! first time it is used and then keep the `StringId` in a hash table. This
65 //! is implemented in `SelfProfiler::get_or_alloc_cached_string()`.
66 //!
67 //! For queries it gets more interesting: First we need a unique numeric ID for
68 //! each query invocation (the `QueryInvocationId`). This ID is used as the
69 //! virtual `StringId` we use as `event_id` for a given event. This ID has to
70 //! be available both when the query is executed and later, together with the
71 //! query key, when we allocate the actual `event_id` strings in bulk.
72 //!
73 //! We could make the compiler generate and keep track of such an ID for each
//! query invocation but luckily we already have something that fits all
//! the requirements: the query's `DepNodeIndex`. So we use the numeric value
76 //! of the `DepNodeIndex` as `event_id` when recording the event and then,
77 //! just before the query context is dropped, we walk the entire query cache
78 //! (which stores the `DepNodeIndex` along with the query key for each
79 //! invocation) and allocate the corresponding strings together with a mapping
80 //! for `DepNodeIndex as StringId`.
81 //!
82 //! [mm]: https://github.com/rust-lang/measureme/
83
84 use crate::cold_path;
85 use crate::fx::FxHashMap;
86
87 use std::borrow::Borrow;
88 use std::collections::hash_map::Entry;
89 use std::convert::Into;
90 use std::error::Error;
91 use std::fs;
92 use std::path::Path;
93 use std::process;
94 use std::sync::Arc;
95 use std::time::{Duration, Instant};
96 use std::u32;
97
98 use measureme::{EventId, EventIdBuilder, SerializableString, StringId};
99 use parking_lot::RwLock;
100
/// MmapSerializationSink is faster on macOS and Linux
/// but FileSerializationSink is faster on Windows.
#[cfg(not(windows))]
type SerializationSink = measureme::MmapSerializationSink;
#[cfg(windows)]
type SerializationSink = measureme::FileSerializationSink;

// Shorthand for measureme's `Profiler` instantiated with the platform's sink.
type Profiler = measureme::Profiler<SerializationSink>;
109
/// Broad phases of compilation, used to categorize profiling data.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)]
pub enum ProfileCategory {
    Parsing,
    Expansion,
    TypeChecking,
    BorrowChecking,
    Codegen,
    Linking,
    Other,
}
120
bitflags::bitflags! {
    /// Bitmask selecting which kinds of events get recorded. Parsed from the
    /// `-Z self-profile-events` flag via `EVENT_FILTERS_BY_NAME`.
    struct EventFilter: u32 {
        const GENERIC_ACTIVITIES = 1 << 0;
        const QUERY_PROVIDERS = 1 << 1;
        const QUERY_CACHE_HITS = 1 << 2;
        const QUERY_BLOCKED = 1 << 3;
        const INCR_CACHE_LOADS = 1 << 4;

        // The following flags enable extra data on events (or LLVM events)
        // rather than a basic event kind of their own.
        const QUERY_KEYS = 1 << 5;
        const FUNCTION_ARGS = 1 << 6;
        const LLVM = 1 << 7;

        // What gets recorded when no explicit event list is given. Note that
        // query cache hits are not part of the default set.
        const DEFAULT = Self::GENERIC_ACTIVITIES.bits |
                        Self::QUERY_PROVIDERS.bits |
                        Self::QUERY_BLOCKED.bits |
                        Self::INCR_CACHE_LOADS.bits;

        // Convenience alias enabling all per-event argument recording.
        const ARGS = Self::QUERY_KEYS.bits | Self::FUNCTION_ARGS.bits;
    }
}
141
// Maps each user-facing event-filter name (as accepted on the command line)
// to the `EventFilter` bits it enables.
// keep this in sync with the `-Z self-profile-events` help message in librustc_session/options.rs
const EVENT_FILTERS_BY_NAME: &[(&str, EventFilter)] = &[
    ("none", EventFilter::empty()),
    ("all", EventFilter::all()),
    ("default", EventFilter::DEFAULT),
    ("generic-activity", EventFilter::GENERIC_ACTIVITIES),
    ("query-provider", EventFilter::QUERY_PROVIDERS),
    ("query-cache-hit", EventFilter::QUERY_CACHE_HITS),
    ("query-blocked", EventFilter::QUERY_BLOCKED),
    ("incr-cache-load", EventFilter::INCR_CACHE_LOADS),
    ("query-keys", EventFilter::QUERY_KEYS),
    ("function-args", EventFilter::FUNCTION_ARGS),
    ("args", EventFilter::ARGS),
    ("llvm", EventFilter::LLVM),
];
157
/// Something that uniquely identifies a query invocation. The numeric value
/// doubles as the virtual `StringId` used for the event's `event_id`
/// (see the "`event_id` Assignment" section of the module docs).
pub struct QueryInvocationId(pub u32);
160
/// A reference to the SelfProfiler. It can be cloned and sent across thread
/// boundaries at will.
#[derive(Clone)]
pub struct SelfProfilerRef {
    // This field is `None` if self-profiling is disabled for the current
    // compilation session.
    profiler: Option<Arc<SelfProfiler>>,

    // We store the filter mask directly in the reference because that doesn't
    // cost anything and allows for filtering without dereferencing the `Arc`.
    // The mask is empty whenever `profiler` is `None`, so a single mask check
    // also covers the "is profiling enabled at all?" test.
    event_filter_mask: EventFilter,

    // Print verbose generic activities to stdout.
    print_verbose_generic_activities: bool,

    // Print extra verbose generic activities to stdout.
    print_extra_verbose_generic_activities: bool,
}
180
impl SelfProfilerRef {
    /// Creates a new reference, caching the profiler's event-filter mask
    /// locally. If `profiler` is `None`, the mask is empty so every filter
    /// check fails fast without ever touching the (absent) profiler.
    pub fn new(
        profiler: Option<Arc<SelfProfiler>>,
        print_verbose_generic_activities: bool,
        print_extra_verbose_generic_activities: bool,
    ) -> SelfProfilerRef {
        // If there is no SelfProfiler then the filter mask is set to NONE,
        // ensuring that nothing ever tries to actually access it.
        let event_filter_mask =
            profiler.as_ref().map(|p| p.event_filter_mask).unwrap_or(EventFilter::empty());

        SelfProfilerRef {
            profiler,
            event_filter_mask,
            print_verbose_generic_activities,
            print_extra_verbose_generic_activities,
        }
    }

    // This shim makes sure that calls only get executed if the filter mask
    // lets them pass. It also contains some trickery to make sure that
    // code is optimized for non-profiling compilation sessions, i.e. anything
    // past the filter check is never inlined so it doesn't clutter the fast
    // path.
    #[inline(always)]
    fn exec<F>(&self, event_filter: EventFilter, f: F) -> TimingGuard<'_>
    where
        F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
    {
        // Out-of-line slow path: only reached when `event_filter` passes the
        // mask; keeping it un-inlined keeps the common path small.
        #[inline(never)]
        fn cold_call<F>(profiler_ref: &SelfProfilerRef, f: F) -> TimingGuard<'_>
        where
            F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
        {
            // `unwrap()` cannot fail here: a non-empty filter mask implies
            // that a profiler was supplied (see `SelfProfilerRef::new`).
            let profiler = profiler_ref.profiler.as_ref().unwrap();
            f(&**profiler)
        }

        if unlikely!(self.event_filter_mask.contains(event_filter)) {
            cold_call(self, f)
        } else {
            TimingGuard::none()
        }
    }

    /// Start profiling a verbose generic activity. Profiling continues until the
    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
    /// a measureme event, "verbose" generic activities also print a timing entry to
    /// stdout if the compiler is invoked with -Ztime or -Ztime-passes.
    pub fn verbose_generic_activity<'a>(
        &'a self,
        event_label: &'static str,
    ) -> VerboseTimingGuard<'a> {
        let message =
            if self.print_verbose_generic_activities { Some(event_label.to_owned()) } else { None };

        VerboseTimingGuard::start(message, self.generic_activity(event_label))
    }

    /// Start profiling an extra verbose generic activity. Profiling continues until the
    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
    /// a measureme event, "extra verbose" generic activities also print a timing entry to
    /// stdout if the compiler is invoked with -Ztime-passes.
    pub fn extra_verbose_generic_activity<'a, A>(
        &'a self,
        event_label: &'static str,
        event_arg: A,
    ) -> VerboseTimingGuard<'a>
    where
        A: Borrow<str> + Into<String>,
    {
        let message = if self.print_extra_verbose_generic_activities {
            Some(format!("{}({})", event_label, event_arg.borrow()))
        } else {
            None
        };

        VerboseTimingGuard::start(message, self.generic_activity_with_arg(event_label, event_arg))
    }

    /// Start profiling a generic activity. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn generic_activity(&self, event_label: &'static str) -> TimingGuard<'_> {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            let event_id = EventId::from_label(event_label);
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Start profiling a generic activity, attaching `event_arg` to the
    /// event ID when `function-args` events are enabled. Profiling continues
    /// until the TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn generic_activity_with_arg<A>(
        &self,
        event_label: &'static str,
        event_arg: A,
    ) -> TimingGuard<'_>
    where
        A: Borrow<str> + Into<String>,
    {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let builder = EventIdBuilder::new(&profiler.profiler);
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            // The argument string is only allocated if the user opted into
            // `function-args` (directly or via `args`/`all`).
            let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
                let event_arg = profiler.get_or_alloc_cached_string(event_arg);
                builder.from_label_and_arg(event_label, event_arg)
            } else {
                builder.from_label(event_label)
            };
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Start profiling a query provider. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn query_provider(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::QUERY_PROVIDERS, |profiler| {
            // The real event ID is filled in later via
            // `TimingGuard::finish_with_query_invocation_id`.
            TimingGuard::start(profiler, profiler.query_event_kind, EventId::INVALID)
        })
    }

    /// Record a query in-memory cache hit.
    #[inline(always)]
    pub fn query_cache_hit(&self, query_invocation_id: QueryInvocationId) {
        self.instant_query_event(
            |profiler| profiler.query_cache_hit_event_kind,
            query_invocation_id,
            EventFilter::QUERY_CACHE_HITS,
        );
    }

    /// Start profiling a query being blocked on a concurrent execution.
    /// Profiling continues until the TimingGuard returned from this call is
    /// dropped.
    #[inline(always)]
    pub fn query_blocked(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::QUERY_BLOCKED, |profiler| {
            TimingGuard::start(profiler, profiler.query_blocked_event_kind, EventId::INVALID)
        })
    }

    /// Start profiling how long it takes to load a query result from the
    /// incremental compilation on-disk cache. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn incr_cache_loading(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::INCR_CACHE_LOADS, |profiler| {
            TimingGuard::start(
                profiler,
                profiler.incremental_load_result_event_kind,
                EventId::INVALID,
            )
        })
    }

    /// Common implementation for "instant" (point-in-time) query events.
    /// `event_kind` is passed as a projection function so the field lookup
    /// only happens after the filter check has passed.
    #[inline(always)]
    fn instant_query_event(
        &self,
        event_kind: fn(&SelfProfiler) -> StringId,
        query_invocation_id: QueryInvocationId,
        event_filter: EventFilter,
    ) {
        drop(self.exec(event_filter, |profiler| {
            // The numeric invocation ID doubles as a virtual `StringId` that
            // is mapped to a real string later (see module docs).
            let event_id = StringId::new_virtual(query_invocation_id.0);
            let thread_id = std::thread::current().id().as_u64() as u32;

            profiler.profiler.record_instant_event(
                event_kind(profiler),
                EventId::from_virtual(event_id),
                thread_id,
            );

            TimingGuard::none()
        }));
    }

    /// Runs `f` with the underlying `SelfProfiler` if profiling is enabled
    /// for this session; does nothing otherwise.
    pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
        if let Some(profiler) = &self.profiler {
            f(&profiler)
        }
    }

    /// Returns `true` if self-profiling is enabled for this session.
    #[inline]
    pub fn enabled(&self) -> bool {
        self.profiler.is_some()
    }

    /// Returns `true` if LLVM events should be recorded (`llvm` filter set).
    #[inline]
    pub fn llvm_recording_enabled(&self) -> bool {
        self.event_filter_mask.contains(EventFilter::LLVM)
    }
    /// Returns a clonable handle to the profiler, if one is active.
    #[inline]
    pub fn get_self_profiler(&self) -> Option<Arc<SelfProfiler>> {
        self.profiler.clone()
    }
}
380
/// The central self-profiling data collector. Usually accessed through a
/// (clonable, filter-aware) `SelfProfilerRef`.
pub struct SelfProfiler {
    // The underlying measureme profiler that serializes events to disk.
    profiler: Profiler,
    // Which event kinds get recorded; see `EVENT_FILTERS_BY_NAME`.
    event_filter_mask: EventFilter,

    // Deduplication cache backing `get_or_alloc_cached_string()`.
    string_cache: RwLock<FxHashMap<String, StringId>>,

    // Pre-allocated `event_kind` strings, one per supported event category.
    query_event_kind: StringId,
    generic_activity_event_kind: StringId,
    incremental_load_result_event_kind: StringId,
    query_blocked_event_kind: StringId,
    query_cache_hit_event_kind: StringId,
}
393
impl SelfProfiler {
    /// Creates a profiler writing to
    /// `<output_directory>/<crate_name>-<pid>.rustc_profile`, creating the
    /// directory first if necessary. `event_filters` holds the event names
    /// given on the command line; `None` selects `EventFilter::DEFAULT`.
    pub fn new(
        output_directory: &Path,
        crate_name: Option<&str>,
        event_filters: &Option<Vec<String>>,
    ) -> Result<SelfProfiler, Box<dyn Error>> {
        fs::create_dir_all(output_directory)?;

        let crate_name = crate_name.unwrap_or("unknown-crate");
        // Include the process ID so parallel compiler invocations don't
        // clobber each other's trace files.
        let filename = format!("{}-{}.rustc_profile", crate_name, process::id());
        let path = output_directory.join(&filename);
        let profiler = Profiler::new(&path)?;

        // Allocate the event-kind strings once up front; events only store
        // the resulting `StringId`s.
        let query_event_kind = profiler.alloc_string("Query");
        let generic_activity_event_kind = profiler.alloc_string("GenericActivity");
        let incremental_load_result_event_kind = profiler.alloc_string("IncrementalLoadResult");
        let query_blocked_event_kind = profiler.alloc_string("QueryBlocked");
        let query_cache_hit_event_kind = profiler.alloc_string("QueryCacheHit");

        let mut event_filter_mask = EventFilter::empty();

        if let Some(ref event_filters) = *event_filters {
            let mut unknown_events = vec![];
            for item in event_filters {
                if let Some(&(_, mask)) =
                    EVENT_FILTERS_BY_NAME.iter().find(|&(name, _)| name == item)
                {
                    event_filter_mask |= mask;
                } else {
                    unknown_events.push(item.clone());
                }
            }

            // Warn about any unknown event names
            if !unknown_events.is_empty() {
                unknown_events.sort();
                unknown_events.dedup();

                warn!(
                    "Unknown self-profiler events specified: {}. Available options are: {}.",
                    unknown_events.join(", "),
                    EVENT_FILTERS_BY_NAME
                        .iter()
                        .map(|&(name, _)| name.to_string())
                        .collect::<Vec<_>>()
                        .join(", ")
                );
            }
        } else {
            event_filter_mask = EventFilter::DEFAULT;
        }

        Ok(SelfProfiler {
            profiler,
            event_filter_mask,
            string_cache: RwLock::new(FxHashMap::default()),
            query_event_kind,
            generic_activity_event_kind,
            incremental_load_result_event_kind,
            query_blocked_event_kind,
            query_cache_hit_event_kind,
        })
    }

    /// Allocates a new string in the profiling data. Does not do any caching
    /// or deduplication.
    pub fn alloc_string<STR: SerializableString + ?Sized>(&self, s: &STR) -> StringId {
        self.profiler.alloc_string(s)
    }

    /// Gets a `StringId` for the given string. This method makes sure that
    /// any strings going through it will only be allocated once in the
    /// profiling data.
    pub fn get_or_alloc_cached_string<A>(&self, s: A) -> StringId
    where
        A: Borrow<str> + Into<String>,
    {
        // Only acquire a read-lock first since we assume that the string is
        // already present in the common case.
        {
            let string_cache = self.string_cache.read();

            if let Some(&id) = string_cache.get(s.borrow()) {
                return id;
            }
        }

        let mut string_cache = self.string_cache.write();
        // Check if the string has already been added in the small time window
        // between dropping the read lock and acquiring the write lock.
        match string_cache.entry(s.into()) {
            Entry::Occupied(e) => *e.get(),
            Entry::Vacant(e) => {
                let string_id = self.profiler.alloc_string(&e.key()[..]);
                *e.insert(string_id)
            }
        }
    }

    /// Maps the virtual `StringId` derived from `from` to the concrete,
    /// already-allocated string `to` (see "`event_id` Assignment" in the
    /// module docs).
    pub fn map_query_invocation_id_to_string(&self, from: QueryInvocationId, to: StringId) {
        let from = StringId::new_virtual(from.0);
        self.profiler.map_virtual_to_concrete_string(from, to);
    }

    /// Bulk version of `map_query_invocation_id_to_string`: maps every ID
    /// yielded by `from` to the single concrete string `to`.
    pub fn bulk_map_query_invocation_id_to_single_string<I>(&self, from: I, to: StringId)
    where
        I: Iterator<Item = QueryInvocationId> + ExactSizeIterator,
    {
        let from = from.map(|qid| StringId::new_virtual(qid.0));
        self.profiler.bulk_map_virtual_to_single_concrete_string(from, to);
    }

    /// Returns `true` if query keys should be included in `event_id`s.
    pub fn query_key_recording_enabled(&self) -> bool {
        self.event_filter_mask.contains(EventFilter::QUERY_KEYS)
    }

    /// Returns a builder for constructing `EventId`s backed by this
    /// profiler's string table.
    pub fn event_id_builder(&self) -> EventIdBuilder<'_, SerializationSink> {
        EventIdBuilder::new(&self.profiler)
    }
}
514
/// Guard for an in-progress interval event: the event's end time is recorded
/// when the guard is dropped. A `None` payload means the event was filtered
/// out and nothing will be recorded.
#[must_use]
pub struct TimingGuard<'a>(Option<measureme::TimingGuard<'a, SerializationSink>>);
517
impl<'a> TimingGuard<'a> {
    /// Starts recording an interval event on the current thread. The event
    /// ends when the returned guard is dropped.
    #[inline]
    pub fn start(
        profiler: &'a SelfProfiler,
        event_kind: StringId,
        event_id: EventId,
    ) -> TimingGuard<'a> {
        let thread_id = std::thread::current().id().as_u64() as u32;
        let raw_profiler = &profiler.profiler;
        let timing_guard =
            raw_profiler.start_recording_interval_event(event_kind, event_id, thread_id);
        TimingGuard(Some(timing_guard))
    }

    /// Ends the event now, overriding its `event_id` with the virtual ID
    /// derived from `query_invocation_id`. Used when the invocation ID only
    /// becomes known after the event has started (e.g. `query_provider`).
    #[inline]
    pub fn finish_with_query_invocation_id(self, query_invocation_id: QueryInvocationId) {
        if let Some(guard) = self.0 {
            // `cold_path` keeps this recording code out of the hot,
            // non-profiling instruction stream.
            cold_path(|| {
                let event_id = StringId::new_virtual(query_invocation_id.0);
                let event_id = EventId::from_virtual(event_id);
                guard.finish_with_override_event_id(event_id);
            });
        }
    }

    /// A no-op guard, returned when the event was filtered out.
    #[inline]
    pub fn none() -> TimingGuard<'a> {
        TimingGuard(None)
    }

    /// Runs `f`, keeping the guard (and thus the event interval) alive for
    /// the duration of the call.
    #[inline(always)]
    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
        let _timer = self;
        f()
    }
}
554
/// Like `TimingGuard`, but additionally prints a `-Z time-passes`-style
/// timing entry to stdout on drop when a message was supplied.
#[must_use]
pub struct VerboseTimingGuard<'a> {
    // Start time and label; present only when printing was requested.
    start_and_message: Option<(Instant, String)>,
    _guard: TimingGuard<'a>,
}
560
561 impl<'a> VerboseTimingGuard<'a> {
562 pub fn start(message: Option<String>, _guard: TimingGuard<'a>) -> Self {
563 VerboseTimingGuard { _guard, start_and_message: message.map(|msg| (Instant::now(), msg)) }
564 }
565
566 #[inline(always)]
567 pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
568 let _timer = self;
569 f()
570 }
571 }
572
impl Drop for VerboseTimingGuard<'_> {
    fn drop(&mut self) {
        // Print the elapsed time, but only if a message was requested at
        // construction time (i.e. -Ztime / -Ztime-passes was given).
        if let Some((start, ref message)) = self.start_and_message {
            print_time_passes_entry(true, &message[..], start.elapsed());
        }
    }
}
580
581 pub fn print_time_passes_entry(do_it: bool, what: &str, dur: Duration) {
582 if !do_it {
583 return;
584 }
585
586 let mem_string = match get_resident() {
587 Some(n) => {
588 let mb = n as f64 / 1_000_000.0;
589 format!("; rss: {}MB", mb.round() as usize)
590 }
591 None => String::new(),
592 };
593 println!("time: {}{}\t{}", duration_to_secs_str(dur), mem_string, what);
594 }
595
// Hack up our own formatting for the duration to make it easier for scripts
// to parse (always use the same number of decimal places and the same unit).
pub fn duration_to_secs_str(dur: std::time::Duration) -> String {
    const NANOS_PER_SEC: f64 = 1_000_000_000.0;
    let whole_secs = dur.as_secs() as f64;
    let frac_secs = f64::from(dur.subsec_nanos()) / NANOS_PER_SEC;

    // Exactly three decimal places; the unit (seconds) is implied.
    format!("{:.3}", whole_secs + frac_secs)
}
604
// Memory reporting
#[cfg(unix)]
fn get_resident() -> Option<usize> {
    // NOTE(review): assumes a 4096-byte page size instead of querying the
    // kernel — TODO confirm this holds on all supported unix targets.
    const PAGE_SIZE: usize = 4096;

    // Field 1 of /proc/self/statm is the resident set size, in pages.
    let statm = fs::read_to_string("/proc/self/statm").ok()?;
    let resident_pages: usize = statm.split_whitespace().nth(1)?.parse().ok()?;
    Some(resident_pages * PAGE_SIZE)
}
615
// Queries the process's working-set size via the Win32 psapi API.
#[cfg(windows)]
fn get_resident() -> Option<usize> {
    use std::mem::{self, MaybeUninit};
    use winapi::shared::minwindef::DWORD;
    use winapi::um::processthreadsapi::GetCurrentProcess;
    use winapi::um::psapi::{GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS};

    let mut pmc = MaybeUninit::<PROCESS_MEMORY_COUNTERS>::uninit();
    // SAFETY: `pmc` is a valid, writable buffer of exactly the size passed
    // in the third argument; the call writes into it and does not retain
    // the pointer.
    match unsafe {
        GetProcessMemoryInfo(GetCurrentProcess(), pmc.as_mut_ptr(), mem::size_of_val(&pmc) as DWORD)
    } {
        // Zero return means the call failed; no memory info available.
        0 => None,
        _ => {
            // SAFETY: a non-zero return indicates the struct was filled in.
            let pmc = unsafe { pmc.assume_init() };
            Some(pmc.WorkingSetSize as usize)
        }
    }
}