//! # Rust Compiler Self-Profiling
//!
//! This module implements the basic framework for the compiler's self-
//! profiling support. It provides the `SelfProfiler` type which enables
//! recording "events". An event is something that starts and ends at a given
//! point in time and has an ID and a kind attached to it. This allows for
//! tracing the compiler's activity.
//!
//! Internally this module uses the custom tailored [measureme][mm] crate for
//! efficiently recording events to disk in a compact format that can be
//! post-processed and analyzed by the suite of tools in the `measureme`
//! project. The highest priority for the tracing framework is to incur as
//! little overhead as possible.
//!
//!
//! ## Event Overview
//!
//! Events have a few properties:
//!
//! - The `event_kind` designates the broad category of an event (e.g. does it
//!   correspond to the execution of a query provider or to loading something
//!   from the incr. comp. on-disk cache, etc).
//! - The `event_id` designates the query invocation or function call it
//!   corresponds to, possibly including the query key or function arguments.
//! - Each event stores the ID of the thread it was recorded on.
//! - The timestamp stores the beginning and end of the event, or the single
//!   point in time it occurred at for "instant" events.
//!
//!
//! ## Event Filtering
//!
//! Event generation can be filtered by event kind. Recording all possible
//! events generates a lot of data, much of which is not needed for most kinds
//! of analysis. So, in order to keep overhead as low as possible for a given
//! use case, the `SelfProfiler` will only record the kinds of events that
//! pass the filter specified as a command line argument to the compiler
//! (e.g. `-Z self-profile-events=default,args`).
//!
//!
//! ## `event_id` Assignment
//!
//! As far as `measureme` is concerned, `event_id`s are just strings. However,
//! it would incur too much overhead to generate and persist each `event_id`
//! string at the point where the event is recorded. In order to make this more
//! efficient, `measureme` has two features:
//!
//! - Strings can share their content, so that re-occurring parts don't have to
//!   be copied over and over again. One allocates a string in `measureme` and
//!   gets back a `StringId`. This `StringId` is then used to refer to that
//!   string. `measureme` strings are actually DAGs of string components so that
//!   arbitrary sharing of substrings can be done efficiently. This is useful
//!   because `event_id`s contain lots of redundant text like query names or
//!   def-path components.
//!
//! - `StringId`s can be "virtual", which means that the client picks a numeric
//!   ID according to some application-specific scheme and can later make that
//!   ID be mapped to an actual string. This is used to cheaply generate
//!   `event_id`s while the events actually occur, causing little timing
//!   distortion, and then later map those `StringId`s, in bulk, to actual
//!   `event_id` strings. This way the largest part of the tracing overhead is
//!   localized to one contiguous chunk of time.
//!
//! How are these `event_id`s generated in the compiler? For things that occur
//! infrequently (e.g. "generic activities"), we just allocate the string the
//! first time it is used and then keep the `StringId` in a hash table. This
//! is implemented in `SelfProfiler::get_or_alloc_cached_string()`.
//!
//! For queries it gets more interesting: First we need a unique numeric ID for
//! each query invocation (the `QueryInvocationId`). This ID is used as the
//! virtual `StringId` we use as `event_id` for a given event. This ID has to
//! be available both when the query is executed and later, together with the
//! query key, when we allocate the actual `event_id` strings in bulk.
//!
//! We could make the compiler generate and keep track of such an ID for each
//! query invocation but luckily we already have something that fits all the
//! requirements: the query's `DepNodeIndex`. So we use the numeric value
//! of the `DepNodeIndex` as `event_id` when recording the event and then,
//! just before the query context is dropped, we walk the entire query cache
//! (which stores the `DepNodeIndex` along with the query key for each
//! invocation) and allocate the corresponding strings together with a mapping
//! for `DepNodeIndex as StringId`.
//!
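//! A rough sketch of this two-phase scheme, for illustration only (here
//! `profiler` is a `SelfProfiler`, `dep_node_index` stands in for the query's
//! numeric `DepNodeIndex` value, and the query string is made up):
//!
//! ```ignore
//! // While the query runs: record the event under a virtual `StringId`
//! // derived from the numeric invocation ID. No string is allocated here.
//! let event_id = EventId::from_virtual(StringId::new_virtual(dep_node_index));
//!
//! // Just before the query context is dropped: allocate the real string and
//! // map the virtual ID to it, in bulk for all invocations.
//! let string_id = profiler.alloc_string("typeck(some::item)");
//! profiler.map_query_invocation_id_to_string(QueryInvocationId(dep_node_index), string_id);
//! ```
//!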
//! [mm]: https://github.com/rust-lang/measureme/

use crate::cold_path;
use crate::fx::FxHashMap;

use std::borrow::Borrow;
use std::collections::hash_map::Entry;
use std::error::Error;
use std::fs;
use std::path::Path;
use std::process;
use std::sync::Arc;
use std::time::{Duration, Instant};

pub use measureme::EventId;
use measureme::{EventIdBuilder, Profiler, SerializableString, StringId};
use parking_lot::RwLock;
use smallvec::SmallVec;

bitflags::bitflags! {
    struct EventFilter: u32 {
        const GENERIC_ACTIVITIES = 1 << 0;
        const QUERY_PROVIDERS = 1 << 1;
        const QUERY_CACHE_HITS = 1 << 2;
        const QUERY_BLOCKED = 1 << 3;
        const INCR_CACHE_LOADS = 1 << 4;

        const QUERY_KEYS = 1 << 5;
        const FUNCTION_ARGS = 1 << 6;
        const LLVM = 1 << 7;
        const INCR_RESULT_HASHING = 1 << 8;
        const ARTIFACT_SIZES = 1 << 9;

        const DEFAULT = Self::GENERIC_ACTIVITIES.bits |
            Self::QUERY_PROVIDERS.bits |
            Self::QUERY_BLOCKED.bits |
            Self::INCR_CACHE_LOADS.bits |
            Self::INCR_RESULT_HASHING.bits |
            Self::ARTIFACT_SIZES.bits;

        const ARGS = Self::QUERY_KEYS.bits | Self::FUNCTION_ARGS.bits;
    }
}

// keep this in sync with the `-Z self-profile-events` help message in rustc_session/options.rs
const EVENT_FILTERS_BY_NAME: &[(&str, EventFilter)] = &[
    ("none", EventFilter::empty()),
    ("all", EventFilter::all()),
    ("default", EventFilter::DEFAULT),
    ("generic-activity", EventFilter::GENERIC_ACTIVITIES),
    ("query-provider", EventFilter::QUERY_PROVIDERS),
    ("query-cache-hit", EventFilter::QUERY_CACHE_HITS),
    ("query-blocked", EventFilter::QUERY_BLOCKED),
    ("incr-cache-load", EventFilter::INCR_CACHE_LOADS),
    ("query-keys", EventFilter::QUERY_KEYS),
    ("function-args", EventFilter::FUNCTION_ARGS),
    ("args", EventFilter::ARGS),
    ("llvm", EventFilter::LLVM),
    ("incr-result-hashing", EventFilter::INCR_RESULT_HASHING),
    ("artifact-sizes", EventFilter::ARTIFACT_SIZES),
];

/// Something that uniquely identifies a query invocation.
pub struct QueryInvocationId(pub u32);

/// A reference to the SelfProfiler. It can be cloned and sent across thread
/// boundaries at will.
#[derive(Clone)]
pub struct SelfProfilerRef {
    // This field is `None` if self-profiling is disabled for the current
    // compilation session.
    profiler: Option<Arc<SelfProfiler>>,

    // We store the filter mask directly in the reference because that doesn't
    // cost anything and allows for filtering without checking if the profiler
    // is actually enabled.
    event_filter_mask: EventFilter,

    // Print verbose generic activities to stderr?
    print_verbose_generic_activities: bool,
}

impl SelfProfilerRef {
    pub fn new(
        profiler: Option<Arc<SelfProfiler>>,
        print_verbose_generic_activities: bool,
    ) -> SelfProfilerRef {
        // If there is no SelfProfiler then the filter mask is set to NONE,
        // ensuring that nothing ever tries to actually access it.
        let event_filter_mask =
            profiler.as_ref().map_or(EventFilter::empty(), |p| p.event_filter_mask);

        SelfProfilerRef { profiler, event_filter_mask, print_verbose_generic_activities }
    }

    /// This shim makes sure that calls only get executed if the filter mask
    /// lets them pass. It also contains some trickery to make sure that
    /// code is optimized for non-profiling compilation sessions, i.e. anything
    /// past the filter check is never inlined so it doesn't clutter the fast
    /// path.
    #[inline(always)]
    fn exec<F>(&self, event_filter: EventFilter, f: F) -> TimingGuard<'_>
    where
        F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
    {
        #[inline(never)]
        #[cold]
        fn cold_call<F>(profiler_ref: &SelfProfilerRef, f: F) -> TimingGuard<'_>
        where
            F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
        {
            let profiler = profiler_ref.profiler.as_ref().unwrap();
            f(profiler)
        }

        if self.event_filter_mask.contains(event_filter) {
            cold_call(self, f)
        } else {
            TimingGuard::none()
        }
    }

    /// Start profiling a verbose generic activity. Profiling continues until the
    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
    /// a measureme event, "verbose" generic activities also print a timing entry to
    /// stderr if the compiler is invoked with -Ztime-passes.
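    ///
    /// A usage sketch (the pass name and `do_expensive_pass` are illustrative,
    /// and `self.prof` assumes access through a `SelfProfilerRef` field):
    ///
    /// ```ignore
    /// let result = self.prof.verbose_generic_activity("some_expensive_pass").run(|| {
    ///     // ... do the actual work; the timing entry is printed when the
    ///     // returned guard is dropped.
    ///     do_expensive_pass()
    /// });
    /// ```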
    pub fn verbose_generic_activity(&self, event_label: &'static str) -> VerboseTimingGuard<'_> {
        let message =
            if self.print_verbose_generic_activities { Some(event_label.to_owned()) } else { None };

        VerboseTimingGuard::start(message, self.generic_activity(event_label))
    }

    /// Like `verbose_generic_activity`, but with an extra arg.
    pub fn verbose_generic_activity_with_arg<A>(
        &self,
        event_label: &'static str,
        event_arg: A,
    ) -> VerboseTimingGuard<'_>
    where
        A: Borrow<str> + Into<String>,
    {
        let message = if self.print_verbose_generic_activities {
            Some(format!("{}({})", event_label, event_arg.borrow()))
        } else {
            None
        };

        VerboseTimingGuard::start(message, self.generic_activity_with_arg(event_label, event_arg))
    }

    /// Start profiling a generic activity. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
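    ///
    /// A usage sketch (the label is illustrative, and `self.prof` assumes
    /// access through a `SelfProfilerRef` field):
    ///
    /// ```ignore
    /// let _timer = self.prof.generic_activity("codegen_crate");
    /// // ... do the work; the event ends when `_timer` goes out of scope.
    /// ```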
    #[inline(always)]
    pub fn generic_activity(&self, event_label: &'static str) -> TimingGuard<'_> {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            let event_id = EventId::from_label(event_label);
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Start profiling a generic activity identified by the given `EventId`.
    /// Profiling continues until the TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn generic_activity_with_event_id(&self, event_id: EventId) -> TimingGuard<'_> {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Start profiling a generic activity. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn generic_activity_with_arg<A>(
        &self,
        event_label: &'static str,
        event_arg: A,
    ) -> TimingGuard<'_>
    where
        A: Borrow<str> + Into<String>,
    {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let builder = EventIdBuilder::new(&profiler.profiler);
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
                let event_arg = profiler.get_or_alloc_cached_string(event_arg);
                builder.from_label_and_arg(event_label, event_arg)
            } else {
                builder.from_label(event_label)
            };
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Start profiling a generic activity, allowing costly arguments to be recorded. Profiling
    /// continues until the `TimingGuard` returned from this call is dropped.
    ///
    /// If the arguments to a generic activity are cheap to create, use `generic_activity_with_arg`
    /// or `generic_activity_with_args` for their simpler API. However, if they are costly or
    /// require allocation in sufficiently hot contexts, then this allows for a closure to be called
    /// only when arguments were asked to be recorded via `-Z self-profile-events=args`.
    ///
    /// In this case, the closure will be passed a `&mut EventArgRecorder`, to help with recording
    /// one or many arguments within the generic activity being profiled, by calling its
    /// `record_arg` method for example.
    ///
    /// This `EventArgRecorder` may implement more specific traits from other rustc crates, e.g. for
    /// richer handling of rustc-specific argument types, while keeping this single entry-point API
    /// for recording arguments.
    ///
    /// Note: recording at least one argument is *required* for the self-profiler to create the
    /// `TimingGuard`. A panic will be triggered if that doesn't happen. This function exists
    /// explicitly to record arguments, so it fails loudly when there are none to record.
    ///
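    /// A usage sketch (the label, `object_files`, and the `prof` field are illustrative):
    ///
    /// ```ignore
    /// let _timer = self.prof.generic_activity_with_arg_recorder("link_binary", |recorder| {
    ///     // The closure only runs when argument recording is enabled
    ///     // (e.g. `-Z self-profile-events=default,args`), so building the
    ///     // argument string is free otherwise.
    ///     recorder.record_arg(format!("{} object files", object_files.len()));
    /// });
    /// ```
    ///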
    #[inline(always)]
    pub fn generic_activity_with_arg_recorder<F>(
        &self,
        event_label: &'static str,
        mut f: F,
    ) -> TimingGuard<'_>
    where
        F: FnMut(&mut EventArgRecorder<'_>),
    {
        // Ensure this event will only be recorded when self-profiling is turned on.
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let builder = EventIdBuilder::new(&profiler.profiler);
            let event_label = profiler.get_or_alloc_cached_string(event_label);

            // Ensure the closure to create event arguments will only be called when argument
            // recording is turned on.
            let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
                // Set up the builder and call the user-provided closure to record potentially
                // costly event arguments.
                let mut recorder = EventArgRecorder { profiler, args: SmallVec::new() };
                f(&mut recorder);

                // It is expected that the closure will record at least one argument. If that
                // doesn't happen, it's a bug: we've been explicitly called in order to record
                // arguments, so we fail loudly when there are none to record.
                if recorder.args.is_empty() {
                    panic!(
                        "The closure passed to `generic_activity_with_arg_recorder` needs to \
                         record at least one argument"
                    );
                }

                builder.from_label_and_args(event_label, &recorder.args)
            } else {
                builder.from_label(event_label)
            };
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Record the size of an artifact that the compiler produces
    ///
    /// `artifact_kind` is the class of artifact (e.g., query_cache, object_file, etc.)
    /// `artifact_name` is an identifier to the specific artifact being stored (usually a filename)
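    ///
    /// A usage sketch (the kind, file name, and `file_size_in_bytes` are illustrative):
    ///
    /// ```ignore
    /// self.prof.artifact_size("query_cache", "query-cache.bin", file_size_in_bytes);
    /// ```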
    #[inline(always)]
    pub fn artifact_size<A>(&self, artifact_kind: &str, artifact_name: A, size: u64)
    where
        A: Borrow<str> + Into<String>,
    {
        drop(self.exec(EventFilter::ARTIFACT_SIZES, |profiler| {
            let builder = EventIdBuilder::new(&profiler.profiler);
            let event_label = profiler.get_or_alloc_cached_string(artifact_kind);
            let event_arg = profiler.get_or_alloc_cached_string(artifact_name);
            let event_id = builder.from_label_and_arg(event_label, event_arg);
            let thread_id = get_thread_id();

            profiler.profiler.record_integer_event(
                profiler.artifact_size_event_kind,
                event_id,
                thread_id,
                size,
            );

            TimingGuard::none()
        }))
    }

    #[inline(always)]
    pub fn generic_activity_with_args(
        &self,
        event_label: &'static str,
        event_args: &[String],
    ) -> TimingGuard<'_> {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let builder = EventIdBuilder::new(&profiler.profiler);
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
                let event_args: Vec<_> = event_args
                    .iter()
                    .map(|s| profiler.get_or_alloc_cached_string(&s[..]))
                    .collect();
                builder.from_label_and_args(event_label, &event_args)
            } else {
                builder.from_label(event_label)
            };
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Start profiling a query provider. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn query_provider(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::QUERY_PROVIDERS, |profiler| {
            TimingGuard::start(profiler, profiler.query_event_kind, EventId::INVALID)
        })
    }

    /// Record a query in-memory cache hit.
    #[inline(always)]
    pub fn query_cache_hit(&self, query_invocation_id: QueryInvocationId) {
        self.instant_query_event(
            |profiler| profiler.query_cache_hit_event_kind,
            query_invocation_id,
            EventFilter::QUERY_CACHE_HITS,
        );
    }

    /// Start profiling a query being blocked on a concurrent execution.
    /// Profiling continues until the TimingGuard returned from this call is
    /// dropped.
    #[inline(always)]
    pub fn query_blocked(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::QUERY_BLOCKED, |profiler| {
            TimingGuard::start(profiler, profiler.query_blocked_event_kind, EventId::INVALID)
        })
    }

    /// Start profiling how long it takes to load a query result from the
    /// incremental compilation on-disk cache. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn incr_cache_loading(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::INCR_CACHE_LOADS, |profiler| {
            TimingGuard::start(
                profiler,
                profiler.incremental_load_result_event_kind,
                EventId::INVALID,
            )
        })
    }

    /// Start profiling how long it takes to hash query results for incremental compilation.
    /// Profiling continues until the TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn incr_result_hashing(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::INCR_RESULT_HASHING, |profiler| {
            TimingGuard::start(
                profiler,
                profiler.incremental_result_hashing_event_kind,
                EventId::INVALID,
            )
        })
    }

    #[inline(always)]
    fn instant_query_event(
        &self,
        event_kind: fn(&SelfProfiler) -> StringId,
        query_invocation_id: QueryInvocationId,
        event_filter: EventFilter,
    ) {
        drop(self.exec(event_filter, |profiler| {
            let event_id = StringId::new_virtual(query_invocation_id.0);
            let thread_id = get_thread_id();

            profiler.profiler.record_instant_event(
                event_kind(profiler),
                EventId::from_virtual(event_id),
                thread_id,
            );

            TimingGuard::none()
        }));
    }

    pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
        if let Some(profiler) = &self.profiler {
            f(profiler)
        }
    }

    /// Gets a `StringId` for the given string. This method makes sure that
    /// any strings going through it will only be allocated once in the
    /// profiling data.
    /// Returns `None` if self-profiling is not enabled.
    pub fn get_or_alloc_cached_string(&self, s: &str) -> Option<StringId> {
        self.profiler.as_ref().map(|p| p.get_or_alloc_cached_string(s))
    }

    #[inline]
    pub fn enabled(&self) -> bool {
        self.profiler.is_some()
    }

    #[inline]
    pub fn llvm_recording_enabled(&self) -> bool {
        self.event_filter_mask.contains(EventFilter::LLVM)
    }
    #[inline]
    pub fn get_self_profiler(&self) -> Option<Arc<SelfProfiler>> {
        self.profiler.clone()
    }
}

/// A helper for recording costly arguments to self-profiling events. Used with
/// `SelfProfilerRef::generic_activity_with_arg_recorder`.
pub struct EventArgRecorder<'p> {
    /// The `SelfProfiler` used to intern the event arguments that users will ask to record.
    profiler: &'p SelfProfiler,

    /// The interned event arguments to be recorded in the generic activity event.
    ///
    /// The most common case, when actually recording event arguments, is to have one argument;
    /// recording two only happens in a couple of places.
    args: SmallVec<[StringId; 2]>,
}

impl EventArgRecorder<'_> {
    /// Records a single argument within the current generic activity being profiled.
    ///
    /// Note: when self-profiling with costly event arguments, at least one argument
    /// needs to be recorded. A panic will be triggered if that doesn't happen.
    pub fn record_arg<A>(&mut self, event_arg: A)
    where
        A: Borrow<str> + Into<String>,
    {
        let event_arg = self.profiler.get_or_alloc_cached_string(event_arg);
        self.args.push(event_arg);
    }
}

pub struct SelfProfiler {
    profiler: Profiler,
    event_filter_mask: EventFilter,

    string_cache: RwLock<FxHashMap<String, StringId>>,

    query_event_kind: StringId,
    generic_activity_event_kind: StringId,
    incremental_load_result_event_kind: StringId,
    incremental_result_hashing_event_kind: StringId,
    query_blocked_event_kind: StringId,
    query_cache_hit_event_kind: StringId,
    artifact_size_event_kind: StringId,
}

impl SelfProfiler {
    pub fn new(
        output_directory: &Path,
        crate_name: Option<&str>,
        event_filters: Option<&[String]>,
        counter_name: &str,
    ) -> Result<SelfProfiler, Box<dyn Error + Send + Sync>> {
        fs::create_dir_all(output_directory)?;

        let crate_name = crate_name.unwrap_or("unknown-crate");
        // HACK(eddyb) we need to pad the PID, strange as it may seem, as its
        // length can behave as a source of entropy for heap addresses, when
        // ASLR is disabled and the heap is otherwise deterministic.
        let pid: u32 = process::id();
        let filename = format!("{crate_name}-{pid:07}.rustc_profile");
        let path = output_directory.join(&filename);
        let profiler =
            Profiler::with_counter(&path, measureme::counters::Counter::by_name(counter_name)?)?;

        let query_event_kind = profiler.alloc_string("Query");
        let generic_activity_event_kind = profiler.alloc_string("GenericActivity");
        let incremental_load_result_event_kind = profiler.alloc_string("IncrementalLoadResult");
        let incremental_result_hashing_event_kind =
            profiler.alloc_string("IncrementalResultHashing");
        let query_blocked_event_kind = profiler.alloc_string("QueryBlocked");
        let query_cache_hit_event_kind = profiler.alloc_string("QueryCacheHit");
        let artifact_size_event_kind = profiler.alloc_string("ArtifactSize");

        let mut event_filter_mask = EventFilter::empty();

        if let Some(event_filters) = event_filters {
            let mut unknown_events = vec![];
            for item in event_filters {
                if let Some(&(_, mask)) =
                    EVENT_FILTERS_BY_NAME.iter().find(|&(name, _)| name == item)
                {
                    event_filter_mask |= mask;
                } else {
                    unknown_events.push(item.clone());
                }
            }

            // Warn about any unknown event names
            if !unknown_events.is_empty() {
                unknown_events.sort();
                unknown_events.dedup();

                warn!(
                    "Unknown self-profiler events specified: {}. Available options are: {}.",
                    unknown_events.join(", "),
                    EVENT_FILTERS_BY_NAME
                        .iter()
                        .map(|&(name, _)| name.to_string())
                        .collect::<Vec<_>>()
                        .join(", ")
                );
            }
        } else {
            event_filter_mask = EventFilter::DEFAULT;
        }

        Ok(SelfProfiler {
            profiler,
            event_filter_mask,
            string_cache: RwLock::new(FxHashMap::default()),
            query_event_kind,
            generic_activity_event_kind,
            incremental_load_result_event_kind,
            incremental_result_hashing_event_kind,
            query_blocked_event_kind,
            query_cache_hit_event_kind,
            artifact_size_event_kind,
        })
    }

    /// Allocates a new string in the profiling data. Does not do any caching
    /// or deduplication.
    pub fn alloc_string<STR: SerializableString + ?Sized>(&self, s: &STR) -> StringId {
        self.profiler.alloc_string(s)
    }

    /// Gets a `StringId` for the given string. This method makes sure that
    /// any strings going through it will only be allocated once in the
    /// profiling data.
    pub fn get_or_alloc_cached_string<A>(&self, s: A) -> StringId
    where
        A: Borrow<str> + Into<String>,
    {
        // Only acquire a read-lock first since we assume that the string is
        // already present in the common case.
        {
            let string_cache = self.string_cache.read();

            if let Some(&id) = string_cache.get(s.borrow()) {
                return id;
            }
        }

        let mut string_cache = self.string_cache.write();
        // Check if the string has already been added in the small time window
        // between dropping the read lock and acquiring the write lock.
        match string_cache.entry(s.into()) {
            Entry::Occupied(e) => *e.get(),
            Entry::Vacant(e) => {
                let string_id = self.profiler.alloc_string(&e.key()[..]);
                *e.insert(string_id)
            }
        }
    }

    pub fn map_query_invocation_id_to_string(&self, from: QueryInvocationId, to: StringId) {
        let from = StringId::new_virtual(from.0);
        self.profiler.map_virtual_to_concrete_string(from, to);
    }

    pub fn bulk_map_query_invocation_id_to_single_string<I>(&self, from: I, to: StringId)
    where
        I: Iterator<Item = QueryInvocationId> + ExactSizeIterator,
    {
        let from = from.map(|qid| StringId::new_virtual(qid.0));
        self.profiler.bulk_map_virtual_to_single_concrete_string(from, to);
    }

    pub fn query_key_recording_enabled(&self) -> bool {
        self.event_filter_mask.contains(EventFilter::QUERY_KEYS)
    }

    pub fn event_id_builder(&self) -> EventIdBuilder<'_> {
        EventIdBuilder::new(&self.profiler)
    }
}

#[must_use]
pub struct TimingGuard<'a>(Option<measureme::TimingGuard<'a>>);

impl<'a> TimingGuard<'a> {
    #[inline]
    pub fn start(
        profiler: &'a SelfProfiler,
        event_kind: StringId,
        event_id: EventId,
    ) -> TimingGuard<'a> {
        let thread_id = get_thread_id();
        let raw_profiler = &profiler.profiler;
        let timing_guard =
            raw_profiler.start_recording_interval_event(event_kind, event_id, thread_id);
        TimingGuard(Some(timing_guard))
    }

    #[inline]
    pub fn finish_with_query_invocation_id(self, query_invocation_id: QueryInvocationId) {
        if let Some(guard) = self.0 {
            cold_path(|| {
                let event_id = StringId::new_virtual(query_invocation_id.0);
                let event_id = EventId::from_virtual(event_id);
                guard.finish_with_override_event_id(event_id);
            });
        }
    }

    #[inline]
    pub fn none() -> TimingGuard<'a> {
        TimingGuard(None)
    }

    #[inline(always)]
    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
        let _timer = self;
        f()
    }
}

#[must_use]
pub struct VerboseTimingGuard<'a> {
    start_and_message: Option<(Instant, Option<usize>, String)>,
    _guard: TimingGuard<'a>,
}

impl<'a> VerboseTimingGuard<'a> {
    pub fn start(message: Option<String>, _guard: TimingGuard<'a>) -> Self {
        VerboseTimingGuard {
            _guard,
            start_and_message: message.map(|msg| (Instant::now(), get_resident_set_size(), msg)),
        }
    }

    #[inline(always)]
    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
        let _timer = self;
        f()
    }
}

impl Drop for VerboseTimingGuard<'_> {
    fn drop(&mut self) {
        if let Some((start_time, start_rss, ref message)) = self.start_and_message {
            let end_rss = get_resident_set_size();
            let dur = start_time.elapsed();
            print_time_passes_entry(message, dur, start_rss, end_rss);
        }
    }
}

pub fn print_time_passes_entry(
    what: &str,
    dur: Duration,
    start_rss: Option<usize>,
    end_rss: Option<usize>,
) {
    // Print the pass if its duration is greater than 5 ms, or it changed the
    // measured RSS.
    let is_notable = || {
        if dur.as_millis() > 5 {
            return true;
        }

        if let (Some(start_rss), Some(end_rss)) = (start_rss, end_rss) {
            let change_rss = end_rss.abs_diff(start_rss);
            if change_rss > 0 {
                return true;
            }
        }

        false
    };
    if !is_notable() {
        return;
    }

    let rss_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as usize;
    let rss_change_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as i128;

    let mem_string = match (start_rss, end_rss) {
        (Some(start_rss), Some(end_rss)) => {
            let change_rss = end_rss as i128 - start_rss as i128;

            format!(
                "; rss: {:>4}MB -> {:>4}MB ({:>+5}MB)",
                rss_to_mb(start_rss),
                rss_to_mb(end_rss),
                rss_change_to_mb(change_rss),
            )
        }
        (Some(start_rss), None) => format!("; rss start: {:>4}MB", rss_to_mb(start_rss)),
        (None, Some(end_rss)) => format!("; rss end: {:>4}MB", rss_to_mb(end_rss)),
        (None, None) => String::new(),
    };

    eprintln!("time: {:>7}{}\t{}", duration_to_secs_str(dur), mem_string, what);
}

// Hack up our own formatting for the duration to make it easier for scripts
// to parse (always use the same number of decimal places and the same unit).
pub fn duration_to_secs_str(dur: std::time::Duration) -> String {
    format!("{:.3}", dur.as_secs_f64())
}

fn get_thread_id() -> u32 {
    std::thread::current().id().as_u64().get() as u32
}

// Memory reporting
cfg_if! {
    if #[cfg(windows)] {
        pub fn get_resident_set_size() -> Option<usize> {
            use std::mem::{self, MaybeUninit};
            use winapi::shared::minwindef::DWORD;
            use winapi::um::processthreadsapi::GetCurrentProcess;
            use winapi::um::psapi::{GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS};

            let mut pmc = MaybeUninit::<PROCESS_MEMORY_COUNTERS>::uninit();
            match unsafe {
                GetProcessMemoryInfo(GetCurrentProcess(), pmc.as_mut_ptr(), mem::size_of_val(&pmc) as DWORD)
            } {
                0 => None,
                _ => {
                    let pmc = unsafe { pmc.assume_init() };
                    Some(pmc.WorkingSetSize as usize)
                }
            }
        }
    } else if #[cfg(target_os = "macos")] {
        pub fn get_resident_set_size() -> Option<usize> {
            use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO};
            use std::mem;
            const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int;

            unsafe {
                let mut info: proc_taskinfo = mem::zeroed();
                let info_ptr = &mut info as *mut proc_taskinfo as *mut c_void;
                let pid = getpid() as c_int;
                let ret = proc_pidinfo(pid, PROC_PIDTASKINFO, 0, info_ptr, PROC_TASKINFO_SIZE);
                if ret == PROC_TASKINFO_SIZE {
                    Some(info.pti_resident_size as usize)
                } else {
                    None
                }
            }
        }
    } else if #[cfg(unix)] {
        pub fn get_resident_set_size() -> Option<usize> {
            // Field 1 of `/proc/self/statm` is the resident set size, measured
            // in pages (assumed to be 4096 bytes here).
            let field = 1;
            let contents = fs::read("/proc/self/statm").ok()?;
            let contents = String::from_utf8(contents).ok()?;
            let s = contents.split_whitespace().nth(field)?;
            let npages = s.parse::<usize>().ok()?;
            Some(npages * 4096)
        }
    } else {
        pub fn get_resident_set_size() -> Option<usize> {
            None
        }
    }
}