// Source: compiler/rustc_data_structures/src/profiling.rs (rustc 1.64.0)
1 //! # Rust Compiler Self-Profiling
2 //!
3 //! This module implements the basic framework for the compiler's self-
4 //! profiling support. It provides the `SelfProfiler` type which enables
5 //! recording "events". An event is something that starts and ends at a given
6 //! point in time and has an ID and a kind attached to it. This allows for
7 //! tracing the compiler's activity.
8 //!
9 //! Internally this module uses the custom tailored [measureme][mm] crate for
10 //! efficiently recording events to disk in a compact format that can be
11 //! post-processed and analyzed by the suite of tools in the `measureme`
12 //! project. The highest priority for the tracing framework is on incurring as
13 //! little overhead as possible.
14 //!
15 //!
16 //! ## Event Overview
17 //!
18 //! Events have a few properties:
19 //!
20 //! - The `event_kind` designates the broad category of an event (e.g. does it
21 //! correspond to the execution of a query provider or to loading something
22 //! from the incr. comp. on-disk cache, etc).
23 //! - The `event_id` designates the query invocation or function call it
24 //! corresponds to, possibly including the query key or function arguments.
25 //! - Each event stores the ID of the thread it was recorded on.
26 //! - The timestamp stores beginning and end of the event, or the single point
27 //! in time it occurred at for "instant" events.
28 //!
29 //!
30 //! ## Event Filtering
31 //!
32 //! Event generation can be filtered by event kind. Recording all possible
33 //! events generates a lot of data, much of which is not needed for most kinds
34 //! of analysis. So, in order to keep overhead as low as possible for a given
35 //! use case, the `SelfProfiler` will only record the kinds of events that
36 //! pass the filter specified as a command line argument to the compiler.
37 //!
38 //!
39 //! ## `event_id` Assignment
40 //!
41 //! As far as `measureme` is concerned, `event_id`s are just strings. However,
42 //! it would incur too much overhead to generate and persist each `event_id`
43 //! string at the point where the event is recorded. In order to make this more
44 //! efficient `measureme` has two features:
45 //!
46 //! - Strings can share their content, so that re-occurring parts don't have to
47 //! be copied over and over again. One allocates a string in `measureme` and
48 //! gets back a `StringId`. This `StringId` is then used to refer to that
49 //! string. `measureme` strings are actually DAGs of string components so that
50 //! arbitrary sharing of substrings can be done efficiently. This is useful
51 //! because `event_id`s contain lots of redundant text like query names or
52 //! def-path components.
53 //!
54 //! - `StringId`s can be "virtual" which means that the client picks a numeric
55 //! ID according to some application-specific scheme and can later make that
56 //! ID be mapped to an actual string. This is used to cheaply generate
57 //! `event_id`s while the events actually occur, causing little timing
58 //! distortion, and then later map those `StringId`s, in bulk, to actual
59 //! `event_id` strings. This way the largest part of the tracing overhead is
60 //! localized to one contiguous chunk of time.
61 //!
62 //! How are these `event_id`s generated in the compiler? For things that occur
63 //! infrequently (e.g. "generic activities"), we just allocate the string the
64 //! first time it is used and then keep the `StringId` in a hash table. This
65 //! is implemented in `SelfProfiler::get_or_alloc_cached_string()`.
66 //!
67 //! For queries it gets more interesting: First we need a unique numeric ID for
68 //! each query invocation (the `QueryInvocationId`). This ID is used as the
69 //! virtual `StringId` we use as `event_id` for a given event. This ID has to
70 //! be available both when the query is executed and later, together with the
71 //! query key, when we allocate the actual `event_id` strings in bulk.
72 //!
73 //! We could make the compiler generate and keep track of such an ID for each
74 //! query invocation but luckily we already have something that fits all the
75 //! the requirements: the query's `DepNodeIndex`. So we use the numeric value
76 //! of the `DepNodeIndex` as `event_id` when recording the event and then,
77 //! just before the query context is dropped, we walk the entire query cache
78 //! (which stores the `DepNodeIndex` along with the query key for each
79 //! invocation) and allocate the corresponding strings together with a mapping
80 //! for `DepNodeIndex as StringId`.
81 //!
82 //! [mm]: https://github.com/rust-lang/measureme/
83
84 use crate::cold_path;
85 use crate::fx::FxHashMap;
86
87 use std::borrow::Borrow;
88 use std::collections::hash_map::Entry;
89 use std::convert::Into;
90 use std::error::Error;
91 use std::fs;
92 use std::path::Path;
93 use std::process;
94 use std::sync::Arc;
95 use std::time::{Duration, Instant};
96
97 pub use measureme::EventId;
98 use measureme::{EventIdBuilder, Profiler, SerializableString, StringId};
99 use parking_lot::RwLock;
100 use smallvec::SmallVec;
101
bitflags::bitflags! {
    /// Bit mask selecting which categories of events get recorded.
    /// The user-facing names of these flags live in `EVENT_FILTERS_BY_NAME`.
    struct EventFilter: u32 {
        const GENERIC_ACTIVITIES = 1 << 0;
        const QUERY_PROVIDERS = 1 << 1;
        const QUERY_CACHE_HITS = 1 << 2;
        const QUERY_BLOCKED = 1 << 3;
        const INCR_CACHE_LOADS = 1 << 4;

        const QUERY_KEYS = 1 << 5;
        const FUNCTION_ARGS = 1 << 6;
        const LLVM = 1 << 7;
        const INCR_RESULT_HASHING = 1 << 8;
        const ARTIFACT_SIZES = 1 << 9;

        // What gets recorded when no explicit `-Z self-profile-events` filter
        // is given. Note that e.g. QUERY_CACHE_HITS and the argument-recording
        // flags are deliberately not part of the default set.
        const DEFAULT = Self::GENERIC_ACTIVITIES.bits |
                        Self::QUERY_PROVIDERS.bits |
                        Self::QUERY_BLOCKED.bits |
                        Self::INCR_CACHE_LOADS.bits |
                        Self::INCR_RESULT_HASHING.bits |
                        Self::ARTIFACT_SIZES.bits;

        // Convenience alias enabling both kinds of argument recording.
        const ARGS = Self::QUERY_KEYS.bits | Self::FUNCTION_ARGS.bits;
    }
}
126
127 // keep this in sync with the `-Z self-profile-events` help message in rustc_session/options.rs
128 const EVENT_FILTERS_BY_NAME: &[(&str, EventFilter)] = &[
129 ("none", EventFilter::empty()),
130 ("all", EventFilter::all()),
131 ("default", EventFilter::DEFAULT),
132 ("generic-activity", EventFilter::GENERIC_ACTIVITIES),
133 ("query-provider", EventFilter::QUERY_PROVIDERS),
134 ("query-cache-hit", EventFilter::QUERY_CACHE_HITS),
135 ("query-blocked", EventFilter::QUERY_BLOCKED),
136 ("incr-cache-load", EventFilter::INCR_CACHE_LOADS),
137 ("query-keys", EventFilter::QUERY_KEYS),
138 ("function-args", EventFilter::FUNCTION_ARGS),
139 ("args", EventFilter::ARGS),
140 ("llvm", EventFilter::LLVM),
141 ("incr-result-hashing", EventFilter::INCR_RESULT_HASHING),
142 ("artifact-sizes", EventFilter::ARTIFACT_SIZES),
143 ];
144
/// Something that uniquely identifies a query invocation.
///
/// The numeric value doubles as the *virtual* `StringId` used as the event's
/// `event_id` (see `StringId::new_virtual` in `instant_query_event` and the
/// `map_query_invocation_id_to_string` methods below).
pub struct QueryInvocationId(pub u32);
147
/// A reference to the SelfProfiler. It can be cloned and sent across thread
/// boundaries at will.
#[derive(Clone)]
pub struct SelfProfilerRef {
    // This field is `None` if self-profiling is disabled for the current
    // compilation session.
    profiler: Option<Arc<SelfProfiler>>,

    // We store the filter mask directly in the reference because that doesn't
    // cost anything and allows for filtering without checking if the profiler
    // is actually enabled.
    event_filter_mask: EventFilter,

    // Print verbose generic activities to stderr (via `print_time_passes_entry`).
    print_verbose_generic_activities: bool,

    // Print extra verbose generic activities to stderr.
    print_extra_verbose_generic_activities: bool,
}
167
impl SelfProfilerRef {
    /// Creates a new reference. When `profiler` is `None`, the filter mask is
    /// forced to empty so that every event check below short-circuits cheaply.
    pub fn new(
        profiler: Option<Arc<SelfProfiler>>,
        print_verbose_generic_activities: bool,
        print_extra_verbose_generic_activities: bool,
    ) -> SelfProfilerRef {
        // If there is no SelfProfiler then the filter mask is set to NONE,
        // ensuring that nothing ever tries to actually access it.
        let event_filter_mask =
            profiler.as_ref().map_or(EventFilter::empty(), |p| p.event_filter_mask);

        SelfProfilerRef {
            profiler,
            event_filter_mask,
            print_verbose_generic_activities,
            print_extra_verbose_generic_activities,
        }
    }

    /// This shim makes sure that calls only get executed if the filter mask
    /// lets them pass. It also contains some trickery to make sure that
    /// code is optimized for non-profiling compilation sessions, i.e. anything
    /// past the filter check is never inlined so it doesn't clutter the fast
    /// path.
    #[inline(always)]
    fn exec<F>(&self, event_filter: EventFilter, f: F) -> TimingGuard<'_>
    where
        F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
    {
        #[inline(never)]
        #[cold]
        fn cold_call<F>(profiler_ref: &SelfProfilerRef, f: F) -> TimingGuard<'_>
        where
            F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
        {
            // This `unwrap` cannot fail: the filter mask is only non-empty
            // when a profiler was supplied (see `new` above).
            let profiler = profiler_ref.profiler.as_ref().unwrap();
            f(&**profiler)
        }

        if self.event_filter_mask.contains(event_filter) {
            cold_call(self, f)
        } else {
            TimingGuard::none()
        }
    }

    /// Start profiling a verbose generic activity. Profiling continues until the
    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
    /// a measureme event, "verbose" generic activities also print a timing entry to
    /// stderr if the compiler is invoked with -Ztime or -Ztime-passes.
    pub fn verbose_generic_activity<'a>(
        &'a self,
        event_label: &'static str,
    ) -> VerboseTimingGuard<'a> {
        let message =
            if self.print_verbose_generic_activities { Some(event_label.to_owned()) } else { None };

        VerboseTimingGuard::start(message, self.generic_activity(event_label))
    }

    /// Start profiling an extra verbose generic activity. Profiling continues until the
    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
    /// a measureme event, "extra verbose" generic activities also print a timing entry to
    /// stderr if the compiler is invoked with -Ztime-passes.
    pub fn extra_verbose_generic_activity<'a, A>(
        &'a self,
        event_label: &'static str,
        event_arg: A,
    ) -> VerboseTimingGuard<'a>
    where
        A: Borrow<str> + Into<String>,
    {
        let message = if self.print_extra_verbose_generic_activities {
            // Printed as `label(arg)`, e.g. `codegen_module(some_cgu_name)`.
            Some(format!("{}({})", event_label, event_arg.borrow()))
        } else {
            None
        };

        VerboseTimingGuard::start(message, self.generic_activity_with_arg(event_label, event_arg))
    }

    /// Start profiling a generic activity. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn generic_activity(&self, event_label: &'static str) -> TimingGuard<'_> {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            let event_id = EventId::from_label(event_label);
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Start profiling with some event filter for a given event. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn generic_activity_with_event_id(&self, event_id: EventId) -> TimingGuard<'_> {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Start profiling a generic activity. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn generic_activity_with_arg<A>(
        &self,
        event_label: &'static str,
        event_arg: A,
    ) -> TimingGuard<'_>
    where
        A: Borrow<str> + Into<String>,
    {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let builder = EventIdBuilder::new(&profiler.profiler);
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            // The argument is only interned and attached when argument
            // recording was requested (`-Z self-profile-events=args`).
            let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
                let event_arg = profiler.get_or_alloc_cached_string(event_arg);
                builder.from_label_and_arg(event_label, event_arg)
            } else {
                builder.from_label(event_label)
            };
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Start profiling a generic activity, allowing costly arguments to be recorded. Profiling
    /// continues until the `TimingGuard` returned from this call is dropped.
    ///
    /// If the arguments to a generic activity are cheap to create, use `generic_activity_with_arg`
    /// or `generic_activity_with_args` for their simpler API. However, if they are costly or
    /// require allocation in sufficiently hot contexts, then this allows for a closure to be called
    /// only when arguments were asked to be recorded via `-Z self-profile-events=args`.
    ///
    /// In this case, the closure will be passed a `&mut EventArgRecorder`, to help with recording
    /// one or many arguments within the generic activity being profiled, by calling its
    /// `record_arg` method for example.
    ///
    /// This `EventArgRecorder` may implement more specific traits from other rustc crates, e.g. for
    /// richer handling of rustc-specific argument types, while keeping this single entry-point API
    /// for recording arguments.
    ///
    /// Note: recording at least one argument is *required* for the self-profiler to create the
    /// `TimingGuard`. A panic will be triggered if that doesn't happen. This function exists
    /// explicitly to record arguments, so it fails loudly when there are none to record.
    ///
    #[inline(always)]
    pub fn generic_activity_with_arg_recorder<F>(
        &self,
        event_label: &'static str,
        mut f: F,
    ) -> TimingGuard<'_>
    where
        F: FnMut(&mut EventArgRecorder<'_>),
    {
        // Ensure this event will only be recorded when self-profiling is turned on.
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let builder = EventIdBuilder::new(&profiler.profiler);
            let event_label = profiler.get_or_alloc_cached_string(event_label);

            // Ensure the closure to create event arguments will only be called when argument
            // recording is turned on.
            let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
                // Set up the builder and call the user-provided closure to record potentially
                // costly event arguments.
                let mut recorder = EventArgRecorder { profiler, args: SmallVec::new() };
                f(&mut recorder);

                // It is expected that the closure will record at least one argument. If that
                // doesn't happen, it's a bug: we've been explicitly called in order to record
                // arguments, so we fail loudly when there are none to record.
                if recorder.args.is_empty() {
                    panic!(
                        "The closure passed to `generic_activity_with_arg_recorder` needs to \
                        record at least one argument"
                    );
                }

                builder.from_label_and_args(event_label, &recorder.args)
            } else {
                builder.from_label(event_label)
            };
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Record the size of an artifact that the compiler produces
    ///
    /// `artifact_kind` is the class of artifact (e.g., query_cache, object_file, etc.)
    /// `artifact_name` is an identifier to the specific artifact being stored (usually a filename)
    #[inline(always)]
    pub fn artifact_size<A>(&self, artifact_kind: &str, artifact_name: A, size: u64)
    where
        A: Borrow<str> + Into<String>,
    {
        // `exec` returns a `#[must_use]` guard; `drop` discharges it, since an
        // integer event is recorded immediately and has no duration.
        drop(self.exec(EventFilter::ARTIFACT_SIZES, |profiler| {
            let builder = EventIdBuilder::new(&profiler.profiler);
            let event_label = profiler.get_or_alloc_cached_string(artifact_kind);
            let event_arg = profiler.get_or_alloc_cached_string(artifact_name);
            let event_id = builder.from_label_and_arg(event_label, event_arg);
            let thread_id = get_thread_id();

            profiler.profiler.record_integer_event(
                profiler.artifact_size_event_kind,
                event_id,
                thread_id,
                size,
            );

            TimingGuard::none()
        }))
    }

    /// Like `generic_activity_with_arg`, but attaching a whole slice of
    /// arguments to the event (again only when argument recording is enabled).
    #[inline(always)]
    pub fn generic_activity_with_args(
        &self,
        event_label: &'static str,
        event_args: &[String],
    ) -> TimingGuard<'_> {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let builder = EventIdBuilder::new(&profiler.profiler);
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
                let event_args: Vec<_> = event_args
                    .iter()
                    .map(|s| profiler.get_or_alloc_cached_string(&s[..]))
                    .collect();
                builder.from_label_and_args(event_label, &event_args)
            } else {
                builder.from_label(event_label)
            };
            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
        })
    }

    /// Start profiling a query provider. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn query_provider(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::QUERY_PROVIDERS, |profiler| {
            // The real `event_id` is filled in later via
            // `TimingGuard::finish_with_query_invocation_id`.
            TimingGuard::start(profiler, profiler.query_event_kind, EventId::INVALID)
        })
    }

    /// Record a query in-memory cache hit.
    #[inline(always)]
    pub fn query_cache_hit(&self, query_invocation_id: QueryInvocationId) {
        self.instant_query_event(
            |profiler| profiler.query_cache_hit_event_kind,
            query_invocation_id,
            EventFilter::QUERY_CACHE_HITS,
        );
    }

    /// Start profiling a query being blocked on a concurrent execution.
    /// Profiling continues until the TimingGuard returned from this call is
    /// dropped.
    #[inline(always)]
    pub fn query_blocked(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::QUERY_BLOCKED, |profiler| {
            TimingGuard::start(profiler, profiler.query_blocked_event_kind, EventId::INVALID)
        })
    }

    /// Start profiling how long it takes to load a query result from the
    /// incremental compilation on-disk cache. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn incr_cache_loading(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::INCR_CACHE_LOADS, |profiler| {
            TimingGuard::start(
                profiler,
                profiler.incremental_load_result_event_kind,
                EventId::INVALID,
            )
        })
    }

    /// Start profiling how long it takes to hash query results for incremental compilation.
    /// Profiling continues until the TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn incr_result_hashing(&self) -> TimingGuard<'_> {
        self.exec(EventFilter::INCR_RESULT_HASHING, |profiler| {
            TimingGuard::start(
                profiler,
                profiler.incremental_result_hashing_event_kind,
                EventId::INVALID,
            )
        })
    }

    /// Shared implementation for recording zero-duration ("instant") query
    /// events. The `QueryInvocationId` is used as a *virtual* `StringId` that
    /// is mapped to a concrete string later (see module docs).
    #[inline(always)]
    fn instant_query_event(
        &self,
        event_kind: fn(&SelfProfiler) -> StringId,
        query_invocation_id: QueryInvocationId,
        event_filter: EventFilter,
    ) {
        // As in `artifact_size`, `drop` discharges the `#[must_use]` guard.
        drop(self.exec(event_filter, |profiler| {
            let event_id = StringId::new_virtual(query_invocation_id.0);
            let thread_id = get_thread_id();

            profiler.profiler.record_instant_event(
                event_kind(profiler),
                EventId::from_virtual(event_id),
                thread_id,
            );

            TimingGuard::none()
        }));
    }

    /// Runs `f` with the underlying `SelfProfiler`, if profiling is enabled.
    pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
        if let Some(profiler) = &self.profiler {
            f(&profiler)
        }
    }

    /// Gets a `StringId` for the given string. This method makes sure that
    /// any strings going through it will only be allocated once in the
    /// profiling data.
    /// Returns `None` if the self-profiling is not enabled.
    pub fn get_or_alloc_cached_string(&self, s: &str) -> Option<StringId> {
        self.profiler.as_ref().map(|p| p.get_or_alloc_cached_string(s))
    }

    /// Whether self-profiling is enabled for this session.
    #[inline]
    pub fn enabled(&self) -> bool {
        self.profiler.is_some()
    }

    /// Whether LLVM events were requested via `-Z self-profile-events=llvm`.
    #[inline]
    pub fn llvm_recording_enabled(&self) -> bool {
        self.event_filter_mask.contains(EventFilter::LLVM)
    }
    /// Returns a clone of the underlying profiler handle, if any.
    #[inline]
    pub fn get_self_profiler(&self) -> Option<Arc<SelfProfiler>> {
        self.profiler.clone()
    }
}
507
/// A helper for recording costly arguments to self-profiling events. Used with
/// `SelfProfilerRef::generic_activity_with_arg_recorder`.
pub struct EventArgRecorder<'p> {
    /// The `SelfProfiler` used to intern the event arguments that users will ask to record.
    profiler: &'p SelfProfiler,

    /// The interned event arguments to be recorded in the generic activity event.
    ///
    /// The most common case, when actually recording event arguments, is to have one argument. Then
    /// followed by recording two, in a couple places (hence the inline capacity of 2).
    args: SmallVec<[StringId; 2]>,
}
520
521 impl EventArgRecorder<'_> {
522 /// Records a single argument within the current generic activity being profiled.
523 ///
524 /// Note: when self-profiling with costly event arguments, at least one argument
525 /// needs to be recorded. A panic will be triggered if that doesn't happen.
526 pub fn record_arg<A>(&mut self, event_arg: A)
527 where
528 A: Borrow<str> + Into<String>,
529 {
530 let event_arg = self.profiler.get_or_alloc_cached_string(event_arg);
531 self.args.push(event_arg);
532 }
533 }
534
/// The actual self-profiler state. Usually accessed through a `SelfProfilerRef`.
pub struct SelfProfiler {
    /// The underlying `measureme` profiler that serializes events to disk.
    profiler: Profiler,
    /// Which event categories get recorded (see `EventFilter`).
    event_filter_mask: EventFilter,

    /// Deduplicates strings allocated via `get_or_alloc_cached_string`.
    string_cache: RwLock<FxHashMap<String, StringId>>,

    // Pre-allocated `event_kind` strings, one per supported event category
    // (allocated once in `SelfProfiler::new`).
    query_event_kind: StringId,
    generic_activity_event_kind: StringId,
    incremental_load_result_event_kind: StringId,
    incremental_result_hashing_event_kind: StringId,
    query_blocked_event_kind: StringId,
    query_cache_hit_event_kind: StringId,
    artifact_size_event_kind: StringId,
}
549
impl SelfProfiler {
    /// Creates the profile output directory and file
    /// (`<crate_name>-<pid>.rustc_profile`), pre-allocates the event-kind
    /// strings, and parses the `-Z self-profile-events` filter list.
    pub fn new(
        output_directory: &Path,
        crate_name: Option<&str>,
        event_filters: Option<&[String]>,
        counter_name: &str,
    ) -> Result<SelfProfiler, Box<dyn Error + Send + Sync>> {
        fs::create_dir_all(output_directory)?;

        let crate_name = crate_name.unwrap_or("unknown-crate");
        // HACK(eddyb) we need to pad the PID, strange as it may seem, as its
        // length can behave as a source of entropy for heap addresses, when
        // ASLR is disabled and the heap is otherwise deterministic.
        let pid: u32 = process::id();
        let filename = format!("{}-{:07}.rustc_profile", crate_name, pid);
        let path = output_directory.join(&filename);
        let profiler =
            Profiler::with_counter(&path, measureme::counters::Counter::by_name(counter_name)?)?;

        // Allocate the per-category `event_kind` label strings once up front.
        let query_event_kind = profiler.alloc_string("Query");
        let generic_activity_event_kind = profiler.alloc_string("GenericActivity");
        let incremental_load_result_event_kind = profiler.alloc_string("IncrementalLoadResult");
        let incremental_result_hashing_event_kind =
            profiler.alloc_string("IncrementalResultHashing");
        let query_blocked_event_kind = profiler.alloc_string("QueryBlocked");
        let query_cache_hit_event_kind = profiler.alloc_string("QueryCacheHit");
        let artifact_size_event_kind = profiler.alloc_string("ArtifactSize");

        let mut event_filter_mask = EventFilter::empty();

        if let Some(event_filters) = event_filters {
            let mut unknown_events = vec![];
            for item in event_filters {
                if let Some(&(_, mask)) =
                    EVENT_FILTERS_BY_NAME.iter().find(|&(name, _)| name == item)
                {
                    event_filter_mask |= mask;
                } else {
                    unknown_events.push(item.clone());
                }
            }

            // Warn about any unknown event names
            if !unknown_events.is_empty() {
                unknown_events.sort();
                unknown_events.dedup();

                warn!(
                    "Unknown self-profiler events specified: {}. Available options are: {}.",
                    unknown_events.join(", "),
                    EVENT_FILTERS_BY_NAME
                        .iter()
                        .map(|&(name, _)| name.to_string())
                        .collect::<Vec<_>>()
                        .join(", ")
                );
            }
        } else {
            // No explicit filter list: record the default set of events.
            event_filter_mask = EventFilter::DEFAULT;
        }

        Ok(SelfProfiler {
            profiler,
            event_filter_mask,
            string_cache: RwLock::new(FxHashMap::default()),
            query_event_kind,
            generic_activity_event_kind,
            incremental_load_result_event_kind,
            incremental_result_hashing_event_kind,
            query_blocked_event_kind,
            query_cache_hit_event_kind,
            artifact_size_event_kind,
        })
    }

    /// Allocates a new string in the profiling data. Does not do any caching
    /// or deduplication.
    pub fn alloc_string<STR: SerializableString + ?Sized>(&self, s: &STR) -> StringId {
        self.profiler.alloc_string(s)
    }

    /// Gets a `StringId` for the given string. This method makes sure that
    /// any strings going through it will only be allocated once in the
    /// profiling data.
    pub fn get_or_alloc_cached_string<A>(&self, s: A) -> StringId
    where
        A: Borrow<str> + Into<String>,
    {
        // Only acquire a read-lock first since we assume that the string is
        // already present in the common case.
        {
            let string_cache = self.string_cache.read();

            if let Some(&id) = string_cache.get(s.borrow()) {
                return id;
            }
        }

        let mut string_cache = self.string_cache.write();
        // Check if the string has already been added in the small time window
        // between dropping the read lock and acquiring the write lock.
        match string_cache.entry(s.into()) {
            Entry::Occupied(e) => *e.get(),
            Entry::Vacant(e) => {
                let string_id = self.profiler.alloc_string(&e.key()[..]);
                *e.insert(string_id)
            }
        }
    }

    /// Maps a single virtual query-invocation ID to its concrete string.
    pub fn map_query_invocation_id_to_string(&self, from: QueryInvocationId, to: StringId) {
        let from = StringId::new_virtual(from.0);
        self.profiler.map_virtual_to_concrete_string(from, to);
    }

    /// Maps many virtual query-invocation IDs to one shared concrete string
    /// in a single bulk operation.
    pub fn bulk_map_query_invocation_id_to_single_string<I>(&self, from: I, to: StringId)
    where
        I: Iterator<Item = QueryInvocationId> + ExactSizeIterator,
    {
        let from = from.map(|qid| StringId::new_virtual(qid.0));
        self.profiler.bulk_map_virtual_to_single_concrete_string(from, to);
    }

    /// Whether query keys should be included in `event_id`s
    /// (`-Z self-profile-events=query-keys`).
    pub fn query_key_recording_enabled(&self) -> bool {
        self.event_filter_mask.contains(EventFilter::QUERY_KEYS)
    }

    /// Returns a builder for constructing `EventId`s against this profiler.
    pub fn event_id_builder(&self) -> EventIdBuilder<'_> {
        EventIdBuilder::new(&self.profiler)
    }
}
681
/// Guard for an in-progress interval event; wraps `measureme`'s own guard.
/// `None` means profiling is disabled (or the event was filtered out) and
/// nothing will be recorded.
#[must_use]
pub struct TimingGuard<'a>(Option<measureme::TimingGuard<'a>>);
684
impl<'a> TimingGuard<'a> {
    /// Begins recording an interval event of the given kind and id on the
    /// current thread.
    #[inline]
    pub fn start(
        profiler: &'a SelfProfiler,
        event_kind: StringId,
        event_id: EventId,
    ) -> TimingGuard<'a> {
        let thread_id = get_thread_id();
        let raw_profiler = &profiler.profiler;
        let timing_guard =
            raw_profiler.start_recording_interval_event(event_kind, event_id, thread_id);
        TimingGuard(Some(timing_guard))
    }

    /// Finishes the event, overriding its `event_id` with the *virtual* ID
    /// derived from `query_invocation_id` (see module docs on `event_id`
    /// assignment). No-op when profiling is disabled.
    #[inline]
    pub fn finish_with_query_invocation_id(self, query_invocation_id: QueryInvocationId) {
        if let Some(guard) = self.0 {
            // `cold_path` keeps this off the non-profiling fast path.
            cold_path(|| {
                let event_id = StringId::new_virtual(query_invocation_id.0);
                let event_id = EventId::from_virtual(event_id);
                guard.finish_with_override_event_id(event_id);
            });
        }
    }

    /// A guard that records nothing.
    #[inline]
    pub fn none() -> TimingGuard<'a> {
        TimingGuard(None)
    }

    /// Runs `f` while holding the guard, so the recorded interval spans the
    /// duration of the call.
    #[inline(always)]
    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
        let _timer = self;
        f()
    }
}
721
/// Like `TimingGuard`, but optionally prints a `-Z time-passes`-style timing
/// entry (via `print_time_passes_entry`) when dropped.
#[must_use]
pub struct VerboseTimingGuard<'a> {
    /// `Some((start_time, start_rss, message))` when a timing line may be
    /// printed on drop; `None` disables printing entirely.
    start_and_message: Option<(Instant, Option<usize>, String)>,
    _guard: TimingGuard<'a>,
}
727
impl<'a> VerboseTimingGuard<'a> {
    /// Wraps `_guard`; when `message` is `Some`, the elapsed time (and RSS
    /// figures, when available) will be printed under that label on drop.
    pub fn start(message: Option<String>, _guard: TimingGuard<'a>) -> Self {
        VerboseTimingGuard {
            _guard,
            // Only sample the clock and RSS when we might actually print.
            start_and_message: message.map(|msg| (Instant::now(), get_resident_set_size(), msg)),
        }
    }

    /// Runs `f` while holding the guard, so the timing spans the call.
    #[inline(always)]
    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
        let _timer = self;
        f()
    }
}
742
impl Drop for VerboseTimingGuard<'_> {
    fn drop(&mut self) {
        // Print the timing entry, but only when a message was requested and
        // the measurement is deemed interesting (see `should_print_passes`).
        if let Some((start_time, start_rss, ref message)) = self.start_and_message {
            let end_rss = get_resident_set_size();
            let dur = start_time.elapsed();

            if should_print_passes(dur, start_rss, end_rss) {
                print_time_passes_entry(&message, dur, start_rss, end_rss);
            }
        }
    }
}
755
/// Decides whether a timing entry is worth printing: anything that took more
/// than 5ms, or whose resident-set size changed between the two samples.
fn should_print_passes(dur: Duration, start_rss: Option<usize>, end_rss: Option<usize>) -> bool {
    // Long enough to be interesting on its own.
    if dur.as_millis() > 5 {
        return true;
    }

    // Otherwise, only print when both RSS readings exist and differ.
    matches!((start_rss, end_rss), (Some(start), Some(end)) if start != end)
}
770
771 pub fn print_time_passes_entry(
772 what: &str,
773 dur: Duration,
774 start_rss: Option<usize>,
775 end_rss: Option<usize>,
776 ) {
777 let rss_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as usize;
778 let rss_change_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as i128;
779
780 let mem_string = match (start_rss, end_rss) {
781 (Some(start_rss), Some(end_rss)) => {
782 let change_rss = end_rss as i128 - start_rss as i128;
783
784 format!(
785 "; rss: {:>4}MB -> {:>4}MB ({:>+5}MB)",
786 rss_to_mb(start_rss),
787 rss_to_mb(end_rss),
788 rss_change_to_mb(change_rss),
789 )
790 }
791 (Some(start_rss), None) => format!("; rss start: {:>4}MB", rss_to_mb(start_rss)),
792 (None, Some(end_rss)) => format!("; rss end: {:>4}MB", rss_to_mb(end_rss)),
793 (None, None) => String::new(),
794 };
795
796 eprintln!("time: {:>7}{}\t{}", duration_to_secs_str(dur), mem_string, what);
797 }
798
// Hack up our own formatting for the duration to make it easier for scripts
// to parse (always use the same number of decimal places and the same unit).
pub fn duration_to_secs_str(dur: std::time::Duration) -> String {
    let secs = dur.as_secs_f64();
    format!("{secs:.3}")
}
804
/// Returns the current thread's ID as a `u32`, for tagging recorded events.
///
/// NOTE(review): `ThreadId::as_u64` is an unstable std API — presumably
/// enabled by a crate/workspace-level feature gate not visible here. The
/// `as u32` cast truncates, so in principle two threads could share an ID.
fn get_thread_id() -> u32 {
    std::thread::current().id().as_u64().get() as u32
}
808
// Memory reporting: per-platform implementations of `get_resident_set_size`,
// which returns the process' resident set size in bytes, or `None` if it
// cannot be determined.
cfg_if! {
    if #[cfg(windows)] {
        /// Queries the Win32 `WorkingSetSize` for the current process.
        pub fn get_resident_set_size() -> Option<usize> {
            use std::mem::{self, MaybeUninit};
            use winapi::shared::minwindef::DWORD;
            use winapi::um::processthreadsapi::GetCurrentProcess;
            use winapi::um::psapi::{GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS};

            let mut pmc = MaybeUninit::<PROCESS_MEMORY_COUNTERS>::uninit();
            // `GetProcessMemoryInfo` returns 0 on failure, non-zero on success.
            match unsafe {
                GetProcessMemoryInfo(GetCurrentProcess(), pmc.as_mut_ptr(), mem::size_of_val(&pmc) as DWORD)
            } {
                0 => None,
                _ => {
                    // SAFETY: the call reported success, so `pmc` was initialized.
                    let pmc = unsafe { pmc.assume_init() };
                    Some(pmc.WorkingSetSize as usize)
                }
            }
        }
    } else if #[cfg(target_os = "macos")] {
        /// Queries `proc_pidinfo(PROC_PIDTASKINFO)` for the current process.
        pub fn get_resident_set_size() -> Option<usize> {
            use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO};
            use std::mem;
            const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int;

            unsafe {
                let mut info: proc_taskinfo = mem::zeroed();
                let info_ptr = &mut info as *mut proc_taskinfo as *mut c_void;
                let pid = getpid() as c_int;
                let ret = proc_pidinfo(pid, PROC_PIDTASKINFO, 0, info_ptr, PROC_TASKINFO_SIZE);
                // `proc_pidinfo` returns the number of bytes written; anything
                // other than a full struct is treated as failure.
                if ret == PROC_TASKINFO_SIZE {
                    Some(info.pti_resident_size as usize)
                } else {
                    None
                }
            }
        }
    } else if #[cfg(unix)] {
        /// Reads field 1 (resident page count) of `/proc/self/statm`.
        ///
        /// NOTE(review): this hard-codes a 4096-byte page size instead of
        /// querying the OS (e.g. `sysconf(_SC_PAGESIZE)`) — confirm this is
        /// acceptable on all supported unix targets.
        pub fn get_resident_set_size() -> Option<usize> {
            let field = 1;
            let contents = fs::read("/proc/self/statm").ok()?;
            let contents = String::from_utf8(contents).ok()?;
            let s = contents.split_whitespace().nth(field)?;
            let npages = s.parse::<usize>().ok()?;
            Some(npages * 4096)
        }
    } else {
        /// Fallback for platforms without a supported RSS query.
        pub fn get_resident_set_size() -> Option<usize> {
            None
        }
    }
}