// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */
#include <linux/export.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/stat.h>

#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
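/*
 * Illustrative sizing (not normative; it depends on PAGE_SIZE): with
 * 4 KiB pages and the 8-byte struct profile_hit above, each per-cpu
 * hashtable holds 4096 / 8 = 512 entries, organized as 512 / 8 = 64
 * groups of 8 slots that are probed together.
 */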
static atomic_t *prof_buffer;
static unsigned long prof_len;
static unsigned short int prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */
int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel sleep profiling enabled (shift: %u)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel schedule profiling enabled (shift: %u)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel KVM profiling enabled (shift: %u)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %u)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
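/*
 * Example boot parameters (illustrative): "profile=2" enables CPU-time
 * profiling with one counter per 2^2 = 4 bytes of kernel text, while
 * "profile=schedule,1" profiles schedule() call sites at 2-byte
 * granularity instead.
 */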
int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
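/*
 * Note on the fallback chain above: prof_buffer can be large (one
 * atomic_t per 2^prof_shift bytes of kernel text), so profile_init()
 * tries progressively more forgiving allocators: physically contiguous
 * kzalloc() first, then alloc_pages_exact(), and finally virtually
 * contiguous vzalloc(). __GFP_NOWARN keeps the attempts that may fail
 * on fragmented memory from spamming the log.
 */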
/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);
int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);
int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);
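/*
 * Hypothetical usage sketch for the event API above; the callback and
 * its body are invented for illustration, only the types and the
 * register/unregister calls are from this file:
 *
 *	static int my_exit_cb(struct notifier_block *nb,
 *			      unsigned long val, void *data)
 *	{
 *		struct task_struct *task = data;
 *		(inspect task here; called from the task-exit chain)
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_exit_nb = {
 *		.notifier_call = my_exit_cb,
 *	};
 *
 *	profile_event_register(PROFILE_TASK_EXIT, &my_exit_nb);
 *	profile_event_unregister(PROFILE_TASK_EXIT, &my_exit_nb);
 */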
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
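/*
 * A worked example of the probe arithmetic in do_profile_hits() below,
 * assuming the typical 512-entry/64-group geometry (4 KiB pages): the
 * primary group is chosen by the low bits of the slot number, and the
 * secondary stride comes from ~(pc << 1). Since (pc << 1) is even, the
 * stride's low bit is always set, i.e. it is an odd number of groups;
 * odd strides are coprime to the power-of-2 group count, so repeated
 * probing visits every group once before wrapping back to the primary.
 */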
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}
static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}
static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}
static int profile_dead_cpu(unsigned int cpu)
{
	struct page *page;
	int i;

	if (prof_cpu_mask != NULL)
		cpumask_clear_cpu(cpu, prof_cpu_mask);

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
			__free_page(page);
		}
	}
	return 0;
}
static int profile_prepare_cpu(unsigned int cpu)
{
	int i, node = cpu_to_mem(cpu);
	struct page *page;

	per_cpu(cpu_profile_flip, cpu) = 0;

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i])
			continue;

		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page) {
			profile_dead_cpu(cpu);
			return -ENOMEM;
		}
		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
	}
	return 0;
}
static int profile_online_cpu(unsigned int cpu)
{
	if (prof_cpu_mask != NULL)
		cpumask_set_cpu(cpu, prof_cpu_mask);

	return 0;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);
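/*
 * profile_hits() is usually reached via the profile_hit() wrapper from
 * <linux/profile.h>, which accounts a single hit. For instance, a
 * sampling point of the form
 *
 *	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 *
 * attributes one hit to its caller's kernel-text address. (The actual
 * call sites live outside this file; this is only an illustration.)
 */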
void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}
static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}
static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}
static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};
void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
}
/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned long sample_step = 1UL << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
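/*
 * The raw format returned above is meant for userspace tools; for
 * example, readprofile(8) from util-linux can map the counters back to
 * symbols with something like "readprofile -m System.map" (the map file
 * path is system-dependent).
 */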
/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
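/*
 * Illustration: any write resets the counters, so e.g. "echo >
 * /proc/profile" (as root) clears the profile; a write of exactly
 * sizeof(int) bytes is additionally interpreted as a new profiling
 * multiplier on SMP architectures that implement
 * setup_profiling_timer().
 */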
static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};
int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
#ifdef CONFIG_SMP
	enum cpuhp_state online_state;
#endif

	int err = 0;

	if (!prof_on)
		return 0;
#ifdef CONFIG_SMP
	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
				profile_prepare_cpu, profile_dead_cpu);
	if (err)
		return err;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
				profile_online_cpu, NULL);
	if (err < 0)
		goto err_state_prep;
	online_state = err;
	err = 0;
#endif
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		goto err_state_onl;
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));

	return err;
err_state_onl:
#ifdef CONFIG_SMP
	cpuhp_remove_state(online_state);
err_state_prep:
	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
#endif
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */