/*
 * linux/kernel/profile.c
 * Simple profiling. Manages a direct-mapped profile hit count buffer,
 * with configurable resolution, support for restricting the cpus on
 * which profiling is done, and switching between cpu time and
 * schedule() calls via kernel command line parameters passed at boot.
 *
 * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 * Consolidation of architecture support code for profiling,
 *	William Irwin, Oracle, July 2004
 * Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT	3
#define PROFILE_GRPSZ		(1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)

/* Oprofile timer tick hook */
static int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

static int __init profile_setup(char *str)
{
	static char __initdata schedstr[] = "schedule";
	static char __initdata sleepstr[] = "sleep";
	static char __initdata kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
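
/*
 * Example boot parameters accepted by the parser above (the values
 * are illustrative, not taken from this file):
 *
 *	profile=2		cpu-time profiling, one counter per
 *				1 << 2 bytes of kernel text
 *	profile=schedule,5	profile callers of schedule()
 *	profile=sleep,2		profile D-state sleep (needs
 *				CONFIG_SCHEDSTATS)
 *	profile=kvm,3		profile KVM VM exits
 *
 * The shift after the comma is optional; when it is omitted,
 * get_option() finds no number and prof_shift stays 0.
 */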
101 | ||
102 | ||
103 | void __init profile_init(void) | |
104 | { | |
105 | if (!prof_on) | |
106 | return; | |
107 | ||
108 | /* only text is profiled */ | |
109 | prof_len = (_etext - _stext) >> prof_shift; | |
110 | prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t)); | |
111 | } | |
112 | ||
113 | /* Profile event notifications */ | |
114 | ||
115 | #ifdef CONFIG_PROFILING | |
116 | ||
117 | static BLOCKING_NOTIFIER_HEAD(task_exit_notifier); | |
118 | static ATOMIC_NOTIFIER_HEAD(task_free_notifier); | |
119 | static BLOCKING_NOTIFIER_HEAD(munmap_notifier); | |
120 | ||
121 | void profile_task_exit(struct task_struct *task) | |
122 | { | |
123 | blocking_notifier_call_chain(&task_exit_notifier, 0, task); | |
124 | } | |
125 | ||
126 | int profile_handoff_task(struct task_struct *task) | |
127 | { | |
128 | int ret; | |
129 | ret = atomic_notifier_call_chain(&task_free_notifier, 0, task); | |
130 | return (ret == NOTIFY_OK) ? 1 : 0; | |
131 | } | |
132 | ||
133 | void profile_munmap(unsigned long addr) | |
134 | { | |
135 | blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr); | |
136 | } | |
137 | ||
138 | int task_handoff_register(struct notifier_block *n) | |
139 | { | |
140 | return atomic_notifier_chain_register(&task_free_notifier, n); | |
141 | } | |
142 | EXPORT_SYMBOL_GPL(task_handoff_register); | |
143 | ||
144 | int task_handoff_unregister(struct notifier_block *n) | |
145 | { | |
146 | return atomic_notifier_chain_unregister(&task_free_notifier, n); | |
147 | } | |
148 | EXPORT_SYMBOL_GPL(task_handoff_unregister); | |
149 | ||
150 | int profile_event_register(enum profile_type type, struct notifier_block *n) | |
151 | { | |
152 | int err = -EINVAL; | |
153 | ||
154 | switch (type) { | |
155 | case PROFILE_TASK_EXIT: | |
156 | err = blocking_notifier_chain_register( | |
157 | &task_exit_notifier, n); | |
158 | break; | |
159 | case PROFILE_MUNMAP: | |
160 | err = blocking_notifier_chain_register( | |
161 | &munmap_notifier, n); | |
162 | break; | |
163 | } | |
164 | ||
165 | return err; | |
166 | } | |
167 | EXPORT_SYMBOL_GPL(profile_event_register); | |
168 | ||
169 | int profile_event_unregister(enum profile_type type, struct notifier_block *n) | |
170 | { | |
171 | int err = -EINVAL; | |
172 | ||
173 | switch (type) { | |
174 | case PROFILE_TASK_EXIT: | |
175 | err = blocking_notifier_chain_unregister( | |
176 | &task_exit_notifier, n); | |
177 | break; | |
178 | case PROFILE_MUNMAP: | |
179 | err = blocking_notifier_chain_unregister( | |
180 | &munmap_notifier, n); | |
181 | break; | |
182 | } | |
183 | ||
184 | return err; | |
185 | } | |
186 | EXPORT_SYMBOL_GPL(profile_event_unregister); | |
187 | ||
188 | int register_timer_hook(int (*hook)(struct pt_regs *)) | |
189 | { | |
190 | if (timer_hook) | |
191 | return -EBUSY; | |
192 | timer_hook = hook; | |
193 | return 0; | |
194 | } | |
195 | EXPORT_SYMBOL_GPL(register_timer_hook); | |
196 | ||
197 | void unregister_timer_hook(int (*hook)(struct pt_regs *)) | |
198 | { | |
199 | WARN_ON(hook != timer_hook); | |
200 | timer_hook = NULL; | |
201 | /* make sure all CPUs see the NULL hook */ | |
202 | synchronize_sched(); /* Allow ongoing interrupts to complete. */ | |
203 | } | |
204 | EXPORT_SYMBOL_GPL(unregister_timer_hook); | |
205 | ||
206 | #endif /* CONFIG_PROFILING */ | |
207 | ||
208 | ||
209 | #ifdef CONFIG_SMP | |
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request that they
 * flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The only
 * reason for having a second hashtable is to avoid the cacheline
 * contention that would otherwise occur while flushing pending
 * profile hits (a flush is required for reported hit counts to be
 * accurate) and that would resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot,
 * and each entry holds the number of hits pending for that slot on
 * this cpu. When a hashtable overflows, all pending hits are
 * accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable is emptied. Since many pending hits
 * may accumulate in a single hashtable entry, this amortizes a number
 * of atomic profile buffer increments likely to be far larger than
 * the number of entries in the hashtable, particularly given that
 * the number of distinct profile buffer positions hit during a short
 * interval (e.g. several seconds) is usually very small. Exclusion
 * from buffer flipping is provided by interrupt disablement (note
 * that for SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be
 * called from process context).
 *
 * The hash function is meant to be lightweight rather than strong.
 * It was vaguely inspired by the ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
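/*
 * A worked example of the table geometry, assuming 4KiB pages (so
 * the numbers below are illustrative, not universal): sizeof(struct
 * profile_hit) == 8, giving NR_PROFILE_HIT == 512 entries per table,
 * split into NR_PROFILE_GRP == 64 groups of PROFILE_GRPSZ == 8
 * slots. profile_hits() derives the primary group from the low bits
 * of the profile slot number and a secondary stride from its shifted
 * complement; each probe scans one group linearly, then steps by the
 * stride. Since ~(pc << 1) always has its low bit set, the stride is
 * odd in group units and so visits every group before the walk wraps
 * back to the primary group, at which point no matching or free slot
 * exists anywhere and everything is flushed to prof_buffer.
 */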
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	if (prof_on != type || !prof_buffer)
		return;
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * Hits are buffered into a per-CPU queue rather than added
	 * straight to the global profiler buffer, which reduces the
	 * number of global (and possibly NUMA-alien) accesses. The
	 * write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int __devinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_node(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return NOTIFY_BAD;
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return NOTIFY_BAD;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cpu_set(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cpu_clear(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	if (prof_on != type || !prof_buffer)
		return;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
EXPORT_SYMBOL_GPL(profile_hits);
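
/*
 * Note: profile_hit(), used by profile_tick() below, lives in
 * <linux/profile.h>; in kernels of this vintage it is effectively a
 * thin wrapper that calls profile_hits(type, ip, 1) behind a fast
 * prof_on check, so a timer tick accounts exactly one hit.
 */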
409 | ||
410 | void profile_tick(int type) | |
411 | { | |
412 | struct pt_regs *regs = get_irq_regs(); | |
413 | ||
414 | if (type == CPU_PROFILING && timer_hook) | |
415 | timer_hook(regs); | |
416 | if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask)) | |
417 | profile_hit(type, (void *)profile_pc(regs)); | |
418 | } | |
419 | ||
420 | #ifdef CONFIG_PROC_FS | |
421 | #include <linux/proc_fs.h> | |
422 | #include <asm/uaccess.h> | |
423 | #include <asm/ptrace.h> | |
424 | ||
425 | static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, | |
426 | int count, int *eof, void *data) | |
427 | { | |
428 | int len = cpumask_scnprintf(page, count, *(cpumask_t *)data); | |
429 | if (count - len < 2) | |
430 | return -EINVAL; | |
431 | len += sprintf(page + len, "\n"); | |
432 | return len; | |
433 | } | |
434 | ||
435 | static int prof_cpu_mask_write_proc(struct file *file, | |
436 | const char __user *buffer, unsigned long count, void *data) | |
437 | { | |
438 | cpumask_t *mask = (cpumask_t *)data; | |
439 | unsigned long full_count = count, err; | |
440 | cpumask_t new_value; | |
441 | ||
442 | err = cpumask_parse_user(buffer, count, new_value); | |
443 | if (err) | |
444 | return err; | |
445 | ||
446 | *mask = new_value; | |
447 | return full_count; | |
448 | } | |
449 | ||
450 | void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) | |
451 | { | |
452 | struct proc_dir_entry *entry; | |
453 | ||
454 | /* create /proc/irq/prof_cpu_mask */ | |
455 | entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); | |
456 | if (!entry) | |
457 | return; | |
458 | entry->data = (void *)&prof_cpu_mask; | |
459 | entry->read_proc = prof_cpu_mask_read_proc; | |
460 | entry->write_proc = prof_cpu_mask_write_proc; | |
461 | } | |
462 | ||
/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use the readprofile program to extract meaningful info from
 * this data.
 */
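/*
 * Rough sketch of the stream this produces (derived from the code
 * below): the first sizeof(unsigned int) bytes hold the sample step,
 * 1 << prof_shift, followed by the prof_len hit counters:
 *
 *	[ sample_step ][ count of bucket 0 ][ count of bucket 1 ] ...
 *
 * The counters are copied out as raw atomic_t contents; the code
 * implicitly assumes sizeof(atomic_t) == sizeof(unsigned int), which
 * holds on the usual configurations.
 */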
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also sets the
 * profiling interrupt frequency, on architectures that support this.
 */
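/*
 * Userspace sketch (tool invocations assumed, not mandated by this
 * file): writing any data resets the counters, e.g.
 *
 *	dd if=/dev/zero of=/proc/profile bs=1 count=1
 *
 * while writing exactly sizeof(int) bytes additionally feeds that
 * value to setup_profiling_timer() as the tick multiplier on SMP.
 */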
static ssize_t write_profile(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
};

#ifdef CONFIG_SMP
static void __init profile_nop(void *unused)
{
}

static int __init create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_node(cpu);
		struct page *page;

		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif

static int __init create_proc_profile(void)
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -1;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	entry->size = (1 + prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */