/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2008 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include "op_counter.h"
#include "op_x86_model.h"
static struct op_x86_model_spec const *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;
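/*
 * NMI handler: runs on every perf-counter NMI.  It hands the saved
 * registers and this CPU's MSR shadow to the model-specific handler,
 * which decides whether the NMI was ours (a counter overflow).
 */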
static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	int cpu = smp_processor_id();

	switch (val) {
	case DIE_NMI:
		if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
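/*
 * Snapshot the current contents of all counter and control MSRs into
 * the per-CPU shadow, so they can be restored verbatim at shutdown.
 * Entries with a zero address are counters the model did not claim.
 */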
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			rdmsr(counters[i].addr,
			      counters[i].saved.low,
			      counters[i].saved.high);
		}
	}

	for (i = 0; i < nr_ctrls; ++i) {
		if (controls[i].addr) {
			rdmsr(controls[i].addr,
			      controls[i].saved.low,
			      controls[i].saved.high);
		}
	}
}
static void nmi_save_registers(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
}
static void free_msrs(void)
{
	int i;
	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
}
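/*
 * Allocate the counter/control shadow arrays for every possible CPU.
 * On any allocation failure everything allocated so far is released
 * again, so allocate_msrs() either fully succeeds or has no effect.
 */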
static int allocate_msrs(void)
{
	int success = 1;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters) {
			success = 0;
			break;
		}
		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls) {
			success = 0;
			break;
		}
	}

	if (!success)
		free_msrs();

	return success;
}
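/*
 * Per-CPU setup, run via on_each_cpu(): program the model's control
 * MSRs under oprofilefs_lock (the configuration in counter_config is
 * shared), then point the local APIC's performance-counter LVT entry
 * at NMI delivery, saving the previous LVTPC value for shutdown.
 */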
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
	.next = NULL,
	.priority = 0
};
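/*
 * Bring profiling up: allocate the MSR shadows, hook into the die
 * notifier chain so profile_exceptions_notify() sees perf-counter
 * NMIs, resolve the MSR addresses once on CPU 0 and replicate them
 * to all other CPUs, then save and program the hardware everywhere.
 */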
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	err = register_die_notifier(&profile_exceptions_nb);
	if (err) {
		free_msrs();
		return err;
	}

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	for_each_possible_cpu(cpu) {
		if (cpu != 0) {
			memcpy(per_cpu(cpu_msrs, cpu).counters,
			       per_cpu(cpu_msrs, 0).counters,
			       sizeof(struct op_msr) * model->num_counters);

			memcpy(per_cpu(cpu_msrs, cpu).controls,
			       per_cpu(cpu_msrs, 0).controls,
			       sizeof(struct op_msr) * model->num_controls);
		}
	}
	on_each_cpu(nmi_save_registers, NULL, 1);
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;
	return 0;
}
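/*
 * Inverse of nmi_cpu_save_registers(): write the saved control and
 * counter values back, controls first.
 */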
static void nmi_restore_registers(struct op_msrs *msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrls; ++i) {
		if (controls[i].addr) {
			wrmsr(controls[i].addr,
			      controls[i].saved.low,
			      controls[i].saved.high);
		}
	}

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			wrmsr(counters[i].addr,
			      counters[i].saved.low,
			      counters[i].saved.high);
		}
	}
}
static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);

	/* Restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector nr combination can be illegal. That's by design: on
	 * power on, the apic lvt contains a zero vector nr, which is legal
	 * only for NMI delivery mode. So inhibit apic err before restoring
	 * lvtpc.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_restore_registers(msrs);
}
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->start(msrs);
}
static int nmi_start(void)
{
	on_each_cpu(nmi_cpu_start, NULL, 1);
	return 0;
}
static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->stop(msrs);
}
static void nmi_stop(void)
{
	on_each_cpu(nmi_cpu_stop, NULL, 1);
}
struct op_counter_config counter_config[OP_MAX_COUNTER];
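/*
 * Create one oprofilefs directory per usable counter.  With oprofilefs
 * mounted (typically at /dev/oprofile), counter 0 would show up as:
 *
 *	/dev/oprofile/0/enabled
 *	/dev/oprofile/0/event
 *	/dev/oprofile/0/count
 *	/dev/oprofile/0/unit_mask
 *	/dev/oprofile/0/kernel
 *	/dev/oprofile/0/user
 *
 * each file backed by the matching counter_config[0] field.
 */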
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use.  This should protect userspace apps.
		 * NOTE: assumes a 1:1 mapping here (that counters are
		 * organized sequentially in their struct assignment).
		 */
		if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}
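/*
 * CPU hotplug callback: start profiling on a CPU that comes (back)
 * online and stop it on one about to go down, so per-CPU counter
 * state stays consistent across hotplug events.
 */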
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
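/*
 * Power-management hooks.  By the time the suspend callback runs only
 * one CPU is still online, so stopping or restarting that single CPU
 * is sufficient.
 */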
#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}
static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
	return 0;
}
static struct sysdev_class oprofile_sysclass = {
	.name		= "oprofile",
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};
static struct sys_device device_oprofile = {
	.id	= 0,
	.cls	= &oprofile_sysclass,
};
static int __init init_sysfs(void)
{
	int error;

	error = sysdev_class_register(&oprofile_sysclass);
	if (!error)
		error = sysdev_register(&device_oprofile);
	return error;
}
static void exit_sysfs(void)
{
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
}
#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */
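/*
 * CPU detection helpers.  Each *_init() function below inspects
 * boot_cpu_data and, on a match, sets the model-specific operations
 * table ('model') plus the user-visible cpu_type string, returning 1
 * on success and 0 to let the caller fall through to the next choice.
 */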
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}
static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "archperfmon")) {
		force_arch_perfmon = 1;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	}

	return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (force_arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 15: case 23:
		*cpu_type = "i386/core_2";
		break;
	case 26:
		arch_perfmon_setup_counters();
		*cpu_type = "i386/core_i7";
		break;
	case 28:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = &op_ppro_spec;
	return 1;
}
static int __init arch_perfmon_init(char **cpu_type)
{
	if (!cpu_has_arch_perfmon)
		return 0;
	*cpu_type = "i386/arch_perfmon";
	model = &op_arch_perfmon_spec;
	arch_perfmon_setup_counters();
	return 1;
}
/* in order to get sysfs right */
static int using_nmi;
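/*
 * Entry point, called from the generic oprofile init code.  Picks the
 * model driver for the boot CPU, fills in the oprofile_operations
 * callbacks with the NMI-based implementations above, and registers
 * the hotplug notifier and sysfs device.  Returns -ENODEV when no
 * local APIC or usable counter hardware is found, in which case the
 * caller can revert to timer-based profiling.
 */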
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		default:
			return -ENODEV;
		case 6:
			model = &op_amd_spec;
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			model = &op_amd_spec;
			/* Actually it could be i386/hammer too, but give
			   user space a consistent name. */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			model = &op_amd_spec;
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			model = &op_amd_spec;
			cpu_type = "x86-64/family11h";
			break;
		}
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
		/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

		/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (!cpu_type && !arch_perfmon_init(&cpu_type))
			return -ENODEV;
		break;

	default:
		return -ENODEV;
	}

	register_cpu_notifier(&oprofile_cpu_nb);
	/* default values, can be overwritten by model */
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	init_sysfs();
	using_nmi = 1;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}
void op_nmi_exit(void)
{
	if (using_nmi) {
		exit_sysfs();
		unregister_cpu_notifier(&oprofile_cpu_nb);
	}
	if (model->exit)
		model->exit();
}