Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file implements the perfmon-2 subsystem which is used | |
3 | * to program the IA-64 Performance Monitoring Unit (PMU). | |
4 | * | |
5 | * The initial version of perfmon.c was written by | |
6 | * Ganesh Venkitachalam, IBM Corp. | |
7 | * | |
8 | * Then it was modified for perfmon-1.x by Stephane Eranian and | |
9 | * David Mosberger, Hewlett Packard Co. | |
10 | * | |
11 | * Version Perfmon-2.x is a rewrite of perfmon-1.x | |
12 | * by Stephane Eranian, Hewlett Packard Co. | |
13 | * | |
a1ecf7f6 | 14 | * Copyright (C) 1999-2005 Hewlett Packard Co |
1da177e4 LT |
15 | * Stephane Eranian <eranian@hpl.hp.com> |
16 | * David Mosberger-Tang <davidm@hpl.hp.com> | |
17 | * | |
18 | * More information about perfmon available at: | |
19 | * http://www.hpl.hp.com/research/linux/perfmon | |
20 | */ | |
21 | ||
1da177e4 LT |
22 | #include <linux/module.h> |
23 | #include <linux/kernel.h> | |
24 | #include <linux/sched.h> | |
29930025 | 25 | #include <linux/sched/task.h> |
68db0cf1 | 26 | #include <linux/sched/task_stack.h> |
1da177e4 | 27 | #include <linux/interrupt.h> |
1da177e4 LT |
28 | #include <linux/proc_fs.h> |
29 | #include <linux/seq_file.h> | |
30 | #include <linux/init.h> | |
31 | #include <linux/vmalloc.h> | |
32 | #include <linux/mm.h> | |
33 | #include <linux/sysctl.h> | |
34 | #include <linux/list.h> | |
35 | #include <linux/file.h> | |
36 | #include <linux/poll.h> | |
37 | #include <linux/vfs.h> | |
a3bc0dbc | 38 | #include <linux/smp.h> |
1da177e4 LT |
39 | #include <linux/pagemap.h> |
40 | #include <linux/mount.h> | |
1da177e4 | 41 | #include <linux/bitops.h> |
a9415644 | 42 | #include <linux/capability.h> |
badf1662 | 43 | #include <linux/rcupdate.h> |
60f1c444 | 44 | #include <linux/completion.h> |
f14488cc | 45 | #include <linux/tracehook.h> |
5a0e3ad6 | 46 | #include <linux/slab.h> |
91d591c3 | 47 | #include <linux/cpu.h> |
1da177e4 LT |
48 | |
49 | #include <asm/errno.h> | |
50 | #include <asm/intrinsics.h> | |
51 | #include <asm/page.h> | |
52 | #include <asm/perfmon.h> | |
53 | #include <asm/processor.h> | |
54 | #include <asm/signal.h> | |
7c0f6ba6 | 55 | #include <linux/uaccess.h> |
1da177e4 LT |
56 | #include <asm/delay.h> |
57 | ||
58 | #ifdef CONFIG_PERFMON | |
59 | /* | |
60 | * perfmon context state | |
61 | */ | |
62 | #define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */ | |
63 | #define PFM_CTX_LOADED 2 /* context is loaded onto a task */ | |
64 | #define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */ | |
65 | #define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */ | |
66 | ||
67 | #define PFM_INVALID_ACTIVATION (~0UL) | |
68 | ||
35589a8f KA |
69 | #define PFM_NUM_PMC_REGS 64 /* PMC save area for ctxsw */ |
70 | #define PFM_NUM_PMD_REGS 64 /* PMD save area for ctxsw */ | |
71 | ||
1da177e4 LT |
72 | /* |
73 | * depth of message queue | |
74 | */ | |
75 | #define PFM_MAX_MSGS 32 | |
76 | #define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail) | |
77 | ||
78 | /* | |
79 | * type of a PMU register (bitmask). | |
80 | * bitmask structure: | |
81 | * bit0 : register implemented | |
82 | * bit1 : end marker | |
83 | * bit2-3 : reserved | |
84 | * bit4 : pmc has pmc.pm | |
85 | * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter | |
86 | * bit6-7 : register type | |
87 | * bit8-31: reserved | |
88 | */ | |
89 | #define PFM_REG_NOTIMPL 0x0 /* not implemented at all */ | |
90 | #define PFM_REG_IMPL 0x1 /* register implemented */ | |
91 | #define PFM_REG_END 0x2 /* end marker */ | |
92 | #define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */ | |
93 | #define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */ | |
94 | #define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */ | |
95 | #define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */ | |
96 | #define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */ | |
97 | ||
98 | #define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END) | |
99 | #define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END) | |
100 | ||
101 | #define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY) | |
102 | ||
103 | /* i assumed unsigned */ | |
104 | #define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL)) | |
105 | #define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL)) | |
106 | ||
107 | /* XXX: these assume that register i is implemented */ | |
108 | #define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING) | |
109 | #define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING) | |
110 | #define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR) | |
111 | #define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL) | |
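/*
 * Illustrative decomposition of the composite type values above (a sketch,
 * derived from the bitmask layout described earlier):
 *
 *	PFM_REG_MONITOR  = 0x11   implemented | pmc.pm
 *	PFM_REG_COUNTING = 0x31   implemented | pmc.pm | pmc.oi
 *
 * PMC_IS_COUNTING(i) therefore compares against the full constant, so every
 * bit of PFM_REG_COUNTING must be set in pmc_desc[i].type, rather than
 * testing a single bit.
 */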
112 | ||
113 | #define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value | |
114 | #define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask | |
115 | #define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0] | |
116 | #define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0] | |
117 | ||
118 | #define PFM_NUM_IBRS IA64_NUM_DBG_REGS | |
119 | #define PFM_NUM_DBRS IA64_NUM_DBG_REGS | |
120 | ||
121 | #define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0) | |
122 | #define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling) | |
123 | #define PFM_CTX_TASK(h) (h)->ctx_task | |
124 | ||
125 | #define PMU_PMC_OI 5 /* position of pmc.oi bit */ | |
126 | ||
127 | /* XXX: does not support more than 64 PMDs */ | |
128 | #define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask) | |
129 | #define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL) | |
130 | ||
131 | #define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask) | |
132 | ||
133 | #define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64) | |
134 | #define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64) | |
135 | #define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1) | |
136 | #define PFM_CODE_RR 0 /* requesting code range restriction */ | |
137 | #define PFM_DATA_RR 1 /* requesting data range restriction */ | |
138 | ||
139 | #define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v) | |
140 | #define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v) | |
141 | #define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info) | |
142 | ||
143 | #define RDEP(x) (1UL<<(x)) | |
144 | ||
145 | /* | |
146 | * context protection macros | |
147 | * in SMP: | |
148 | * - we need to protect against CPU concurrency (spin_lock) | |
149 | * - we need to protect against PMU overflow interrupts (local_irq_disable) | |
150 | * in UP: | |
151 | * - we need to protect against PMU overflow interrupts (local_irq_disable) | |
152 | * | |
85d1fe09 | 153 | * spin_lock_irqsave()/spin_unlock_irqrestore(): |
1da177e4 LT |
154 | * in SMP: local_irq_disable + spin_lock |
155 | * in UP : local_irq_disable | |
156 | * | |
157 | * spin_lock()/spin_unlock(): | |
158 | * in UP : removed automatically | |
159 | * in SMP: protect against context accesses from other CPU. interrupts | |
160 | * are not masked. This is useful for the PMU interrupt handler | |
161 | * because we know we will not get PMU concurrency in that code. | |
162 | */ | |
163 | #define PROTECT_CTX(c, f) \ | |
164 | do { \ | |
19c5870c | 165 | DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \ |
1da177e4 | 166 | spin_lock_irqsave(&(c)->ctx_lock, f); \ |
19c5870c | 167 | DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \ |
1da177e4 LT |
168 | } while(0) |
169 | ||
170 | #define UNPROTECT_CTX(c, f) \ | |
171 | do { \ | |
19c5870c | 172 | DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \ |
1da177e4 LT |
173 | spin_unlock_irqrestore(&(c)->ctx_lock, f); \ |
174 | } while(0) | |
175 | ||
176 | #define PROTECT_CTX_NOPRINT(c, f) \ | |
177 | do { \ | |
178 | spin_lock_irqsave(&(c)->ctx_lock, f); \ | |
179 | } while(0) | |
180 | ||
181 | ||
182 | #define UNPROTECT_CTX_NOPRINT(c, f) \ | |
183 | do { \ | |
184 | spin_unlock_irqrestore(&(c)->ctx_lock, f); \ | |
185 | } while(0) | |
186 | ||
187 | ||
188 | #define PROTECT_CTX_NOIRQ(c) \ | |
189 | do { \ | |
190 | spin_lock(&(c)->ctx_lock); \ | |
191 | } while(0) | |
192 | ||
193 | #define UNPROTECT_CTX_NOIRQ(c) \ | |
194 | do { \ | |
195 | spin_unlock(&(c)->ctx_lock); \ | |
196 | } while(0) | |
197 | ||
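/*
 * Typical usage sketch for the protection macros (the variables here are
 * hypothetical, not a specific call site):
 *
 *	pfm_context_t *ctx;		a context obtained elsewhere
 *	unsigned long flags;
 *
 *	PROTECT_CTX(ctx, flags);	irqs masked + ctx_lock held
 *	... update ctx state (and live PMU registers if owner) ...
 *	UNPROTECT_CTX(ctx, flags);	release ctx_lock, restore irqs
 *
 * The _NOIRQ variants take only the spinlock and are meant for paths where
 * interrupts are already masked, such as the PMU interrupt handler.
 */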
198 | ||
199 | #ifdef CONFIG_SMP | |
200 | ||
201 | #define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number) | |
202 | #define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++ | |
203 | #define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION() | |
204 | ||
205 | #else /* !CONFIG_SMP */ | |
206 | #define SET_ACTIVATION(t) do {} while(0) | |
207 | #define GET_ACTIVATION(t) do {} while(0) | |
208 | #define INC_ACTIVATION(t) do {} while(0) | |
209 | #endif /* CONFIG_SMP */ | |
210 | ||
211 | #define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0) | |
212 | #define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner) | |
213 | #define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx) | |
214 | ||
215 | #define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g) | |
216 | #define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g) | |
217 | ||
218 | #define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0) | |
219 | ||
220 | /* | |
221 | * cmp0 must be the value of pmc0 | |
222 | */ | |
223 | #define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL) | |
224 | ||
225 | #define PFMFS_MAGIC 0xa0b4d889 | |
226 | ||
227 | /* | |
228 | * debugging | |
229 | */ | |
230 | #define PFM_DEBUGGING 1 | |
231 | #ifdef PFM_DEBUGGING | |
232 | #define DPRINT(a) \ | |
233 | do { \ | |
d4ed8084 | 234 | if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ |
1da177e4 LT |
235 | } while (0) |
236 | ||
237 | #define DPRINT_ovfl(a) \ | |
238 | do { \ | |
d4ed8084 | 239 | if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ |
1da177e4 LT |
240 | } while (0) |
241 | #endif | |
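/*
 * DPRINT()/DPRINT_ovfl() take a single parenthesized printf-style argument
 * list, hence the double parentheses at call sites. Minimal usage sketch:
 *
 *	DPRINT(("ctx=%p state=%d\n", ctx, ctx->ctx_state));
 *
 * Output is gated at runtime by the "debug" and "debug_ovfl" sysctls
 * declared in pfm_ctl_table further down.
 */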
242 | ||
243 | /* | |
244 | * 64-bit software counter structure | |
245 | * | |
246 | * the next_reset_type is applied to the next call to pfm_reset_regs() | |
247 | */ | |
248 | typedef struct { | |
249 | unsigned long val; /* virtual 64bit counter value */ | |
250 | unsigned long lval; /* last reset value */ | |
251 | unsigned long long_reset; /* reset value on sampling overflow */ | |
252 | unsigned long short_reset; /* reset value on overflow */ | |
253 | unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */ | |
254 | unsigned long smpl_pmds[4]; /* which pmds are accessed when this counter overflows */ | |
255 | unsigned long seed; /* seed for random-number generator */ | |
256 | unsigned long mask; /* mask for random-number generator */ | |
257 | unsigned int flags; /* notify/do not notify */ | |
258 | unsigned long eventid; /* overflow event identifier */ | |
259 | } pfm_counter_t; | |
260 | ||
261 | /* | |
262 | * context flags | |
263 | */ | |
264 | typedef struct { | |
265 | unsigned int block:1; /* when 1, task will block on user notifications */ | |
266 | unsigned int system:1; /* do system wide monitoring */ | |
267 | unsigned int using_dbreg:1; /* using range restrictions (debug registers) */ | |
268 | unsigned int is_sampling:1; /* true if using a custom format */ | |
269 | unsigned int excl_idle:1; /* exclude idle task in system wide session */ | |
270 | unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */ | |
271 | unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */ | |
272 | unsigned int no_msg:1; /* no message sent on overflow */ | |
273 | unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */ | |
274 | unsigned int reserved:22; | |
275 | } pfm_context_flags_t; | |
276 | ||
277 | #define PFM_TRAP_REASON_NONE 0x0 /* default value */ | |
278 | #define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */ | |
279 | #define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */ | |
280 | ||
281 | ||
282 | /* | |
283 | * perfmon context: encapsulates all the state of a monitoring session | |
284 | */ | |
285 | ||
286 | typedef struct pfm_context { | |
287 | spinlock_t ctx_lock; /* context protection */ | |
288 | ||
289 | pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */ | |
290 | unsigned int ctx_state; /* state: active/inactive (no bitfield) */ | |
291 | ||
292 | struct task_struct *ctx_task; /* task to which context is attached */ | |
293 | ||
294 | unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */ | |
295 | ||
60f1c444 | 296 | struct completion ctx_restart_done; /* use for blocking notification mode */ |
1da177e4 LT |
297 | |
298 | unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */ | |
299 | unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */ | |
300 | unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */ | |
301 | ||
302 | unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */ | |
303 | unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */ | |
304 | unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */ | |
305 | ||
35589a8f | 306 | unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* saved copies of PMC values */ |
1da177e4 LT |
307 | |
308 | unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */ | |
309 | unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */ | |
310 | unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */ | |
311 | unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */ | |
312 | ||
35589a8f KA |
313 | pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */ |
314 | ||
315 | unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */ | |
316 | unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */ | |
1da177e4 | 317 | |
e088a4ad | 318 | unsigned long ctx_saved_psr_up; /* only contains psr.up value */ |
1da177e4 LT |
319 | |
320 | unsigned long ctx_last_activation; /* context last activation number for last_cpu */ | |
321 | unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */ | |
322 | unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */ | |
323 | ||
324 | int ctx_fd; /* file descriptor used by this context */ | |
325 | pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */ | |
326 | ||
327 | pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */ | |
328 | void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */ | |
329 | unsigned long ctx_smpl_size; /* size of sampling buffer */ | |
330 | void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */ | |
331 | ||
332 | wait_queue_head_t ctx_msgq_wait; | |
333 | pfm_msg_t ctx_msgq[PFM_MAX_MSGS]; | |
334 | int ctx_msgq_head; | |
335 | int ctx_msgq_tail; | |
336 | struct fasync_struct *ctx_async_queue; | |
337 | ||
338 | wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */ | |
339 | } pfm_context_t; | |
340 | ||
341 | /* | |
342 | * magic number used to verify that structure is really | |
343 | * a perfmon context | |
344 | */ | |
345 | #define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops) | |
346 | ||
347 | #define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context) | |
348 | ||
349 | #ifdef CONFIG_SMP | |
350 | #define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v) | |
351 | #define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu | |
352 | #else | |
353 | #define SET_LAST_CPU(ctx, v) do {} while(0) | |
354 | #define GET_LAST_CPU(ctx) do {} while(0) | |
355 | #endif | |
356 | ||
357 | ||
358 | #define ctx_fl_block ctx_flags.block | |
359 | #define ctx_fl_system ctx_flags.system | |
360 | #define ctx_fl_using_dbreg ctx_flags.using_dbreg | |
361 | #define ctx_fl_is_sampling ctx_flags.is_sampling | |
362 | #define ctx_fl_excl_idle ctx_flags.excl_idle | |
363 | #define ctx_fl_going_zombie ctx_flags.going_zombie | |
364 | #define ctx_fl_trap_reason ctx_flags.trap_reason | |
365 | #define ctx_fl_no_msg ctx_flags.no_msg | |
366 | #define ctx_fl_can_restart ctx_flags.can_restart | |
367 | ||
368 | #define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0); | |
369 | #define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking | |
370 | ||
371 | /* | |
372 | * global information about all sessions | |
373 | * mostly used to synchronize between system wide and per-process | |
374 | */ | |
375 | typedef struct { | |
376 | spinlock_t pfs_lock; /* lock the structure */ | |
377 | ||
378 | unsigned int pfs_task_sessions; /* number of per task sessions */ | |
379 | unsigned int pfs_sys_sessions; /* number of per system wide sessions */ | |
380 | unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */ | |
381 | unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */ | |
382 | struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */ | |
383 | } pfm_session_t; | |
384 | ||
385 | /* | |
386 | * information about a PMC or PMD. | |
387 | * dep_pmd[]: a bitmask of dependent PMD registers | |
388 | * dep_pmc[]: a bitmask of dependent PMC registers | |
389 | */ | |
390 | typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); | |
391 | typedef struct { | |
392 | unsigned int type; | |
393 | int pm_pos; | |
394 | unsigned long default_value; /* power-on default value */ | |
395 | unsigned long reserved_mask; /* bitmask of reserved bits */ | |
396 | pfm_reg_check_t read_check; | |
397 | pfm_reg_check_t write_check; | |
398 | unsigned long dep_pmd[4]; | |
399 | unsigned long dep_pmc[4]; | |
400 | } pfm_reg_desc_t; | |
401 | ||
402 | /* assume cnum is a valid monitor */ | |
403 | #define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1) | |
404 | ||
405 | /* | |
406 | * This structure is initialized at boot time and contains | |
407 | * a description of the PMU main characteristics. | |
408 | * | |
409 | * If the probe function is defined, detection is based | |
410 | * on its return value: | |
411 | * - 0 means recognized PMU | |
412 | * - anything else means not supported | |
413 | * When the probe function is not defined, then the pmu_family field | |
414 | * is used and it must match the host CPU family such that: | |
415 | * - cpu->family & config->pmu_family != 0 | |
416 | */ | |
417 | typedef struct { | |
418 | unsigned long ovfl_val; /* overflow value for counters */ | |
419 | ||
420 | pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */ | |
421 | pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */ | |
422 | ||
423 | unsigned int num_pmcs; /* number of PMCS: computed at init time */ | |
424 | unsigned int num_pmds; /* number of PMDS: computed at init time */ | |
425 | unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */ | |
426 | unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */ | |
427 | ||
428 | char *pmu_name; /* PMU family name */ | |
429 | unsigned int pmu_family; /* cpuid family pattern used to identify pmu */ | |
430 | unsigned int flags; /* pmu specific flags */ | |
431 | unsigned int num_ibrs; /* number of IBRS: computed at init time */ | |
432 | unsigned int num_dbrs; /* number of DBRS: computed at init time */ | |
433 | unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */ | |
434 | int (*probe)(void); /* customized probe routine */ | |
435 | unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */ | |
436 | } pmu_config_t; | |
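/*
 * Sketch of the detection rule described above (illustrative only; the
 * function name is hypothetical):
 *
 *	static int pmu_config_matches(pmu_config_t *cfg, unsigned int family)
 *	{
 *		if (cfg->probe)
 *			return cfg->probe() == 0;	0 means recognized
 *		return (family & cfg->pmu_family) != 0;
 *	}
 */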
437 | /* | |
438 | * PMU specific flags | |
439 | */ | |
440 | #define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */ | |
441 | ||
442 | /* | |
443 | * debug register related type definitions | |
444 | */ | |
445 | typedef struct { | |
446 | unsigned long ibr_mask:56; | |
447 | unsigned long ibr_plm:4; | |
448 | unsigned long ibr_ig:3; | |
449 | unsigned long ibr_x:1; | |
450 | } ibr_mask_reg_t; | |
451 | ||
452 | typedef struct { | |
453 | unsigned long dbr_mask:56; | |
454 | unsigned long dbr_plm:4; | |
455 | unsigned long dbr_ig:2; | |
456 | unsigned long dbr_w:1; | |
457 | unsigned long dbr_r:1; | |
458 | } dbr_mask_reg_t; | |
459 | ||
460 | typedef union { | |
461 | unsigned long val; | |
462 | ibr_mask_reg_t ibr; | |
463 | dbr_mask_reg_t dbr; | |
464 | } dbreg_t; | |
465 | ||
466 | ||
467 | /* | |
468 | * perfmon command descriptions | |
469 | */ | |
470 | typedef struct { | |
471 | int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); | |
472 | char *cmd_name; | |
473 | int cmd_flags; | |
474 | unsigned int cmd_narg; | |
475 | size_t cmd_argsize; | |
476 | int (*cmd_getsize)(void *arg, size_t *sz); | |
477 | } pfm_cmd_desc_t; | |
478 | ||
479 | #define PFM_CMD_FD 0x01 /* command requires a file descriptor */ | |
480 | #define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */ | |
481 | #define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */ | |
482 | #define PFM_CMD_STOP 0x08 /* command does not work on zombie context */ | |
483 | ||
484 | ||
485 | #define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name | |
486 | #define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ) | |
487 | #define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW) | |
488 | #define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD) | |
489 | #define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP) | |
490 | ||
491 | #define PFM_CMD_ARG_MANY -1 /* cannot be zero */ | |
492 | ||
1da177e4 LT |
493 | typedef struct { |
494 | unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ | |
495 | unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */ | |
496 | unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */ | |
497 | unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */ | |
498 | unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */ | |
499 | unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */ | |
500 | unsigned long pfm_smpl_handler_calls; | |
501 | unsigned long pfm_smpl_handler_cycles; | |
502 | char pad[SMP_CACHE_BYTES] ____cacheline_aligned; | |
503 | } pfm_stats_t; | |
504 | ||
505 | /* | |
506 | * perfmon internal variables | |
507 | */ | |
508 | static pfm_stats_t pfm_stats[NR_CPUS]; | |
509 | static pfm_session_t pfm_sessions; /* global sessions information */ | |
510 | ||
a9f6a0dd | 511 | static DEFINE_SPINLOCK(pfm_alt_install_check); |
a1ecf7f6 TL |
512 | static pfm_intr_handler_desc_t *pfm_alt_intr_handler; |
513 | ||
1da177e4 LT |
514 | static struct proc_dir_entry *perfmon_dir; |
515 | static pfm_uuid_t pfm_null_uuid = {0,}; | |
516 | ||
517 | static spinlock_t pfm_buffer_fmt_lock; | |
518 | static LIST_HEAD(pfm_buffer_fmt_list); | |
519 | ||
520 | static pmu_config_t *pmu_conf; | |
521 | ||
522 | /* sysctl() controls */ | |
4944930a SE |
523 | pfm_sysctl_t pfm_sysctl; |
524 | EXPORT_SYMBOL(pfm_sysctl); | |
1da177e4 | 525 | |
2841efa6 | 526 | static struct ctl_table pfm_ctl_table[] = { |
4e009901 | 527 | { |
4e009901 EB |
528 | .procname = "debug", |
529 | .data = &pfm_sysctl.debug, | |
530 | .maxlen = sizeof(int), | |
531 | .mode = 0666, | |
6d456111 | 532 | .proc_handler = proc_dointvec, |
4e009901 EB |
533 | }, |
534 | { | |
4e009901 EB |
535 | .procname = "debug_ovfl", |
536 | .data = &pfm_sysctl.debug_ovfl, | |
537 | .maxlen = sizeof(int), | |
538 | .mode = 0666, | |
6d456111 | 539 | .proc_handler = proc_dointvec, |
4e009901 EB |
540 | }, |
541 | { | |
4e009901 EB |
542 | .procname = "fastctxsw", |
543 | .data = &pfm_sysctl.fastctxsw, | |
544 | .maxlen = sizeof(int), | |
545 | .mode = 0600, | |
6d456111 | 546 | .proc_handler = proc_dointvec, |
4e009901 EB |
547 | }, |
548 | { | |
4e009901 EB |
549 | .procname = "expert_mode", |
550 | .data = &pfm_sysctl.expert_mode, | |
551 | .maxlen = sizeof(int), | |
552 | .mode = 0600, | |
6d456111 | 553 | .proc_handler = proc_dointvec, |
4e009901 EB |
554 | }, |
555 | {} | |
1da177e4 | 556 | }; |
2841efa6 | 557 | static struct ctl_table pfm_sysctl_dir[] = { |
4e009901 | 558 | { |
4e009901 | 559 | .procname = "perfmon", |
e3ad42be | 560 | .mode = 0555, |
4e009901 EB |
561 | .child = pfm_ctl_table, |
562 | }, | |
563 | {} | |
1da177e4 | 564 | }; |
2841efa6 | 565 | static struct ctl_table pfm_sysctl_root[] = { |
4e009901 | 566 | { |
4e009901 | 567 | .procname = "kernel", |
e3ad42be | 568 | .mode = 0555, |
4e009901 EB |
569 | .child = pfm_sysctl_dir, |
570 | }, | |
571 | {} | |
1da177e4 LT |
572 | }; |
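/*
 * The nested tables above expose the controls as (paths assume the usual
 * /proc/sys mount point):
 *
 *	/proc/sys/kernel/perfmon/debug		0666
 *	/proc/sys/kernel/perfmon/debug_ovfl	0666
 *	/proc/sys/kernel/perfmon/fastctxsw	0600
 *	/proc/sys/kernel/perfmon/expert_mode	0600
 */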
573 | static struct ctl_table_header *pfm_sysctl_header; | |
574 | ||
575 | static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); | |
1da177e4 LT |
576 | |
577 | #define pfm_get_cpu_var(v) __ia64_per_cpu_var(v) | |
578 | #define pfm_get_cpu_data(a,b) per_cpu(a, b) | |
579 | ||
580 | static inline void | |
581 | pfm_put_task(struct task_struct *task) | |
582 | { | |
583 | if (task != current) put_task_struct(task); | |
584 | } | |
585 | ||
1da177e4 LT |
586 | static inline void |
587 | pfm_reserve_page(unsigned long a) | |
588 | { | |
589 | SetPageReserved(vmalloc_to_page((void *)a)); | |
590 | } | |
591 | static inline void | |
592 | pfm_unreserve_page(unsigned long a) | |
593 | { | |
594 | ClearPageReserved(vmalloc_to_page((void*)a)); | |
595 | } | |
596 | ||
597 | static inline unsigned long | |
598 | pfm_protect_ctx_ctxsw(pfm_context_t *x) | |
599 | { | |
600 | spin_lock(&(x)->ctx_lock); | |
601 | return 0UL; | |
602 | } | |
603 | ||
24b8e0cc | 604 | static inline void |
1da177e4 LT |
605 | pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f) |
606 | { | |
607 | spin_unlock(&(x)->ctx_lock); | |
608 | } | |
609 | ||
c74a1cbb | 610 | /* forward declaration */ |
09579770 | 611 | static const struct dentry_operations pfmfs_dentry_operations; |
1da177e4 | 612 | |
51139ada AV |
613 | static struct dentry * |
614 | pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) | |
1da177e4 | 615 | { |
c74a1cbb AV |
616 | return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations, |
617 | PFMFS_MAGIC); | |
1da177e4 LT |
618 | } |
619 | ||
620 | static struct file_system_type pfm_fs_type = { | |
621 | .name = "pfmfs", | |
51139ada | 622 | .mount = pfmfs_mount, |
1da177e4 LT |
623 | .kill_sb = kill_anon_super, |
624 | }; | |
7f78e035 | 625 | MODULE_ALIAS_FS("pfmfs"); |
1da177e4 LT |
626 | |
627 | DEFINE_PER_CPU(unsigned long, pfm_syst_info); | |
628 | DEFINE_PER_CPU(struct task_struct *, pmu_owner); | |
629 | DEFINE_PER_CPU(pfm_context_t *, pmu_ctx); | |
630 | DEFINE_PER_CPU(unsigned long, pmu_activation_number); | |
fffcc150 | 631 | EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info); |
1da177e4 LT |
632 | |
633 | ||
634 | /* forward declaration */ | |
5dfe4c96 | 635 | static const struct file_operations pfm_file_ops; |
1da177e4 LT |
636 | |
637 | /* | |
638 | * forward declarations | |
639 | */ | |
640 | #ifndef CONFIG_SMP | |
641 | static void pfm_lazy_save_regs (struct task_struct *ta); | |
642 | #endif | |
643 | ||
644 | void dump_pmu_state(const char *); | |
645 | static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); | |
646 | ||
647 | #include "perfmon_itanium.h" | |
648 | #include "perfmon_mckinley.h" | |
9179cb65 | 649 | #include "perfmon_montecito.h" |
1da177e4 LT |
650 | #include "perfmon_generic.h" |
651 | ||
652 | static pmu_config_t *pmu_confs[]={ | |
9179cb65 | 653 | &pmu_conf_mont, |
1da177e4 LT |
654 | &pmu_conf_mck, |
655 | &pmu_conf_ita, | |
656 | &pmu_conf_gen, /* must be last */ | |
657 | NULL | |
658 | }; | |
659 | ||
660 | ||
661 | static int pfm_end_notify_user(pfm_context_t *ctx); | |
662 | ||
663 | static inline void | |
664 | pfm_clear_psr_pp(void) | |
665 | { | |
666 | ia64_rsm(IA64_PSR_PP); | |
667 | ia64_srlz_i(); | |
668 | } | |
669 | ||
670 | static inline void | |
671 | pfm_set_psr_pp(void) | |
672 | { | |
673 | ia64_ssm(IA64_PSR_PP); | |
674 | ia64_srlz_i(); | |
675 | } | |
676 | ||
677 | static inline void | |
678 | pfm_clear_psr_up(void) | |
679 | { | |
680 | ia64_rsm(IA64_PSR_UP); | |
681 | ia64_srlz_i(); | |
682 | } | |
683 | ||
684 | static inline void | |
685 | pfm_set_psr_up(void) | |
686 | { | |
687 | ia64_ssm(IA64_PSR_UP); | |
688 | ia64_srlz_i(); | |
689 | } | |
690 | ||
691 | static inline unsigned long | |
692 | pfm_get_psr(void) | |
693 | { | |
694 | unsigned long tmp; | |
695 | tmp = ia64_getreg(_IA64_REG_PSR); | |
696 | ia64_srlz_i(); | |
697 | return tmp; | |
698 | } | |
699 | ||
700 | static inline void | |
701 | pfm_set_psr_l(unsigned long val) | |
702 | { | |
703 | ia64_setreg(_IA64_REG_PSR_L, val); | |
704 | ia64_srlz_i(); | |
705 | } | |
706 | ||
707 | static inline void | |
708 | pfm_freeze_pmu(void) | |
709 | { | |
710 | ia64_set_pmc(0,1UL); | |
711 | ia64_srlz_d(); | |
712 | } | |
713 | ||
714 | static inline void | |
715 | pfm_unfreeze_pmu(void) | |
716 | { | |
717 | ia64_set_pmc(0,0UL); | |
718 | ia64_srlz_d(); | |
719 | } | |
720 | ||
721 | static inline void | |
722 | pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs) | |
723 | { | |
724 | int i; | |
725 | ||
726 | for (i=0; i < nibrs; i++) { | |
727 | ia64_set_ibr(i, ibrs[i]); | |
728 | ia64_dv_serialize_instruction(); | |
729 | } | |
730 | ia64_srlz_i(); | |
731 | } | |
732 | ||
733 | static inline void | |
734 | pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs) | |
735 | { | |
736 | int i; | |
737 | ||
738 | for (i=0; i < ndbrs; i++) { | |
739 | ia64_set_dbr(i, dbrs[i]); | |
740 | ia64_dv_serialize_data(); | |
741 | } | |
742 | ia64_srlz_d(); | |
743 | } | |
744 | ||
745 | /* | |
746 | * PMD[i] must be a counter. no check is made | |
747 | */ | |
748 | static inline unsigned long | |
749 | pfm_read_soft_counter(pfm_context_t *ctx, int i) | |
750 | { | |
751 | return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val); | |
752 | } | |
753 | ||
754 | /* | |
755 | * PMD[i] must be a counter. no check is made | |
756 | */ | |
757 | static inline void | |
758 | pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val) | |
759 | { | |
760 | unsigned long ovfl_val = pmu_conf->ovfl_val; | |
761 | ||
762 | ctx->ctx_pmds[i].val = val & ~ovfl_val; | |
763 | /* | |
764 | * writing to the unimplemented part is ignored, so we do not need to | |
765 | * mask off top part | |
766 | */ | |
767 | ia64_set_pmd(i, val & ovfl_val); | |
768 | } | |
769 | ||
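/*
 * Sketch of the 64-bit software counter split implemented by the two
 * helpers above (values are illustrative, assuming a 47-bit hardware
 * counter, i.e. ovfl_val == (1UL<<47)-1):
 *
 *	pfm_write_soft_counter(ctx, i, 0x0001000000000042);
 *	  ctx->ctx_pmds[i].val = 0x0001000000000000	upper, software part
 *	  PMD[i]               = 0x0000000000000042	lower, hardware part
 *
 *	pfm_read_soft_counter(ctx, i) adds the live PMD bits back to the
 *	software part to rebuild the full 64-bit value.
 */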
770 | static pfm_msg_t * | |
771 | pfm_get_new_msg(pfm_context_t *ctx) | |
772 | { | |
773 | int idx, next; | |
774 | ||
775 | next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS; | |
776 | ||
777 | DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); | |
778 | if (next == ctx->ctx_msgq_head) return NULL; | |
779 | ||
780 | idx = ctx->ctx_msgq_tail; | |
781 | ctx->ctx_msgq_tail = next; | |
782 | ||
783 | DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx)); | |
784 | ||
785 | return ctx->ctx_msgq+idx; | |
786 | } | |
787 | ||
788 | static pfm_msg_t * | |
789 | pfm_get_next_msg(pfm_context_t *ctx) | |
790 | { | |
791 | pfm_msg_t *msg; | |
792 | ||
793 | DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); | |
794 | ||
795 | if (PFM_CTXQ_EMPTY(ctx)) return NULL; | |
796 | ||
797 | /* | |
798 | * get oldest message | |
799 | */ | |
800 | msg = ctx->ctx_msgq+ctx->ctx_msgq_head; | |
801 | ||
802 | /* | |
803 | * and move forward | |
804 | */ | |
805 | ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS; | |
806 | ||
807 | DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type)); | |
808 | ||
809 | return msg; | |
810 | } | |
811 | ||
812 | static void | |
813 | pfm_reset_msgq(pfm_context_t *ctx) | |
814 | { | |
815 | ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; | |
816 | DPRINT(("ctx=%p msgq reset\n", ctx)); | |
817 | } | |
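/*
 * The message queue is a fixed ring of PFM_MAX_MSGS slots indexed by
 * ctx_msgq_head (oldest) and ctx_msgq_tail (next free). One slot is always
 * left unused so that head == tail means empty (see PFM_CTXQ_EMPTY).
 * Illustrative producer/consumer pairing (the call sites are hypothetical):
 *
 *	msg = pfm_get_new_msg(ctx);	slot at tail, NULL if the ring is full
 *	... fill in *msg ...
 *	wake_up_interruptible(&ctx->ctx_msgq_wait);
 *
 *	msg = pfm_get_next_msg(ctx);	oldest message, NULL if empty
 */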
818 | ||
819 | static void * | |
820 | pfm_rvmalloc(unsigned long size) | |
821 | { | |
822 | void *mem; | |
823 | unsigned long addr; | |
824 | ||
825 | size = PAGE_ALIGN(size); | |
e21763db | 826 | mem = vzalloc(size); |
1da177e4 LT |
827 | if (mem) { |
828 | //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem); | |
1da177e4 LT |
829 | addr = (unsigned long)mem; |
830 | while (size > 0) { | |
831 | pfm_reserve_page(addr); | |
832 | addr+=PAGE_SIZE; | |
833 | size-=PAGE_SIZE; | |
834 | } | |
835 | } | |
836 | return mem; | |
837 | } | |
838 | ||
839 | static void | |
840 | pfm_rvfree(void *mem, unsigned long size) | |
841 | { | |
842 | unsigned long addr; | |
843 | ||
844 | if (mem) { | |
845 | DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size)); | |
846 | addr = (unsigned long) mem; | |
847 | while ((long) size > 0) { | |
848 | pfm_unreserve_page(addr); | |
849 | addr+=PAGE_SIZE; | |
850 | size-=PAGE_SIZE; | |
851 | } | |
852 | vfree(mem); | |
853 | } | |
854 | return; | |
855 | } | |
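/*
 * pfm_rvmalloc()/pfm_rvfree() pair a vzalloc'd buffer with per-page
 * SetPageReserved()/ClearPageReserved() so the sampling buffer can later be
 * mapped into user space. Pairing sketch (the size is hypothetical):
 *
 *	void *buf = pfm_rvmalloc(64*1024);	size is PAGE_ALIGN'ed
 *	...
 *	pfm_rvfree(buf, 64*1024);		must be passed the same size
 */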
856 | ||
857 | static pfm_context_t * | |
f8e811b9 | 858 | pfm_context_alloc(int ctx_flags) |
1da177e4 LT |
859 | { |
860 | pfm_context_t *ctx; | |
861 | ||
862 | /* | |
863 | * allocate context descriptor | |
864 | * must be able to free with interrupts disabled | |
865 | */ | |
52fd9108 | 866 | ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL); |
1da177e4 | 867 | if (ctx) { |
1da177e4 | 868 | DPRINT(("alloc ctx @%p\n", ctx)); |
f8e811b9 AV |
869 | |
870 | /* | |
871 | * init context protection lock | |
872 | */ | |
873 | spin_lock_init(&ctx->ctx_lock); | |
874 | ||
875 | /* | |
876 | * context is unloaded | |
877 | */ | |
878 | ctx->ctx_state = PFM_CTX_UNLOADED; | |
879 | ||
880 | /* | |
881 | * initialization of context's flags | |
882 | */ | |
883 | ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0; | |
884 | ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0; | |
885 | ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0; | |
886 | /* | |
887 | * will move to set properties | |
888 | * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0; | |
889 | */ | |
890 | ||
891 | /* | |
892 | * init restart semaphore to locked | |
893 | */ | |
894 | init_completion(&ctx->ctx_restart_done); | |
895 | ||
896 | /* | |
897 | * activation is used in SMP only | |
898 | */ | |
899 | ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; | |
900 | SET_LAST_CPU(ctx, -1); | |
901 | ||
902 | /* | |
903 | * initialize notification message queue | |
904 | */ | |
905 | ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; | |
906 | init_waitqueue_head(&ctx->ctx_msgq_wait); | |
907 | init_waitqueue_head(&ctx->ctx_zombieq); | |
908 | ||
1da177e4 LT |
909 | } |
910 | return ctx; | |
911 | } | |
912 | ||
913 | static void | |
914 | pfm_context_free(pfm_context_t *ctx) | |
915 | { | |
916 | if (ctx) { | |
917 | DPRINT(("free ctx @%p\n", ctx)); | |
918 | kfree(ctx); | |
919 | } | |
920 | } | |
921 | ||
922 | static void | |
923 | pfm_mask_monitoring(struct task_struct *task) | |
924 | { | |
925 | pfm_context_t *ctx = PFM_GET_CTX(task); | |
1da177e4 LT |
926 | unsigned long mask, val, ovfl_mask; |
927 | int i; | |
928 | ||
19c5870c | 929 | DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
930 | |
931 | ovfl_mask = pmu_conf->ovfl_val; | |
932 | /* | |
933 | * monitoring can only be masked as a result of a valid | |
934 | * counter overflow. In UP, it means that the PMU still | |
935 | * has an owner. Note that the owner can be different | |
936 | * from the current task. However the PMU state belongs | |
937 | * to the owner. | |
938 | * In SMP, a valid overflow only happens when task is | |
939 | * current. Therefore if we come here, we know that | |
940 | * the PMU state belongs to the current task, therefore | |
941 | * we can access the live registers. | |
942 | * | |
943 | * So in both cases, the live register contains the owner's | |
944 | * state. We can ONLY touch the PMU registers and NOT the PSR. | |
945 | * | |
35589a8f | 946 | * As a consequence to this call, the ctx->th_pmds[] array |
1da177e4 LT |
947 | * contains stale information which must be ignored |
948 | * when context is reloaded AND monitoring is active (see | |
949 | * pfm_restart). | |
950 | */ | |
951 | mask = ctx->ctx_used_pmds[0]; | |
952 | for (i = 0; mask; i++, mask>>=1) { | |
953 | /* skip non used pmds */ | |
954 | if ((mask & 0x1) == 0) continue; | |
955 | val = ia64_get_pmd(i); | |
956 | ||
957 | if (PMD_IS_COUNTING(i)) { | |
958 | /* | |
959 | * we rebuild the full 64 bit value of the counter | |
960 | */ | |
961 | ctx->ctx_pmds[i].val += (val & ovfl_mask); | |
962 | } else { | |
963 | ctx->ctx_pmds[i].val = val; | |
964 | } | |
965 | DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n", | |
966 | i, | |
967 | ctx->ctx_pmds[i].val, | |
968 | val & ovfl_mask)); | |
969 | } | |
970 | /* | |
971 | * mask monitoring by setting the privilege level to 0 | |
972 | * we cannot use psr.pp/psr.up for this, it is controlled by | |
973 | * the user | |
974 | * | |
975 | * if task is current, modify actual registers, otherwise modify | |
976 | * thread save state, i.e., what will be restored in pfm_load_regs() | |
977 | */ | |
978 | mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; | |
979 | for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) { | |
980 | if ((mask & 0x1) == 0UL) continue; | |
35589a8f KA |
981 | ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL); |
982 | ctx->th_pmcs[i] &= ~0xfUL; | |
983 | DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i])); | |
1da177e4 LT |
984 | } |
985 | /* | |
986 | * make all of this visible | |
987 | */ | |
988 | ia64_srlz_d(); | |
989 | } | |
990 | ||
991 | /* | |
992 | * must always be done with task == current | |
993 | * | |
994 | * context must be in MASKED state when calling | |
995 | */ | |
996 | static void | |
997 | pfm_restore_monitoring(struct task_struct *task) | |
998 | { | |
999 | pfm_context_t *ctx = PFM_GET_CTX(task); | |
1da177e4 LT |
1000 | unsigned long mask, ovfl_mask; |
1001 | unsigned long psr, val; | |
1002 | int i, is_system; | |
1003 | ||
1004 | is_system = ctx->ctx_fl_system; | |
1005 | ovfl_mask = pmu_conf->ovfl_val; | |
1006 | ||
1007 | if (task != current) { | |
19c5870c | 1008 | printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current)); |
1da177e4 LT |
1009 | return; |
1010 | } | |
1011 | if (ctx->ctx_state != PFM_CTX_MASKED) { | |
1012 | printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__, | |
19c5870c | 1013 | task_pid_nr(task), task_pid_nr(current), ctx->ctx_state); |
1da177e4 LT |
1014 | return; |
1015 | } | |
1016 | psr = pfm_get_psr(); | |
1017 | /* | |
1018 | * monitoring is masked via the PMC. | |
1019 | * As we restore their value, we do not want each counter to | |
1020 | * restart right away. We stop monitoring using the PSR, | |
1021 | * restore the PMC (and PMD) and then re-establish the psr | |
1022 | * as it was. Note that there can be no pending overflow at | |
1023 | * this point, because monitoring was MASKED. | |
1024 | * | |
1025 | * system-wide session are pinned and self-monitoring | |
1026 | */ | |
1027 | if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) { | |
1028 | /* disable dcr pp */ | |
1029 | ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); | |
1030 | pfm_clear_psr_pp(); | |
1031 | } else { | |
1032 | pfm_clear_psr_up(); | |
1033 | } | |
1034 | /* | |
1035 | * first, we restore the PMD | |
1036 | */ | |
1037 | mask = ctx->ctx_used_pmds[0]; | |
1038 | for (i = 0; mask; i++, mask>>=1) { | |
1039 | /* skip non used pmds */ | |
1040 | if ((mask & 0x1) == 0) continue; | |
1041 | ||
1042 | if (PMD_IS_COUNTING(i)) { | |
1043 | /* | |
1044 | * we split the 64bit value according to | |
1045 | * counter width | |
1046 | */ | |
1047 | val = ctx->ctx_pmds[i].val & ovfl_mask; | |
1048 | ctx->ctx_pmds[i].val &= ~ovfl_mask; | |
1049 | } else { | |
1050 | val = ctx->ctx_pmds[i].val; | |
1051 | } | |
1052 | ia64_set_pmd(i, val); | |
1053 | ||
1054 | DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n", | |
1055 | i, | |
1056 | ctx->ctx_pmds[i].val, | |
1057 | val)); | |
1058 | } | |
1059 | /* | |
1060 | * restore the PMCs | |
1061 | */ | |
1062 | mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; | |
1063 | for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) { | |
1064 | if ((mask & 0x1) == 0UL) continue; | |
35589a8f KA |
1065 | ctx->th_pmcs[i] = ctx->ctx_pmcs[i]; |
1066 | ia64_set_pmc(i, ctx->th_pmcs[i]); | |
19c5870c AD |
1067 | DPRINT(("[%d] pmc[%d]=0x%lx\n", |
1068 | task_pid_nr(task), i, ctx->th_pmcs[i])); | |
1da177e4 LT |
1069 | } |
1070 | ia64_srlz_d(); | |
1071 | ||
1072 | /* | |
1073 | * must restore DBR/IBR because they could be modified while masked | |
1074 | * XXX: need to optimize | |
1075 | */ | |
1076 | if (ctx->ctx_fl_using_dbreg) { | |
1077 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | |
1078 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | |
1079 | } | |
1080 | ||
1081 | /* | |
1082 | * now restore PSR | |
1083 | */ | |
1084 | if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) { | |
1085 | /* enable dcr pp */ | |
1086 | ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); | |
1087 | ia64_srlz_i(); | |
1088 | } | |
1089 | pfm_set_psr_l(psr); | |
1090 | } | |
1091 | ||
1092 | static inline void | |
1093 | pfm_save_pmds(unsigned long *pmds, unsigned long mask) | |
1094 | { | |
1095 | int i; | |
1096 | ||
1097 | ia64_srlz_d(); | |
1098 | ||
1099 | for (i=0; mask; i++, mask>>=1) { | |
1100 | if (mask & 0x1) pmds[i] = ia64_get_pmd(i); | |
1101 | } | |
1102 | } | |
1103 | ||
1104 | /* | |
1105 | * reload from thread state (used for ctxsw only) | |
1106 | */ | |
1107 | static inline void | |
1108 | pfm_restore_pmds(unsigned long *pmds, unsigned long mask) | |
1109 | { | |
1110 | int i; | |
1111 | unsigned long val, ovfl_val = pmu_conf->ovfl_val; | |
1112 | ||
1113 | for (i=0; mask; i++, mask>>=1) { | |
1114 | if ((mask & 0x1) == 0) continue; | |
1115 | val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i]; | |
1116 | ia64_set_pmd(i, val); | |
1117 | } | |
1118 | ia64_srlz_d(); | |
1119 | } | |
1120 | ||
1121 | /* | |
1122 | * propagate PMD from context to thread-state | |
1123 | */ | |
1124 | static inline void | |
1125 | pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx) | |
1126 | { | |
1da177e4 LT |
1127 | unsigned long ovfl_val = pmu_conf->ovfl_val; |
1128 | unsigned long mask = ctx->ctx_all_pmds[0]; | |
1129 | unsigned long val; | |
1130 | int i; | |
1131 | ||
1132 | DPRINT(("mask=0x%lx\n", mask)); | |
1133 | ||
1134 | for (i=0; mask; i++, mask>>=1) { | |
1135 | ||
1136 | val = ctx->ctx_pmds[i].val; | |
1137 | ||
1138 | /* | |
1139 | * We break up the 64 bit value into 2 pieces | |
1140 | * the lower bits go to the machine state in the | |
1141 | * thread (will be reloaded on ctxsw in). | |
1142 | * The upper part stays in the soft-counter. | |
1143 | */ | |
1144 | if (PMD_IS_COUNTING(i)) { | |
1145 | ctx->ctx_pmds[i].val = val & ~ovfl_val; | |
1146 | val &= ovfl_val; | |
1147 | } | |
35589a8f | 1148 | ctx->th_pmds[i] = val; |
1da177e4 LT |
1149 | |
1150 | DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n", | |
1151 | i, | |
35589a8f | 1152 | ctx->th_pmds[i], |
1da177e4 LT |
1153 | ctx->ctx_pmds[i].val)); |
1154 | } | |
1155 | } | |
1156 | ||
1157 | /* | |
1158 | * propagate PMC from context to thread-state | |
1159 | */ | |
1160 | static inline void | |
1161 | pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx) | |
1162 | { | |
1da177e4 LT |
1163 | unsigned long mask = ctx->ctx_all_pmcs[0]; |
1164 | int i; | |
1165 | ||
1166 | DPRINT(("mask=0x%lx\n", mask)); | |
1167 | ||
1168 | for (i=0; mask; i++, mask>>=1) { | |
1169 | /* masking 0 with ovfl_val yields 0 */ | |
35589a8f KA |
1170 | ctx->th_pmcs[i] = ctx->ctx_pmcs[i]; |
1171 | DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i])); | |
1da177e4 LT |
1172 | } |
1173 | } | |
1174 | ||
1175 | ||
1176 | ||
1177 | static inline void | |
1178 | pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask) | |
1179 | { | |
1180 | int i; | |
1181 | ||
1182 | for (i=0; mask; i++, mask>>=1) { | |
1183 | if ((mask & 0x1) == 0) continue; | |
1184 | ia64_set_pmc(i, pmcs[i]); | |
1185 | } | |
1186 | ia64_srlz_d(); | |
1187 | } | |
1188 | ||
1189 | static inline int | |
1190 | pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b) | |
1191 | { | |
1192 | return memcmp(a, b, sizeof(pfm_uuid_t)); | |
1193 | } | |
1194 | ||
1195 | static inline int | |
1196 | pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs) | |
1197 | { | |
1198 | int ret = 0; | |
1199 | if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs); | |
1200 | return ret; | |
1201 | } | |
1202 | ||
1203 | static inline int | |
1204 | pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size) | |
1205 | { | |
1206 | int ret = 0; | |
1207 | if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size); | |
1208 | return ret; | |
1209 | } | |
1210 | ||
1211 | ||
1212 | static inline int | |
1213 | pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, | |
1214 | int cpu, void *arg) | |
1215 | { | |
1216 | int ret = 0; | |
1217 | if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg); | |
1218 | return ret; | |
1219 | } | |
1220 | ||
1221 | static inline int | |
1222 | pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags, | |
1223 | int cpu, void *arg) | |
1224 | { | |
1225 | int ret = 0; | |
1226 | if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg); | |
1227 | return ret; | |
1228 | } | |
1229 | ||
1230 | static inline int | |
1231 | pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) | |
1232 | { | |
1233 | int ret = 0; | |
1234 | if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs); | |
1235 | return ret; | |
1236 | } | |
1237 | ||
1238 | static inline int | |
1239 | pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) | |
1240 | { | |
1241 | int ret = 0; | |
1242 | if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs); | |
1243 | return ret; | |
1244 | } | |
1245 | ||
1246 | static pfm_buffer_fmt_t * | |
1247 | __pfm_find_buffer_fmt(pfm_uuid_t uuid) | |
1248 | { | |
1249 | struct list_head * pos; | |
1250 | pfm_buffer_fmt_t * entry; | |
1251 | ||
1252 | list_for_each(pos, &pfm_buffer_fmt_list) { | |
1253 | entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list); | |
1254 | if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0) | |
1255 | return entry; | |
1256 | } | |
1257 | return NULL; | |
1258 | } | |
1259 | ||
1260 | /* | |
1261 | * find a buffer format based on its uuid | |
1262 | */ | |
1263 | static pfm_buffer_fmt_t * | |
1264 | pfm_find_buffer_fmt(pfm_uuid_t uuid) | |
1265 | { | |
1266 | pfm_buffer_fmt_t * fmt; | |
1267 | spin_lock(&pfm_buffer_fmt_lock); | |
1268 | fmt = __pfm_find_buffer_fmt(uuid); | |
1269 | spin_unlock(&pfm_buffer_fmt_lock); | |
1270 | return fmt; | |
1271 | } | |
1272 | ||
1273 | int | |
1274 | pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt) | |
1275 | { | |
1276 | int ret = 0; | |
1277 | ||
1278 | /* some sanity checks */ | |
1279 | if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL; | |
1280 | ||
1281 | /* we need at least a handler */ | |
1282 | if (fmt->fmt_handler == NULL) return -EINVAL; | |
1283 | ||
1284 | /* | |
1285 | * XXX: need check validity of fmt_arg_size | |
1286 | */ | |
1287 | ||
1288 | spin_lock(&pfm_buffer_fmt_lock); | |
1289 | ||
1290 | if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) { | |
1291 | printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name); | |
1292 | ret = -EBUSY; | |
1293 | goto out; | |
1294 | } | |
1295 | list_add(&fmt->fmt_list, &pfm_buffer_fmt_list); | |
1296 | printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name); | |
1297 | ||
1298 | out: | |
1299 | spin_unlock(&pfm_buffer_fmt_lock); | |
1300 | return ret; | |
1301 | } | |
1302 | EXPORT_SYMBOL(pfm_register_buffer_fmt); | |
1303 | ||
1304 | int | |
1305 | pfm_unregister_buffer_fmt(pfm_uuid_t uuid) | |
1306 | { | |
1307 | pfm_buffer_fmt_t *fmt; | |
1308 | int ret = 0; | |
1309 | ||
1310 | spin_lock(&pfm_buffer_fmt_lock); | |
1311 | ||
1312 | fmt = __pfm_find_buffer_fmt(uuid); | |
1313 | if (!fmt) { | |
1314 | printk(KERN_ERR "perfmon: cannot unregister format, not found\n"); | |
1315 | ret = -EINVAL; | |
1316 | goto out; | |
1317 | } | |
1318 | list_del_init(&fmt->fmt_list); | |
1319 | printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name); | |
1320 | ||
1321 | out: | |
1322 | spin_unlock(&pfm_buffer_fmt_lock); | |
1323 | return ret; | |
1324 | ||
1325 | } | |
1326 | EXPORT_SYMBOL(pfm_unregister_buffer_fmt); | |
1327 | ||
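/*
 * Registration sketch for a sampling format module (names prefixed my_ are
 * hypothetical; only fmt_name and fmt_handler are mandatory):
 *
 *	static pfm_buffer_fmt_t my_fmt = {
 *		.fmt_name    = "my-sampling-format",
 *		.fmt_uuid    = MY_FMT_UUID,
 *		.fmt_handler = my_fmt_handler,
 *		.fmt_getsize = my_fmt_getsize,	optional, like the other hooks
 *	};
 *
 *	ret = pfm_register_buffer_fmt(&my_fmt);	-EBUSY on duplicate uuid
 *	...
 *	pfm_unregister_buffer_fmt(my_fmt.fmt_uuid);
 */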
1328 | static int | |
1329 | pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) | |
1330 | { | |
1331 | unsigned long flags; | |
1332 | /* | |
72fdbdce | 1333 | * validity checks on cpu_mask have been done upstream |
1da177e4 LT |
1334 | */ |
1335 | LOCK_PFS(flags); | |
1336 | ||
1337 | DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", | |
1338 | pfm_sessions.pfs_sys_sessions, | |
1339 | pfm_sessions.pfs_task_sessions, | |
1340 | pfm_sessions.pfs_sys_use_dbregs, | |
1341 | is_syswide, | |
1342 | cpu)); | |
1343 | ||
1344 | if (is_syswide) { | |
1345 | /* | |
1346 | * cannot mix system wide and per-task sessions | |
1347 | */ | |
1348 | if (pfm_sessions.pfs_task_sessions > 0UL) { | |
1349 | DPRINT(("system wide not possible, %u conflicting task_sessions\n", | |
1350 | pfm_sessions.pfs_task_sessions)); | |
1351 | goto abort; | |
1352 | } | |
1353 | ||
1354 | if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict; | |
1355 | ||
1356 | DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id())); | |
1357 | ||
1358 | pfm_sessions.pfs_sys_session[cpu] = task; | |
1359 | ||
1360 | pfm_sessions.pfs_sys_sessions++ ; | |
1361 | ||
1362 | } else { | |
1363 | if (pfm_sessions.pfs_sys_sessions) goto abort; | |
1364 | pfm_sessions.pfs_task_sessions++; | |
1365 | } | |
1366 | ||
1367 | DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", | |
1368 | pfm_sessions.pfs_sys_sessions, | |
1369 | pfm_sessions.pfs_task_sessions, | |
1370 | pfm_sessions.pfs_sys_use_dbregs, | |
1371 | is_syswide, | |
1372 | cpu)); | |
1373 | ||
8df5a500 | 1374 | /* |
91d591c3 | 1375 | * Force idle() into poll mode |
8df5a500 | 1376 | */ |
91d591c3 | 1377 | cpu_idle_poll_ctrl(true); |
8df5a500 | 1378 | |
1da177e4 LT |
1379 | UNLOCK_PFS(flags); |
1380 | ||
1381 | return 0; | |
1382 | ||
1383 | error_conflict: | |
1384 | DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n", | |
19c5870c | 1385 | task_pid_nr(pfm_sessions.pfs_sys_session[cpu]), |
a1ecf7f6 | 1386 | cpu)); |
1da177e4 LT |
1387 | abort: |
1388 | UNLOCK_PFS(flags); | |
1389 | ||
1390 | return -EBUSY; | |
1391 | ||
1392 | } | |
1393 | ||
1394 | static int | |
1395 | pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) | |
1396 | { | |
1397 | unsigned long flags; | |
1398 | /* | |
72fdbdce | 1399 | * validity checks on cpu_mask have been done upstream |
1da177e4 LT |
1400 | */ |
1401 | LOCK_PFS(flags); | |
1402 | ||
1403 | DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", | |
1404 | pfm_sessions.pfs_sys_sessions, | |
1405 | pfm_sessions.pfs_task_sessions, | |
1406 | pfm_sessions.pfs_sys_use_dbregs, | |
1407 | is_syswide, | |
1408 | cpu)); | |
1409 | ||
1410 | ||
1411 | if (is_syswide) { | |
1412 | pfm_sessions.pfs_sys_session[cpu] = NULL; | |
1413 | /* | |
1414 | * would not work with perfmon+more than one bit in cpu_mask | |
1415 | */ | |
1416 | if (ctx && ctx->ctx_fl_using_dbreg) { | |
1417 | if (pfm_sessions.pfs_sys_use_dbregs == 0) { | |
1418 | printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx); | |
1419 | } else { | |
1420 | pfm_sessions.pfs_sys_use_dbregs--; | |
1421 | } | |
1422 | } | |
1423 | pfm_sessions.pfs_sys_sessions--; | |
1424 | } else { | |
1425 | pfm_sessions.pfs_task_sessions--; | |
1426 | } | |
1427 | DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", | |
1428 | pfm_sessions.pfs_sys_sessions, | |
1429 | pfm_sessions.pfs_task_sessions, | |
1430 | pfm_sessions.pfs_sys_use_dbregs, | |
1431 | is_syswide, | |
1432 | cpu)); | |
1433 | ||
91d591c3 TG |
1434 | /* Undo forced polling. Last session reenables pal_halt */ |
1435 | cpu_idle_poll_ctrl(false); | |
8df5a500 | 1436 | |
1da177e4 LT |
1437 | UNLOCK_PFS(flags); |
1438 | ||
1439 | return 0; | |
1440 | } | |
1441 | ||
1442 | /* | |
1443 | * removes virtual mapping of the sampling buffer. | |
1444 | * IMPORTANT: cannot be called with interrupts disabled, e.g. inside | |
1445 | * a PROTECT_CTX() section. | |
1446 | */ | |
1447 | static int | |
9f3a4afb | 1448 | pfm_remove_smpl_mapping(void *vaddr, unsigned long size) |
1da177e4 | 1449 | { |
9f3a4afb | 1450 | struct task_struct *task = current; |
1da177e4 LT |
1451 | int r; |
1452 | ||
1453 | /* sanity checks */ | |
1454 | if (task->mm == NULL || size == 0UL || vaddr == NULL) { | |
19c5870c | 1455 | printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm); |
1da177e4 LT |
1456 | return -EINVAL; |
1457 | } | |
1458 | ||
1459 | DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size)); | |
1460 | ||
1461 | /* | |
1462 | * does the actual unmapping | |
1463 | */ | |
bfce281c | 1464 | r = vm_munmap((unsigned long)vaddr, size); |
1da177e4 | 1465 | |
1da177e4 | 1466 | if (r !=0) { |
19c5870c | 1467 | printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size); |
1da177e4 LT |
1468 | } |
1469 | ||
1470 | DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r)); | |
1471 | ||
1472 | return 0; | |
1473 | } | |
1474 | ||
1475 | /* | |
1476 | * free actual physical storage used by sampling buffer | |
1477 | */ | |
1478 | #if 0 | |
1479 | static int | |
1480 | pfm_free_smpl_buffer(pfm_context_t *ctx) | |
1481 | { | |
1482 | pfm_buffer_fmt_t *fmt; | |
1483 | ||
1484 | if (ctx->ctx_smpl_hdr == NULL) goto invalid_free; | |
1485 | ||
1486 | /* | |
1487 | * we won't use the buffer format anymore | |
1488 | */ | |
1489 | fmt = ctx->ctx_buf_fmt; | |
1490 | ||
1491 | DPRINT(("sampling buffer @%p size %lu vaddr=%p\n", | |
1492 | ctx->ctx_smpl_hdr, | |
1493 | ctx->ctx_smpl_size, | |
1494 | ctx->ctx_smpl_vaddr)); | |
1495 | ||
1496 | pfm_buf_fmt_exit(fmt, current, NULL, NULL); | |
1497 | ||
1498 | /* | |
1499 | * free the buffer | |
1500 | */ | |
1501 | pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size); | |
1502 | ||
1503 | ctx->ctx_smpl_hdr = NULL; | |
1504 | ctx->ctx_smpl_size = 0UL; | |
1505 | ||
1506 | return 0; | |
1507 | ||
1508 | invalid_free: | |
19c5870c | 1509 | printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current)); |
1da177e4 LT |
1510 | return -EINVAL; |
1511 | } | |
1512 | #endif | |
1513 | ||
1514 | static inline void | |
1515 | pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt) | |
1516 | { | |
1517 | if (fmt == NULL) return; | |
1518 | ||
1519 | pfm_buf_fmt_exit(fmt, current, NULL, NULL); | |
1520 | ||
1521 | } | |
1522 | ||
1523 | /* | |
1524 | * pfmfs should _never_ be mounted by userland - too much of a security hassle, | |
1525 | * no real gain from having the whole whorehouse mounted. So we don't need | |
1526 | * any operations on the root directory. However, we need a non-trivial | |
1527 | * d_name - pfm: will go nicely and kill the special-casing in procfs. | |
1528 | */ | |
b3e19d92 | 1529 | static struct vfsmount *pfmfs_mnt __read_mostly; |
1da177e4 LT |
1530 | |
1531 | static int __init | |
1532 | init_pfm_fs(void) | |
1533 | { | |
1534 | int err = register_filesystem(&pfm_fs_type); | |
1535 | if (!err) { | |
1536 | pfmfs_mnt = kern_mount(&pfm_fs_type); | |
1537 | err = PTR_ERR(pfmfs_mnt); | |
1538 | if (IS_ERR(pfmfs_mnt)) | |
1539 | unregister_filesystem(&pfm_fs_type); | |
1540 | else | |
1541 | err = 0; | |
1542 | } | |
1543 | return err; | |
1544 | } | |
1545 | ||
1da177e4 LT |
1546 | static ssize_t |
1547 | pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) | |
1548 | { | |
1549 | pfm_context_t *ctx; | |
1550 | pfm_msg_t *msg; | |
1551 | ssize_t ret; | |
1552 | unsigned long flags; | |
1553 | DECLARE_WAITQUEUE(wait, current); | |
1554 | if (PFM_IS_FILE(filp) == 0) { | |
19c5870c | 1555 | printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", task_pid_nr(current)); |
1da177e4 LT |
1556 | return -EINVAL; |
1557 | } | |
1558 | ||
df0a59a1 | 1559 | ctx = filp->private_data; |
1da177e4 | 1560 | if (ctx == NULL) { |
19c5870c | 1561 | printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current)); |
1da177e4 LT |
1562 | return -EINVAL; |
1563 | } | |
1564 | ||
1565 | /* | |
1566 | * check even when there is no message | |
1567 | */ | |
1568 | if (size < sizeof(pfm_msg_t)) { | |
1569 | DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t))); | |
1570 | return -EINVAL; | |
1571 | } | |
1572 | ||
1573 | PROTECT_CTX(ctx, flags); | |
1574 | ||
1575 | /* | |
1576 | * put ourselves on the wait queue | |
1577 | */ | |
1578 | add_wait_queue(&ctx->ctx_msgq_wait, &wait); | |
1579 | ||
1580 | ||
1581 | for(;;) { | |
1582 | /* | |
1583 | * check wait queue | |
1584 | */ | |
1585 | ||
1586 | set_current_state(TASK_INTERRUPTIBLE); | |
1587 | ||
1588 | DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); | |
1589 | ||
1590 | ret = 0; | |
1591 | if(PFM_CTXQ_EMPTY(ctx) == 0) break; | |
1592 | ||
1593 | UNPROTECT_CTX(ctx, flags); | |
1594 | ||
1595 | /* | |
1596 | * check non-blocking read | |
1597 | */ | |
1598 | ret = -EAGAIN; | |
1599 | if(filp->f_flags & O_NONBLOCK) break; | |
1600 | ||
1601 | /* | |
1602 | * check pending signals | |
1603 | */ | |
1604 | if(signal_pending(current)) { | |
1605 | ret = -EINTR; | |
1606 | break; | |
1607 | } | |
1608 | /* | |
1609 | * no message, so wait | |
1610 | */ | |
1611 | schedule(); | |
1612 | ||
1613 | PROTECT_CTX(ctx, flags); | |
1614 | } | |
19c5870c | 1615 | DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret)); |
1da177e4 LT |
1616 | set_current_state(TASK_RUNNING); |
1617 | remove_wait_queue(&ctx->ctx_msgq_wait, &wait); | |
1618 | ||
1619 | if (ret < 0) goto abort; | |
1620 | ||
1621 | ret = -EINVAL; | |
1622 | msg = pfm_get_next_msg(ctx); | |
1623 | if (msg == NULL) { | |
19c5870c | 1624 | printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current)); |
1da177e4 LT |
1625 | goto abort_locked; |
1626 | } | |
1627 | ||
4944930a | 1628 | DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type)); |
1da177e4 LT |
1629 | |
1630 | ret = -EFAULT; | |
1631 | if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t); | |
1632 | ||
1633 | abort_locked: | |
1634 | UNPROTECT_CTX(ctx, flags); | |
1635 | abort: | |
1636 | return ret; | |
1637 | } | |
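/*
 * Illustrative sketch, not built (guarded out like the dead code earlier in
 * this file): how a user-level tool is expected to consume the messages that
 * pfm_read() delivers.  The perfmonctl() wrapper, the PFM_MSG_OVFL constant
 * and the PFM_RESTART command are assumptions taken from the usual perfmon-2
 * user ABI; only the kernel half appears in this file.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <perfmon/perfmon.h>	/* pfm_msg_t, PFM_MSG_OVFL, perfmonctl() (assumed) */

static void
consume_messages(int ctx_fd)
{
	pfm_msg_t msg;

	/* pfm_read() blocks until a complete pfm_msg_t is available */
	while (read(ctx_fd, &msg, sizeof(msg)) == (ssize_t)sizeof(msg)) {
		if (msg.pfm_gen_msg.msg_type == PFM_MSG_OVFL) {
			printf("overflow notification for fd %d\n",
			       msg.pfm_gen_msg.msg_ctx_fd);
			/* resume counting once the samples have been processed */
			perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);
		}
	}
}
#endif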
1638 | ||
1639 | static ssize_t | |
1640 | pfm_write(struct file *file, const char __user *ubuf, | |
1641 | size_t size, loff_t *ppos) | |
1642 | { | |
1643 | DPRINT(("pfm_write called\n")); | |
1644 | return -EINVAL; | |
1645 | } | |
1646 | ||
1647 | static unsigned int | |
1648 | pfm_poll(struct file *filp, poll_table * wait) | |
1649 | { | |
1650 | pfm_context_t *ctx; | |
1651 | unsigned long flags; | |
1652 | unsigned int mask = 0; | |
1653 | ||
1654 | if (PFM_IS_FILE(filp) == 0) { | |
19c5870c | 1655 | printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current)); |
1da177e4 LT |
1656 | return 0; |
1657 | } | |
1658 | ||
df0a59a1 | 1659 | ctx = filp->private_data; |
1da177e4 | 1660 | if (ctx == NULL) { |
19c5870c | 1661 | printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current)); |
1da177e4 LT |
1662 | return 0; |
1663 | } | |
1664 | ||
1665 | ||
1666 | DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd)); | |
1667 | ||
1668 | poll_wait(filp, &ctx->ctx_msgq_wait, wait); | |
1669 | ||
1670 | PROTECT_CTX(ctx, flags); | |
1671 | ||
1672 | if (PFM_CTXQ_EMPTY(ctx) == 0) | |
1673 | mask = POLLIN | POLLRDNORM; | |
1674 | ||
1675 | UNPROTECT_CTX(ctx, flags); | |
1676 | ||
1677 | DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask)); | |
1678 | ||
1679 | return mask; | |
1680 | } | |
1681 | ||
ba58aebf AB |
1682 | static long |
1683 | pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |
1da177e4 LT |
1684 | { |
1685 | DPRINT(("pfm_ioctl called\n")); | |
1686 | return -EINVAL; | |
1687 | } | |
1688 | ||
1689 | /* | |
1690 | * interrupt cannot be masked when coming here | |
1691 | */ | |
1692 | static inline int | |
1693 | pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on) | |
1694 | { | |
1695 | int ret; | |
1696 | ||
1697 | ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue); | |
1698 | ||
1699 | DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n", | |
19c5870c | 1700 | task_pid_nr(current), |
1da177e4 LT |
1701 | fd, |
1702 | on, | |
1703 | ctx->ctx_async_queue, ret)); | |
1704 | ||
1705 | return ret; | |
1706 | } | |
1707 | ||
1708 | static int | |
1709 | pfm_fasync(int fd, struct file *filp, int on) | |
1710 | { | |
1711 | pfm_context_t *ctx; | |
1712 | int ret; | |
1713 | ||
1714 | if (PFM_IS_FILE(filp) == 0) { | |
19c5870c | 1715 | printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current)); |
1da177e4 LT |
1716 | return -EBADF; |
1717 | } | |
1718 | ||
df0a59a1 | 1719 | ctx = filp->private_data; |
1da177e4 | 1720 | if (ctx == NULL) { |
19c5870c | 1721 | printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current)); |
1da177e4 LT |
1722 | return -EBADF; |
1723 | } | |
1724 | /* | |
1725 | * we cannot mask interrupts during this call because this | |
1726 | * may go to sleep if memory is not readily available. | |
1727 | * | |
1728 | * We are protected from the context disappearing by the get_fd()/put_fd() | |
1729 | * done in caller. Serialization of this function is ensured by caller. | |
1730 | */ | |
1731 | ret = pfm_do_fasync(fd, filp, ctx, on); | |
1732 | ||
1733 | ||
1734 | DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n", | |
1735 | fd, | |
1736 | on, | |
1737 | ctx->ctx_async_queue, ret)); | |
1738 | ||
1739 | return ret; | |
1740 | } | |
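/*
 * Illustrative sketch, not built: arming asynchronous notification on a
 * perfmon context fd, which is what lands in pfm_fasync() above.  This is
 * plain fcntl()/sigaction() usage; nothing below is perfmon-specific beyond
 * the assumption that the caller already holds a context fd.
 */
#if 0
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int
arm_sigio(int ctx_fd, void (*handler)(int))
{
	struct sigaction sa = { .sa_handler = handler };

	if (sigaction(SIGIO, &sa, NULL) < 0)
		return -1;
	/* deliver SIGIO to this process when a message is queued */
	if (fcntl(ctx_fd, F_SETOWN, getpid()) < 0)
		return -1;
	/* setting O_ASYNC is what ends up calling the ->fasync hook */
	return fcntl(ctx_fd, F_SETFL, fcntl(ctx_fd, F_GETFL) | O_ASYNC);
}
#endif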
1741 | ||
1742 | #ifdef CONFIG_SMP | |
1743 | /* | |
1744 | * this function is exclusively called from pfm_close(). | |
1745 | * The context is not protected at that time, nor are interrupts | |
1746 | * on the remote CPU. That's necessary to avoid deadlocks. | |
1747 | */ | |
1748 | static void | |
1749 | pfm_syswide_force_stop(void *info) | |
1750 | { | |
1751 | pfm_context_t *ctx = (pfm_context_t *)info; | |
6450578f | 1752 | struct pt_regs *regs = task_pt_regs(current); |
1da177e4 LT |
1753 | struct task_struct *owner; |
1754 | unsigned long flags; | |
1755 | int ret; | |
1756 | ||
1757 | if (ctx->ctx_cpu != smp_processor_id()) { | |
1758 | printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n", | |
1759 | ctx->ctx_cpu, | |
1760 | smp_processor_id()); | |
1761 | return; | |
1762 | } | |
1763 | owner = GET_PMU_OWNER(); | |
1764 | if (owner != ctx->ctx_task) { | |
1765 | printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n", | |
1766 | smp_processor_id(), | |
19c5870c | 1767 | task_pid_nr(owner), task_pid_nr(ctx->ctx_task)); |
1da177e4 LT |
1768 | return; |
1769 | } | |
1770 | if (GET_PMU_CTX() != ctx) { | |
1771 | printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n", | |
1772 | smp_processor_id(), | |
1773 | GET_PMU_CTX(), ctx); | |
1774 | return; | |
1775 | } | |
1776 | ||
19c5870c | 1777 | DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task))); |
1da177e4 LT |
1778 | /* |
1779 | * the context is already protected in pfm_close(), we simply | |
1780 | * need to mask interrupts to avoid a PMU interrupt race on | |
1781 | * this CPU | |
1782 | */ | |
1783 | local_irq_save(flags); | |
1784 | ||
1785 | ret = pfm_context_unload(ctx, NULL, 0, regs); | |
1786 | if (ret) { | |
1787 | DPRINT(("context_unload returned %d\n", ret)); | |
1788 | } | |
1789 | ||
1790 | /* | |
1791 | * unmask interrupts, PMU interrupts are now spurious here | |
1792 | */ | |
1793 | local_irq_restore(flags); | |
1794 | } | |
1795 | ||
1796 | static void | |
1797 | pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx) | |
1798 | { | |
1799 | int ret; | |
1800 | ||
1801 | DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); | |
8691e5a8 | 1802 | ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1); |
1da177e4 LT |
1803 | DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); |
1804 | } | |
1805 | #endif /* CONFIG_SMP */ | |
1806 | ||
1807 | /* | |
1808 | * called for each close(). Partially free resources. | |
1809 | * When caller is self-monitoring, the context is unloaded. | |
1810 | */ | |
1811 | static int | |
75e1fcc0 | 1812 | pfm_flush(struct file *filp, fl_owner_t id) |
1da177e4 LT |
1813 | { |
1814 | pfm_context_t *ctx; | |
1815 | struct task_struct *task; | |
1816 | struct pt_regs *regs; | |
1817 | unsigned long flags; | |
1818 | unsigned long smpl_buf_size = 0UL; | |
1819 | void *smpl_buf_vaddr = NULL; | |
1820 | int state, is_system; | |
1821 | ||
1822 | if (PFM_IS_FILE(filp) == 0) { | |
1823 | DPRINT(("bad magic for\n")); | |
1824 | return -EBADF; | |
1825 | } | |
1826 | ||
df0a59a1 | 1827 | ctx = filp->private_data; |
1da177e4 | 1828 | if (ctx == NULL) { |
19c5870c | 1829 | printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current)); |
1da177e4 LT |
1830 | return -EBADF; |
1831 | } | |
1832 | ||
1833 | /* | |
1834 | * remove our file from the async queue, if we use this mode. | |
1835 | * This can be done without the context being protected. We come | |
72fdbdce | 1836 | * here when the context has become unreachable by other tasks. |
1da177e4 LT |
1837 | * |
1838 | * We may still have active monitoring at this point and we may | |
1839 | * end up in pfm_overflow_handler(). However, fasync_helper() | |
1840 | * operates with interrupts disabled and it cleans up the | |
1841 | * queue. If the PMU handler is called prior to entering | |
1842 | * fasync_helper() then it will send a signal. If it is | |
1843 | * invoked after, it will find an empty queue and no | |
1844 | * signal will be sent. In both cases, we are safe | |
1845 | */ | |
1da177e4 LT |
1846 | PROTECT_CTX(ctx, flags); |
1847 | ||
1848 | state = ctx->ctx_state; | |
1849 | is_system = ctx->ctx_fl_system; | |
1850 | ||
1851 | task = PFM_CTX_TASK(ctx); | |
6450578f | 1852 | regs = task_pt_regs(task); |
1da177e4 LT |
1853 | |
1854 | DPRINT(("ctx_state=%d is_current=%d\n", | |
1855 | state, | |
1856 | task == current ? 1 : 0)); | |
1857 | ||
1858 | /* | |
1859 | * if state == UNLOADED, then task is NULL | |
1860 | */ | |
1861 | ||
1862 | /* | |
1863 | * we must stop and unload because we are losing access to the context. | |
1864 | */ | |
1865 | if (task == current) { | |
1866 | #ifdef CONFIG_SMP | |
1867 | /* | |
1868 | * the task IS the owner but it migrated to another CPU: that's bad | |
1869 | * but we must handle this cleanly. Unfortunately, the kernel does | |
1870 | * not provide a mechanism to block migration (while the context is loaded). | |
1871 | * | |
1872 | * We need to release the resource on the ORIGINAL cpu. | |
1873 | */ | |
1874 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | |
1875 | ||
1876 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
1877 | /* | |
1878 | * keep context protected but unmask interrupt for IPI | |
1879 | */ | |
1880 | local_irq_restore(flags); | |
1881 | ||
1882 | pfm_syswide_cleanup_other_cpu(ctx); | |
1883 | ||
1884 | /* | |
1885 | * restore interrupt masking | |
1886 | */ | |
1887 | local_irq_save(flags); | |
1888 | ||
1889 | /* | |
1890 | * context is unloaded at this point | |
1891 | */ | |
1892 | } else | |
1893 | #endif /* CONFIG_SMP */ | |
1894 | { | |
1895 | ||
1896 | DPRINT(("forcing unload\n")); | |
1897 | /* | |
1898 | * stop and unload, returning with state UNLOADED | |
1899 | * and session unreserved. | |
1900 | */ | |
1901 | pfm_context_unload(ctx, NULL, 0, regs); | |
1902 | ||
1903 | DPRINT(("ctx_state=%d\n", ctx->ctx_state)); | |
1904 | } | |
1905 | } | |
1906 | ||
1907 | /* | |
1908 | * remove virtual mapping, if any, for the calling task. | |
1909 | * cannot reset ctx field until last user is calling close(). | |
1910 | * | |
1911 | * ctx_smpl_vaddr must never be cleared because it is needed | |
1912 | * by every task with access to the context | |
1913 | * | |
1914 | * When called from do_exit(), the mm context is gone already, therefore | |
1915 | * mm is NULL, i.e., the VMA is already gone and we do not have to | |
1916 | * do anything here | |
1917 | */ | |
1918 | if (ctx->ctx_smpl_vaddr && current->mm) { | |
1919 | smpl_buf_vaddr = ctx->ctx_smpl_vaddr; | |
1920 | smpl_buf_size = ctx->ctx_smpl_size; | |
1921 | } | |
1922 | ||
1923 | UNPROTECT_CTX(ctx, flags); | |
1924 | ||
1925 | /* | |
1926 | * if there was a mapping, then we systematically remove it | |
1927 | * at this point. Cannot be done inside critical section | |
1928 | * because some VM function reenables interrupts. | |
1929 | * | |
1930 | */ | |
9f3a4afb | 1931 | if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size); |
1da177e4 LT |
1932 | |
1933 | return 0; | |
1934 | } | |
1935 | /* | |
1936 | * called either on explicit close() or from exit_files(). | |
1937 | * Only the LAST user of the file gets to this point, i.e., it is | |
1938 | * called only ONCE. | |
1939 | * | |
1940 | * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero | |
1941 | * (fput()), i.e., last task to access the file. Nobody else can access the | |
1942 | * file at this point. | |
1943 | * | |
1944 | * When called from exit_files(), the VMA has been freed because exit_mm() | |
1945 | * is executed before exit_files(). | |
1946 | * | |
1947 | * When called from exit_files(), the current task is not yet ZOMBIE but we | |
1948 | * flush the PMU state to the context. | |
1949 | */ | |
1950 | static int | |
1951 | pfm_close(struct inode *inode, struct file *filp) | |
1952 | { | |
1953 | pfm_context_t *ctx; | |
1954 | struct task_struct *task; | |
1955 | struct pt_regs *regs; | |
1956 | DECLARE_WAITQUEUE(wait, current); | |
1957 | unsigned long flags; | |
1958 | unsigned long smpl_buf_size = 0UL; | |
1959 | void *smpl_buf_addr = NULL; | |
1960 | int free_possible = 1; | |
1961 | int state, is_system; | |
1962 | ||
1963 | DPRINT(("pfm_close called private=%p\n", filp->private_data)); | |
1964 | ||
1965 | if (PFM_IS_FILE(filp) == 0) { | |
1966 | DPRINT(("bad magic\n")); | |
1967 | return -EBADF; | |
1968 | } | |
1969 | ||
df0a59a1 | 1970 | ctx = filp->private_data; |
1da177e4 | 1971 | if (ctx == NULL) { |
19c5870c | 1972 | printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current)); |
1da177e4 LT |
1973 | return -EBADF; |
1974 | } | |
1975 | ||
1976 | PROTECT_CTX(ctx, flags); | |
1977 | ||
1978 | state = ctx->ctx_state; | |
1979 | is_system = ctx->ctx_fl_system; | |
1980 | ||
1981 | task = PFM_CTX_TASK(ctx); | |
6450578f | 1982 | regs = task_pt_regs(task); |
1da177e4 LT |
1983 | |
1984 | DPRINT(("ctx_state=%d is_current=%d\n", | |
1985 | state, | |
1986 | task == current ? 1 : 0)); | |
1987 | ||
1988 | /* | |
1989 | * if task == current, then pfm_flush() unloaded the context | |
1990 | */ | |
1991 | if (state == PFM_CTX_UNLOADED) goto doit; | |
1992 | ||
1993 | /* | |
1994 | * context is loaded/masked and task != current, we need to | |
1995 | * either force an unload or go zombie | |
1996 | */ | |
1997 | ||
1998 | /* | |
1999 | * The task is currently blocked or will block after an overflow. | |
2000 | * we must force it to wakeup to get out of the | |
2001 | * MASKED state and transition to the unloaded state by itself. | |
2002 | * | |
2003 | * This situation is only possible for per-task mode | |
2004 | */ | |
2005 | if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) { | |
2006 | ||
2007 | /* | |
2008 | * set a "partial" zombie state to be checked | |
2009 | * upon return from down() in pfm_handle_work(). | |
2010 | * | |
2011 | * We cannot use the ZOMBIE state, because it is checked | |
2012 | * by pfm_load_regs() which is called upon wakeup from down(). | |
2013 | * In such case, it would free the context and then we would | |
2014 | * return to pfm_handle_work() which would access the | |
2015 | * stale context. Instead, we set a flag invisible to pfm_load_regs() | |
2016 | * but visible to pfm_handle_work(). | |
2017 | * | |
2018 | * For some window of time, we have a zombie context with | |
2019 | * ctx_state = MASKED and not ZOMBIE | |
2020 | */ | |
2021 | ctx->ctx_fl_going_zombie = 1; | |
2022 | ||
2023 | /* | |
2024 | * force task to wake up from MASKED state | |
2025 | */ | |
60f1c444 | 2026 | complete(&ctx->ctx_restart_done); |
1da177e4 LT |
2027 | |
2028 | DPRINT(("waking up ctx_state=%d\n", state)); | |
2029 | ||
2030 | /* | |
2031 | * put ourself to sleep waiting for the other | |
2032 | * task to report completion | |
2033 | * | |
2034 | * the context is protected by mutex, therefore there | |
2035 | * is no risk of being notified of completion before | |
2036 | * being actually on the waitq. | |
2037 | */ | |
2038 | set_current_state(TASK_INTERRUPTIBLE); | |
2039 | add_wait_queue(&ctx->ctx_zombieq, &wait); | |
2040 | ||
2041 | UNPROTECT_CTX(ctx, flags); | |
2042 | ||
2043 | /* | |
2044 | * XXX: check for signals : | |
2045 | * - ok for explicit close | |
2046 | * - not ok when coming from exit_files() | |
2047 | */ | |
2048 | schedule(); | |
2049 | ||
2050 | ||
2051 | PROTECT_CTX(ctx, flags); | |
2052 | ||
2053 | ||
2054 | remove_wait_queue(&ctx->ctx_zombieq, &wait); | |
2055 | set_current_state(TASK_RUNNING); | |
2056 | ||
2057 | /* | |
2058 | * context is unloaded at this point | |
2059 | */ | |
2060 | DPRINT(("after zombie wakeup ctx_state=%d for\n", state)); | |
2061 | } | |
2062 | else if (task != current) { | |
2063 | #ifdef CONFIG_SMP | |
2064 | /* | |
2065 | * switch context to zombie state | |
2066 | */ | |
2067 | ctx->ctx_state = PFM_CTX_ZOMBIE; | |
2068 | ||
19c5870c | 2069 | DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
2070 | /* |
2071 | * cannot free the context on the spot. deferred until | |
2072 | * the task notices the ZOMBIE state | |
2073 | */ | |
2074 | free_possible = 0; | |
2075 | #else | |
2076 | pfm_context_unload(ctx, NULL, 0, regs); | |
2077 | #endif | |
2078 | } | |
2079 | ||
2080 | doit: | |
2081 | /* reload state, may have changed during opening of critical section */ | |
2082 | state = ctx->ctx_state; | |
2083 | ||
2084 | /* | |
2085 | * the context is still attached to a task (possibly current) | |
2086 | * we cannot destroy it right now | |
2087 | */ | |
2088 | ||
2089 | /* | |
2090 | * we must free the sampling buffer right here because | |
2091 | * we cannot rely on it being cleaned up later by the | |
2092 | * monitored task. It is not possible to free vmalloc'ed | |
2093 | * memory in pfm_load_regs(). Instead, we remove the buffer | |
2094 | * now. Should there be a subsequent PMU overflow originally | |
2095 | * meant for sampling, it will be converted to spurious | |
2096 | * and that's fine because the monitoring tool is gone anyway. | |
2097 | */ | |
2098 | if (ctx->ctx_smpl_hdr) { | |
2099 | smpl_buf_addr = ctx->ctx_smpl_hdr; | |
2100 | smpl_buf_size = ctx->ctx_smpl_size; | |
2101 | /* no more sampling */ | |
2102 | ctx->ctx_smpl_hdr = NULL; | |
2103 | ctx->ctx_fl_is_sampling = 0; | |
2104 | } | |
2105 | ||
2106 | DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n", | |
2107 | state, | |
2108 | free_possible, | |
2109 | smpl_buf_addr, | |
2110 | smpl_buf_size)); | |
2111 | ||
2112 | if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt); | |
2113 | ||
2114 | /* | |
2115 | * UNLOADED means the session has already been unreserved. | |
2116 | */ | |
2117 | if (state == PFM_CTX_ZOMBIE) { | |
2118 | pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu); | |
2119 | } | |
2120 | ||
2121 | /* | |
2122 | * disconnect file descriptor from context must be done | |
2123 | * before we unlock. | |
2124 | */ | |
2125 | filp->private_data = NULL; | |
2126 | ||
2127 | /* | |
72fdbdce | 2128 | * if we free on the spot, the context is now completely unreachable |
1da177e4 LT |
2129 | * from the caller's side. The monitored task side is also cut, so we | |
2130 | * can safely free it. | |
2131 | * | |
2132 | * If we have a deferred free, only the caller side is disconnected. | |
2133 | */ | |
2134 | UNPROTECT_CTX(ctx, flags); | |
2135 | ||
2136 | /* | |
2137 | * All memory free operations (especially for vmalloc'ed memory) | |
2138 | * MUST be done with interrupts ENABLED. | |
2139 | */ | |
2140 | if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size); | |
2141 | ||
2142 | /* | |
2143 | * return the memory used by the context | |
2144 | */ | |
2145 | if (free_possible) pfm_context_free(ctx); | |
2146 | ||
2147 | return 0; | |
2148 | } | |
2149 | ||
5dfe4c96 | 2150 | static const struct file_operations pfm_file_ops = { |
ba58aebf AB |
2151 | .llseek = no_llseek, |
2152 | .read = pfm_read, | |
2153 | .write = pfm_write, | |
2154 | .poll = pfm_poll, | |
2155 | .unlocked_ioctl = pfm_ioctl, | |
ba58aebf AB |
2156 | .fasync = pfm_fasync, |
2157 | .release = pfm_close, | |
2158 | .flush = pfm_flush | |
1da177e4 LT |
2159 | }; |
2160 | ||
7ae6bdbd MS |
2161 | static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen) |
2162 | { | |
2163 | return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]", | |
75c3cfa8 | 2164 | d_inode(dentry)->i_ino); |
7ae6bdbd MS |
2165 | } |
2166 | ||
3ba13d17 | 2167 | static const struct dentry_operations pfmfs_dentry_operations = { |
b26d4cd3 | 2168 | .d_delete = always_delete_dentry, |
7ae6bdbd | 2169 | .d_dname = pfmfs_dname, |
1da177e4 LT |
2170 | }; |
2171 | ||
2172 | ||
f8e811b9 AV |
2173 | static struct file * |
2174 | pfm_alloc_file(pfm_context_t *ctx) | |
1da177e4 | 2175 | { |
f8e811b9 AV |
2176 | struct file *file; |
2177 | struct inode *inode; | |
2c48b9c4 | 2178 | struct path path; |
7ae6bdbd | 2179 | struct qstr this = { .name = "" }; |
1da177e4 | 2180 | |
1da177e4 LT |
2181 | /* |
2182 | * allocate a new inode | |
2183 | */ | |
2184 | inode = new_inode(pfmfs_mnt->mnt_sb); | |
f8e811b9 AV |
2185 | if (!inode) |
2186 | return ERR_PTR(-ENOMEM); | |
1da177e4 LT |
2187 | |
2188 | DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode)); | |
2189 | ||
2190 | inode->i_mode = S_IFCHR|S_IRUGO; | |
ef81ee98 DH |
2191 | inode->i_uid = current_fsuid(); |
2192 | inode->i_gid = current_fsgid(); | |
1da177e4 | 2193 | |
1da177e4 LT |
2194 | /* |
2195 | * allocate a new dcache entry | |
2196 | */ | |
4c1d5a64 | 2197 | path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this); |
2c48b9c4 | 2198 | if (!path.dentry) { |
f8e811b9 AV |
2199 | iput(inode); |
2200 | return ERR_PTR(-ENOMEM); | |
2201 | } | |
2c48b9c4 | 2202 | path.mnt = mntget(pfmfs_mnt); |
1da177e4 | 2203 | |
2c48b9c4 | 2204 | d_add(path.dentry, inode); |
1da177e4 | 2205 | |
2c48b9c4 | 2206 | file = alloc_file(&path, FMODE_READ, &pfm_file_ops); |
39b65252 | 2207 | if (IS_ERR(file)) { |
2c48b9c4 | 2208 | path_put(&path); |
39b65252 | 2209 | return file; |
f8e811b9 | 2210 | } |
1da177e4 | 2211 | |
1da177e4 | 2212 | file->f_flags = O_RDONLY; |
f8e811b9 | 2213 | file->private_data = ctx; |
1da177e4 | 2214 | |
f8e811b9 | 2215 | return file; |
1da177e4 LT |
2216 | } |
2217 | ||
2218 | static int | |
2219 | pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size) | |
2220 | { | |
2221 | DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size)); | |
2222 | ||
2223 | while (size > 0) { | |
2224 | unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT; | |
2225 | ||
2226 | ||
2227 | if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY)) | |
2228 | return -ENOMEM; | |
2229 | ||
2230 | addr += PAGE_SIZE; | |
2231 | buf += PAGE_SIZE; | |
2232 | size -= PAGE_SIZE; | |
2233 | } | |
2234 | return 0; | |
2235 | } | |
2236 | ||
2237 | /* | |
2238 | * allocate a sampling buffer and remaps it into the user address space of the task | |
2239 | */ | |
2240 | static int | |
41d5e5d7 | 2241 | pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) |
1da177e4 LT |
2242 | { |
2243 | struct mm_struct *mm = task->mm; | |
2244 | struct vm_area_struct *vma = NULL; | |
2245 | unsigned long size; | |
2246 | void *smpl_buf; | |
2247 | ||
2248 | ||
2249 | /* | |
2250 | * the fixed header + requested size and align to page boundary | |
2251 | */ | |
2252 | size = PAGE_ALIGN(rsize); | |
2253 | ||
2254 | DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size)); | |
2255 | ||
2256 | /* | |
2257 | * check requested size to avoid Denial-of-service attacks | |
2258 | * XXX: may have to refine this test | |
2259 | * Check against address space limit. | |
2260 | * | |
2261 | * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) | |
2262 | * return -ENOMEM; | |
2263 | */ | |
02b763b8 | 2264 | if (size > task_rlimit(task, RLIMIT_MEMLOCK)) |
1da177e4 LT |
2265 | return -ENOMEM; |
2266 | ||
2267 | /* | |
2268 | * We do the easy to undo allocations first. | |
2269 | * | |
2270 | * pfm_rvmalloc(), clears the buffer, so there is no leak | |
2271 | */ | |
2272 | smpl_buf = pfm_rvmalloc(size); | |
2273 | if (smpl_buf == NULL) { | |
2274 | DPRINT(("Can't allocate sampling buffer\n")); | |
2275 | return -ENOMEM; | |
2276 | } | |
2277 | ||
2278 | DPRINT(("smpl_buf @%p\n", smpl_buf)); | |
2279 | ||
2280 | /* allocate vma */ | |
0d2c7e02 | 2281 | vma = vm_area_alloc(mm); |
1da177e4 LT |
2282 | if (!vma) { |
2283 | DPRINT(("Cannot allocate vma\n")); | |
2284 | goto error_kmem; | |
2285 | } | |
1da177e4 LT |
2286 | |
2287 | /* | |
2288 | * partially initialize the vma for the sampling buffer | |
2289 | */ | |
cb0942b8 | 2290 | vma->vm_file = get_file(filp); |
314e51b9 | 2291 | vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP; |
1da177e4 LT |
2292 | vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ |
2293 | ||
2294 | /* | |
2295 | * Now we have everything we need and we can initialize | |
2296 | * and connect all the data structures | |
2297 | */ | |
2298 | ||
2299 | ctx->ctx_smpl_hdr = smpl_buf; | |
2300 | ctx->ctx_smpl_size = size; /* aligned size */ | |
2301 | ||
2302 | /* | |
2303 | * Let's do the difficult operations next. | |
2304 | * | |
2305 | * now we atomically find some area in the address space and | |
2306 | * remap the buffer in it. | |
2307 | */ | |
2308 | down_write(&task->mm->mmap_sem); | |
2309 | ||
2310 | /* find some free area in address space, must have mmap sem held */ | |
4ad310b8 AV |
2311 | vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS); |
2312 | if (IS_ERR_VALUE(vma->vm_start)) { | |
1da177e4 LT |
2313 | DPRINT(("Cannot find unmapped area for size %ld\n", size)); |
2314 | up_write(&task->mm->mmap_sem); | |
2315 | goto error; | |
2316 | } | |
2317 | vma->vm_end = vma->vm_start + size; | |
2318 | vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; | |
2319 | ||
2320 | DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start)); | |
2321 | ||
2322 | /* can only be applied to current task, need to have the mm semaphore held when called */ | |
2323 | if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) { | |
2324 | DPRINT(("Can't remap buffer\n")); | |
2325 | up_write(&task->mm->mmap_sem); | |
2326 | goto error; | |
2327 | } | |
2328 | ||
2329 | /* | |
2330 | * now insert the vma in the vm list for the process, must be | |
2331 | * done with mmap lock held | |
2332 | */ | |
2333 | insert_vm_struct(mm, vma); | |
2334 | ||
84638335 | 2335 | vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma)); |
1da177e4 LT |
2336 | up_write(&task->mm->mmap_sem); |
2337 | ||
2338 | /* | |
2339 | * keep track of user level virtual address | |
2340 | */ | |
2341 | ctx->ctx_smpl_vaddr = (void *)vma->vm_start; | |
2342 | *(unsigned long *)user_vaddr = vma->vm_start; | |
2343 | ||
2344 | return 0; | |
2345 | ||
2346 | error: | |
e88c5cf9 | 2347 | vm_area_free(vma); |
1da177e4 LT |
2348 | error_kmem: |
2349 | pfm_rvfree(smpl_buf, size); | |
2350 | ||
2351 | return -ENOMEM; | |
2352 | } | |
2353 | ||
2354 | /* | |
2355 | * XXX: do something better here | |
2356 | */ | |
2357 | static int | |
2358 | pfm_bad_permissions(struct task_struct *task) | |
2359 | { | |
c69e8d9c | 2360 | const struct cred *tcred; |
6c1ee033 EB |
2361 | kuid_t uid = current_uid(); |
2362 | kgid_t gid = current_gid(); | |
c69e8d9c DH |
2363 | int ret; |
2364 | ||
2365 | rcu_read_lock(); | |
2366 | tcred = __task_cred(task); | |
ef81ee98 | 2367 | |
1da177e4 LT |
2368 | /* inspired by ptrace_attach() */ |
2369 | DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n", | |
6c1ee033 EB |
2370 | from_kuid(&init_user_ns, uid), |
2371 | from_kgid(&init_user_ns, gid), | |
2372 | from_kuid(&init_user_ns, tcred->euid), | |
2373 | from_kuid(&init_user_ns, tcred->suid), | |
2374 | from_kuid(&init_user_ns, tcred->uid), | |
2375 | from_kgid(&init_user_ns, tcred->egid), | |
2376 | from_kgid(&init_user_ns, tcred->sgid))); | |
2377 | ||
2378 | ret = ((!uid_eq(uid, tcred->euid)) | |
2379 | || (!uid_eq(uid, tcred->suid)) | |
2380 | || (!uid_eq(uid, tcred->uid)) | |
2381 | || (!gid_eq(gid, tcred->egid)) | |
2382 | || (!gid_eq(gid, tcred->sgid)) | |
2383 | || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE); | |
c69e8d9c DH |
2384 | |
2385 | rcu_read_unlock(); | |
2386 | return ret; | |
1da177e4 LT |
2387 | } |
2388 | ||
2389 | static int | |
2390 | pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx) | |
2391 | { | |
2392 | int ctx_flags; | |
2393 | ||
2394 | /* valid signal */ | |
2395 | ||
2396 | ctx_flags = pfx->ctx_flags; | |
2397 | ||
2398 | if (ctx_flags & PFM_FL_SYSTEM_WIDE) { | |
2399 | ||
2400 | /* | |
2401 | * cannot block in this mode | |
2402 | */ | |
2403 | if (ctx_flags & PFM_FL_NOTIFY_BLOCK) { | |
2404 | DPRINT(("cannot use blocking mode when in system wide monitoring\n")); | |
2405 | return -EINVAL; | |
2406 | } | |
2407 | } else { | |
2408 | } | |
2409 | /* probably more to add here */ | |
2410 | ||
2411 | return 0; | |
2412 | } | |
2413 | ||
2414 | static int | |
41d5e5d7 | 2415 | pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags, |
1da177e4 LT |
2416 | unsigned int cpu, pfarg_context_t *arg) |
2417 | { | |
2418 | pfm_buffer_fmt_t *fmt = NULL; | |
2419 | unsigned long size = 0UL; | |
2420 | void *uaddr = NULL; | |
2421 | void *fmt_arg = NULL; | |
2422 | int ret = 0; | |
2423 | #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1) | |
2424 | ||
2425 | /* invoke and lock buffer format, if found */ | |
2426 | fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id); | |
2427 | if (fmt == NULL) { | |
19c5870c | 2428 | DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task))); |
1da177e4 LT |
2429 | return -EINVAL; |
2430 | } | |
2431 | ||
2432 | /* | |
2433 | * buffer argument MUST be contiguous to pfarg_context_t | |
2434 | */ | |
2435 | if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg); | |
2436 | ||
2437 | ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg); | |
2438 | ||
19c5870c | 2439 | DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret)); |
1da177e4 LT |
2440 | |
2441 | if (ret) goto error; | |
2442 | ||
2443 | /* link buffer format and context */ | |
2444 | ctx->ctx_buf_fmt = fmt; | |
f8e811b9 | 2445 | ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */ |
1da177e4 LT |
2446 | |
2447 | /* | |
2448 | * check if buffer format wants to use perfmon buffer allocation/mapping service | |
2449 | */ | |
2450 | ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size); | |
2451 | if (ret) goto error; | |
2452 | ||
2453 | if (size) { | |
2454 | /* | |
2455 | * buffer is always remapped into the caller's address space | |
2456 | */ | |
41d5e5d7 | 2457 | ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr); |
1da177e4 LT |
2458 | if (ret) goto error; |
2459 | ||
2460 | /* keep track of user address of buffer */ | |
2461 | arg->ctx_smpl_vaddr = uaddr; | |
2462 | } | |
2463 | ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg); | |
2464 | ||
2465 | error: | |
2466 | return ret; | |
2467 | } | |
2468 | ||
2469 | static void | |
2470 | pfm_reset_pmu_state(pfm_context_t *ctx) | |
2471 | { | |
2472 | int i; | |
2473 | ||
2474 | /* | |
2475 | * install reset values for PMC. | |
2476 | */ | |
2477 | for (i=1; PMC_IS_LAST(i) == 0; i++) { | |
2478 | if (PMC_IS_IMPL(i) == 0) continue; | |
2479 | ctx->ctx_pmcs[i] = PMC_DFL_VAL(i); | |
2480 | DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i])); | |
2481 | } | |
2482 | /* | |
2483 | * PMD registers are set to 0UL when the context is memset() | |
2484 | */ | |
2485 | ||
2486 | /* | |
2487 | * On context switch restore, we must restore ALL pmc and ALL pmd even | |
2488 | * when they are not actively used by the task. In UP, the incoming process | |
2489 | * may otherwise pick up left over PMC, PMD state from the previous process. | |
2490 | * As opposed to PMD, stale PMC can cause harm to the incoming | |
2491 | * process because they may change what is being measured. | |
2492 | * Therefore, we must systematically reinstall the entire | |
2493 | * PMC state. In SMP, the same thing is possible on the | |
2494 | * same CPU but also between 2 CPUs. | |
2495 | * | |
2496 | * The problem with PMD is information leaking especially | |
2497 | * to user level when psr.sp=0 | |
2498 | * | |
2499 | * There is unfortunately no easy way to avoid this problem | |
2500 | * on either UP or SMP. This definitively slows down the | |
2501 | * pfm_load_regs() function. | |
2502 | */ | |
2503 | ||
2504 | /* | |
2505 | * bitmask of all PMCs accessible to this context | |
2506 | * | |
2507 | * PMC0 is treated differently. | |
2508 | */ | |
2509 | ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1; | |
2510 | ||
2511 | /* | |
72fdbdce | 2512 | * bitmask of all PMDs that are accessible to this context |
1da177e4 LT |
2513 | */ |
2514 | ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0]; | |
2515 | ||
2516 | DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0])); | |
2517 | ||
2518 | /* | |
2519 | * useful in case of re-enable after disable | |
2520 | */ | |
2521 | ctx->ctx_used_ibrs[0] = 0UL; | |
2522 | ctx->ctx_used_dbrs[0] = 0UL; | |
2523 | } | |
2524 | ||
2525 | static int | |
2526 | pfm_ctx_getsize(void *arg, size_t *sz) | |
2527 | { | |
2528 | pfarg_context_t *req = (pfarg_context_t *)arg; | |
2529 | pfm_buffer_fmt_t *fmt; | |
2530 | ||
2531 | *sz = 0; | |
2532 | ||
2533 | if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0; | |
2534 | ||
2535 | fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id); | |
2536 | if (fmt == NULL) { | |
2537 | DPRINT(("cannot find buffer format\n")); | |
2538 | return -EINVAL; | |
2539 | } | |
2540 | /* get just enough to copy in user parameters */ | |
2541 | *sz = fmt->fmt_arg_size; | |
2542 | DPRINT(("arg_size=%lu\n", *sz)); | |
2543 | ||
2544 | return 0; | |
2545 | } | |
2546 | ||
2547 | ||
2548 | ||
2549 | /* | |
2550 | * cannot attach if : | |
2551 | * - kernel task | |
2552 | * - task not owned by caller | |
2553 | * - task incompatible with context mode | |
2554 | */ | |
2555 | static int | |
2556 | pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task) | |
2557 | { | |
2558 | /* | |
2559 | * no kernel task or task not owned by caller | |
2560 | */ | |
2561 | if (task->mm == NULL) { | |
19c5870c | 2562 | DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task))); |
1da177e4 LT |
2563 | return -EPERM; |
2564 | } | |
2565 | if (pfm_bad_permissions(task)) { | |
19c5870c | 2566 | DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
2567 | return -EPERM; |
2568 | } | |
2569 | /* | |
2570 | * cannot block in self-monitoring mode | |
2571 | */ | |
2572 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) { | |
19c5870c | 2573 | DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
2574 | return -EINVAL; |
2575 | } | |
2576 | ||
2577 | if (task->exit_state == EXIT_ZOMBIE) { | |
19c5870c | 2578 | DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
2579 | return -EBUSY; |
2580 | } | |
2581 | ||
2582 | /* | |
2583 | * always ok for self | |
2584 | */ | |
2585 | if (task == current) return 0; | |
2586 | ||
21498223 | 2587 | if (!task_is_stopped_or_traced(task)) { |
19c5870c | 2588 | DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state)); |
1da177e4 LT |
2589 | return -EBUSY; |
2590 | } | |
2591 | /* | |
2592 | * make sure the task is off any CPU | |
2593 | */ | |
85ba2d86 | 2594 | wait_task_inactive(task, 0); |
1da177e4 LT |
2595 | |
2596 | /* more to come... */ | |
2597 | ||
2598 | return 0; | |
2599 | } | |
2600 | ||
2601 | static int | |
2602 | pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task) | |
2603 | { | |
2604 | struct task_struct *p = current; | |
2605 | int ret; | |
2606 | ||
2607 | /* XXX: need to add more checks here */ | |
2608 | if (pid < 2) return -EPERM; | |
2609 | ||
e1b0d4ba | 2610 | if (pid != task_pid_vnr(current)) { |
1da177e4 LT |
2611 | |
2612 | read_lock(&tasklist_lock); | |
2613 | ||
e1b0d4ba | 2614 | p = find_task_by_vpid(pid); |
1da177e4 LT |
2615 | |
2616 | /* make sure task cannot go away while we operate on it */ | |
2617 | if (p) get_task_struct(p); | |
2618 | ||
2619 | read_unlock(&tasklist_lock); | |
2620 | ||
2621 | if (p == NULL) return -ESRCH; | |
2622 | } | |
2623 | ||
2624 | ret = pfm_task_incompatible(ctx, p); | |
2625 | if (ret == 0) { | |
2626 | *task = p; | |
2627 | } else if (p != current) { | |
2628 | pfm_put_task(p); | |
2629 | } | |
2630 | return ret; | |
2631 | } | |
2632 | ||
2633 | ||
2634 | ||
2635 | static int | |
2636 | pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
2637 | { | |
2638 | pfarg_context_t *req = (pfarg_context_t *)arg; | |
2639 | struct file *filp; | |
f8e811b9 | 2640 | struct path path; |
1da177e4 | 2641 | int ctx_flags; |
f8e811b9 | 2642 | int fd; |
1da177e4 LT |
2643 | int ret; |
2644 | ||
2645 | /* let's check the arguments first */ | |
2646 | ret = pfarg_is_sane(current, req); | |
f8e811b9 AV |
2647 | if (ret < 0) |
2648 | return ret; | |
1da177e4 LT |
2649 | |
2650 | ctx_flags = req->ctx_flags; | |
2651 | ||
2652 | ret = -ENOMEM; | |
2653 | ||
aeb682dd | 2654 | fd = get_unused_fd_flags(0); |
f8e811b9 AV |
2655 | if (fd < 0) |
2656 | return fd; | |
1da177e4 | 2657 | |
f8e811b9 AV |
2658 | ctx = pfm_context_alloc(ctx_flags); |
2659 | if (!ctx) | |
2660 | goto error; | |
1da177e4 | 2661 | |
f8e811b9 AV |
2662 | filp = pfm_alloc_file(ctx); |
2663 | if (IS_ERR(filp)) { | |
2664 | ret = PTR_ERR(filp); | |
2665 | goto error_file; | |
2666 | } | |
1da177e4 | 2667 | |
f8e811b9 | 2668 | req->ctx_fd = ctx->ctx_fd = fd; |
1da177e4 LT |
2669 | |
2670 | /* | |
2671 | * does the user want to sample? | |
2672 | */ | |
2673 | if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) { | |
41d5e5d7 | 2674 | ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req); |
f8e811b9 AV |
2675 | if (ret) |
2676 | goto buffer_error; | |
1da177e4 LT |
2677 | } |
2678 | ||
04157e4c | 2679 | DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n", |
1da177e4 LT |
2680 | ctx, |
2681 | ctx_flags, | |
2682 | ctx->ctx_fl_system, | |
2683 | ctx->ctx_fl_block, | |
2684 | ctx->ctx_fl_excl_idle, | |
2685 | ctx->ctx_fl_no_msg, | |
2686 | ctx->ctx_fd)); | |
2687 | ||
2688 | /* | |
2689 | * initialize soft PMU state | |
2690 | */ | |
2691 | pfm_reset_pmu_state(ctx); | |
2692 | ||
f8e811b9 AV |
2693 | fd_install(fd, filp); |
2694 | ||
1da177e4 LT |
2695 | return 0; |
2696 | ||
2697 | buffer_error: | |
f8e811b9 AV |
2698 | path = filp->f_path; |
2699 | put_filp(filp); | |
2700 | path_put(&path); | |
1da177e4 LT |
2701 | |
2702 | if (ctx->ctx_buf_fmt) { | |
2703 | pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs); | |
2704 | } | |
2705 | error_file: | |
2706 | pfm_context_free(ctx); | |
2707 | ||
2708 | error: | |
f8e811b9 | 2709 | put_unused_fd(fd); |
1da177e4 LT |
2710 | return ret; |
2711 | } | |
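/*
 * Illustrative sketch, not built: the user-level counterpart of
 * pfm_context_create().  perfmonctl() and PFM_CREATE_CONTEXT are the usual
 * ia64 perfmon-2 system call and command name; they are assumptions here
 * since only the kernel side is visible in this file.
 */
#if 0
#include <string.h>
#include <perfmon/perfmon.h>	/* pfarg_context_t, perfmonctl() (assumed) */

static int
create_context(void)
{
	pfarg_context_t ctx;

	memset(&ctx, 0, sizeof(ctx));
	/* per-task context, no sampling format (null buffer UUID) */
	if (perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1) < 0)
		return -1;

	/* the kernel hands back the new context fd in ctx_fd (see above) */
	return ctx.ctx_fd;
}
#endif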
2712 | ||
2713 | static inline unsigned long | |
2714 | pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset) | |
2715 | { | |
2716 | unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset; | |
2717 | unsigned long new_seed, old_seed = reg->seed, mask = reg->mask; | |
2718 | extern unsigned long carta_random32 (unsigned long seed); | |
2719 | ||
2720 | if (reg->flags & PFM_REGFL_RANDOM) { | |
2721 | new_seed = carta_random32(old_seed); | |
2722 | val -= (old_seed & mask); /* counter values are negative numbers! */ | |
2723 | if ((mask >> 32) != 0) | |
2724 | /* construct a full 64-bit random value: */ | |
2725 | new_seed |= carta_random32(old_seed >> 32) << 32; | |
2726 | reg->seed = new_seed; | |
2727 | } | |
2728 | reg->lval = val; | |
2729 | return val; | |
2730 | } | |
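/*
 * Worked example, not built: why "counter values are negative numbers".
 * To overflow (and thus notify) after @period events, the 64-bit virtual
 * counter is seeded with -period in two's complement; the randomization in
 * pfm_new_counter_value() then subtracts a masked pseudo-random quantity so
 * that successive sampling periods are not all identical.  The helper below
 * only restates that arithmetic, it is not used by perfmon.
 */
#if 0
static unsigned long
seed_for_period(unsigned long period, unsigned long rand_bits, unsigned long rand_mask)
{
	unsigned long val = 0UL - period;	/* overflows after "period" increments */

	val -= (rand_bits & rand_mask);		/* same adjustment as pfm_new_counter_value() */
	return val;
}
#endif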
2731 | ||
2732 | static void | |
2733 | pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset) | |
2734 | { | |
2735 | unsigned long mask = ovfl_regs[0]; | |
2736 | unsigned long reset_others = 0UL; | |
2737 | unsigned long val; | |
2738 | int i; | |
2739 | ||
2740 | /* | |
2741 | * now restore reset value on sampling overflowed counters | |
2742 | */ | |
2743 | mask >>= PMU_FIRST_COUNTER; | |
2744 | for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) { | |
2745 | ||
2746 | if ((mask & 0x1UL) == 0UL) continue; | |
2747 | ||
2748 | ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); | |
2749 | reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; | |
2750 | ||
2751 | DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val)); | |
2752 | } | |
2753 | ||
2754 | /* | |
2755 | * Now take care of resetting the other registers | |
2756 | */ | |
2757 | for(i = 0; reset_others; i++, reset_others >>= 1) { | |
2758 | ||
2759 | if ((reset_others & 0x1) == 0) continue; | |
2760 | ||
2761 | ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset); | |
2762 | ||
2763 | DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n", | |
2764 | is_long_reset ? "long" : "short", i, val)); | |
2765 | } | |
2766 | } | |
2767 | ||
2768 | static void | |
2769 | pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset) | |
2770 | { | |
2771 | unsigned long mask = ovfl_regs[0]; | |
2772 | unsigned long reset_others = 0UL; | |
2773 | unsigned long val; | |
2774 | int i; | |
2775 | ||
2776 | DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset)); | |
2777 | ||
2778 | if (ctx->ctx_state == PFM_CTX_MASKED) { | |
2779 | pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset); | |
2780 | return; | |
2781 | } | |
2782 | ||
2783 | /* | |
2784 | * now restore reset value on sampling overflowed counters | |
2785 | */ | |
2786 | mask >>= PMU_FIRST_COUNTER; | |
2787 | for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) { | |
2788 | ||
2789 | if ((mask & 0x1UL) == 0UL) continue; | |
2790 | ||
2791 | val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); | |
2792 | reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; | |
2793 | ||
2794 | DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val)); | |
2795 | ||
2796 | pfm_write_soft_counter(ctx, i, val); | |
2797 | } | |
2798 | ||
2799 | /* | |
2800 | * Now take care of resetting the other registers | |
2801 | */ | |
2802 | for(i = 0; reset_others; i++, reset_others >>= 1) { | |
2803 | ||
2804 | if ((reset_others & 0x1) == 0) continue; | |
2805 | ||
2806 | val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset); | |
2807 | ||
2808 | if (PMD_IS_COUNTING(i)) { | |
2809 | pfm_write_soft_counter(ctx, i, val); | |
2810 | } else { | |
2811 | ia64_set_pmd(i, val); | |
2812 | } | |
2813 | DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n", | |
2814 | is_long_reset ? "long" : "short", i, val)); | |
2815 | } | |
2816 | ia64_srlz_d(); | |
2817 | } | |
2818 | ||
2819 | static int | |
2820 | pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
2821 | { | |
1da177e4 LT |
2822 | struct task_struct *task; |
2823 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | |
2824 | unsigned long value, pmc_pm; | |
2825 | unsigned long smpl_pmds, reset_pmds, impl_pmds; | |
2826 | unsigned int cnum, reg_flags, flags, pmc_type; | |
2827 | int i, can_access_pmu = 0, is_loaded, is_system, expert_mode; | |
2828 | int is_monitor, is_counting, state; | |
2829 | int ret = -EINVAL; | |
2830 | pfm_reg_check_t wr_func; | |
2831 | #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z)) | |
2832 | ||
2833 | state = ctx->ctx_state; | |
2834 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | |
2835 | is_system = ctx->ctx_fl_system; | |
2836 | task = ctx->ctx_task; | |
2837 | impl_pmds = pmu_conf->impl_pmds[0]; | |
2838 | ||
2839 | if (state == PFM_CTX_ZOMBIE) return -EINVAL; | |
2840 | ||
2841 | if (is_loaded) { | |
1da177e4 LT |
2842 | /* |
2843 | * In system wide and when the context is loaded, access can only happen | |
2844 | * when the caller is running on the CPU being monitored by the session. | |
2845 | * It does not have to be the owner (ctx_task) of the context per se. | |
2846 | */ | |
2847 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | |
2848 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
2849 | return -EBUSY; | |
2850 | } | |
2851 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | |
2852 | } | |
2853 | expert_mode = pfm_sysctl.expert_mode; | |
2854 | ||
2855 | for (i = 0; i < count; i++, req++) { | |
2856 | ||
2857 | cnum = req->reg_num; | |
2858 | reg_flags = req->reg_flags; | |
2859 | value = req->reg_value; | |
2860 | smpl_pmds = req->reg_smpl_pmds[0]; | |
2861 | reset_pmds = req->reg_reset_pmds[0]; | |
2862 | flags = 0; | |
2863 | ||
2864 | ||
2865 | if (cnum >= PMU_MAX_PMCS) { | |
2866 | DPRINT(("pmc%u is invalid\n", cnum)); | |
2867 | goto error; | |
2868 | } | |
2869 | ||
2870 | pmc_type = pmu_conf->pmc_desc[cnum].type; | |
2871 | pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1; | |
2872 | is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0; | |
2873 | is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0; | |
2874 | ||
2875 | /* | |
2876 | * we reject all non-implemented PMCs as well | |
2877 | * as attempts to modify PMC[0-3] which are used | |
2878 | * as status registers by the PMU | |
2879 | */ | |
2880 | if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) { | |
2881 | DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type)); | |
2882 | goto error; | |
2883 | } | |
2884 | wr_func = pmu_conf->pmc_desc[cnum].write_check; | |
2885 | /* | |
2886 | * If the PMC is a monitor, then if the value is not the default: | |
2887 | * - system-wide session: PMCx.pm=1 (privileged monitor) | |
2888 | * - per-task : PMCx.pm=0 (user monitor) | |
2889 | */ | |
2890 | if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) { | |
2891 | DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n", | |
2892 | cnum, | |
2893 | pmc_pm, | |
2894 | is_system)); | |
2895 | goto error; | |
2896 | } | |
2897 | ||
2898 | if (is_counting) { | |
2899 | /* | |
2900 | * enforce generation of overflow interrupt. Necessary on all | |
2901 | * CPUs. | |
2902 | */ | |
2903 | value |= 1 << PMU_PMC_OI; | |
2904 | ||
2905 | if (reg_flags & PFM_REGFL_OVFL_NOTIFY) { | |
2906 | flags |= PFM_REGFL_OVFL_NOTIFY; | |
2907 | } | |
2908 | ||
2909 | if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM; | |
2910 | ||
2911 | /* verify validity of smpl_pmds */ | |
2912 | if ((smpl_pmds & impl_pmds) != smpl_pmds) { | |
2913 | DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum)); | |
2914 | goto error; | |
2915 | } | |
2916 | ||
2917 | /* verify validity of reset_pmds */ | |
2918 | if ((reset_pmds & impl_pmds) != reset_pmds) { | |
2919 | DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum)); | |
2920 | goto error; | |
2921 | } | |
2922 | } else { | |
2923 | if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) { | |
2924 | DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum)); | |
2925 | goto error; | |
2926 | } | |
2927 | /* eventid on non-counting monitors are ignored */ | |
2928 | } | |
2929 | ||
2930 | /* | |
2931 | * execute write checker, if any | |
2932 | */ | |
2933 | if (likely(expert_mode == 0 && wr_func)) { | |
2934 | ret = (*wr_func)(task, ctx, cnum, &value, regs); | |
2935 | if (ret) goto error; | |
2936 | ret = -EINVAL; | |
2937 | } | |
2938 | ||
2939 | /* | |
2940 | * no error on this register | |
2941 | */ | |
2942 | PFM_REG_RETFLAG_SET(req->reg_flags, 0); | |
2943 | ||
2944 | /* | |
2945 | * Now we commit the changes to the software state | |
2946 | */ | |
2947 | ||
2948 | /* | |
2949 | * update overflow information | |
2950 | */ | |
2951 | if (is_counting) { | |
2952 | /* | |
2953 | * full flag update each time a register is programmed | |
2954 | */ | |
2955 | ctx->ctx_pmds[cnum].flags = flags; | |
2956 | ||
2957 | ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds; | |
2958 | ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds; | |
2959 | ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid; | |
2960 | ||
2961 | /* | |
2962 | * Mark all PMDS to be accessed as used. | |
2963 | * | |
2964 | * We do not keep track of PMC because we have to | |
2965 | * systematically restore ALL of them. | |
2966 | * | |
2967 | * We do not update the used_monitors mask, because | |
2968 | * if we have not programmed them, then they will be in | |
2969 | * a quiescent state, therefore we will not need to | |
2970 | * mask/restore them when the context is MASKED. | |
2971 | */ | |
2972 | CTX_USED_PMD(ctx, reset_pmds); | |
2973 | CTX_USED_PMD(ctx, smpl_pmds); | |
2974 | /* | |
2975 | * make sure we do not try to reset on | |
2976 | * restart because we have established new values | |
2977 | */ | |
2978 | if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum; | |
2979 | } | |
2980 | /* | |
2981 | * Needed in case the user does not initialize the equivalent | |
2982 | * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no | |
2983 | * possible leak here. | |
2984 | */ | |
2985 | CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]); | |
2986 | ||
2987 | /* | |
2988 | * keep track of the monitor PMC that we are using. | |
2989 | * we save the value of the pmc in ctx_pmcs[] and if | |
2990 | * the monitoring is not stopped for the context we also | |
2991 | * place it in the saved state area so that it will be | |
2992 | * picked up later by the context switch code. | |
2993 | * | |
2994 | * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs(). | |
2995 | * | |
35589a8f | 2996 | * The value in th_pmcs[] may be modified on overflow, i.e., when |
1da177e4 LT |
2997 | * monitoring needs to be stopped. |
2998 | */ | |
2999 | if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum); | |
3000 | ||
3001 | /* | |
3002 | * update context state | |
3003 | */ | |
3004 | ctx->ctx_pmcs[cnum] = value; | |
3005 | ||
3006 | if (is_loaded) { | |
3007 | /* | |
3008 | * write thread state | |
3009 | */ | |
35589a8f | 3010 | if (is_system == 0) ctx->th_pmcs[cnum] = value; |
1da177e4 LT |
3011 | |
3012 | /* | |
3013 | * write hardware register if we can | |
3014 | */ | |
3015 | if (can_access_pmu) { | |
3016 | ia64_set_pmc(cnum, value); | |
3017 | } | |
3018 | #ifdef CONFIG_SMP | |
3019 | else { | |
3020 | /* | |
3021 | * per-task SMP only here | |
3022 | * | |
3023 | * we are guaranteed that the task is not running on the other CPU, | |
3024 | * we indicate that this PMD will need to be reloaded if the task | |
3025 | * is rescheduled on the CPU it ran last on. | |
3026 | */ | |
3027 | ctx->ctx_reload_pmcs[0] |= 1UL << cnum; | |
3028 | } | |
3029 | #endif | |
3030 | } | |
3031 | ||
3032 | DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n", | |
3033 | cnum, | |
3034 | value, | |
3035 | is_loaded, | |
3036 | can_access_pmu, | |
3037 | flags, | |
3038 | ctx->ctx_all_pmcs[0], | |
3039 | ctx->ctx_used_pmds[0], | |
3040 | ctx->ctx_pmds[cnum].eventid, | |
3041 | smpl_pmds, | |
3042 | reset_pmds, | |
3043 | ctx->ctx_reload_pmcs[0], | |
3044 | ctx->ctx_used_monitors[0], | |
3045 | ctx->ctx_ovfl_regs[0])); | |
3046 | } | |
3047 | ||
3048 | /* | |
3049 | * make sure the changes are visible | |
3050 | */ | |
3051 | if (can_access_pmu) ia64_srlz_d(); | |
3052 | ||
3053 | return 0; | |
3054 | error: | |
3055 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | |
3056 | return ret; | |
3057 | } | |
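/*
 * Illustrative sketch, not built: the kind of request a user-level tool sends
 * into pfm_write_pmcs()/pfm_write_pmds() above to program one counting
 * PMC/PMD pair with overflow notification.  The register number (4) and the
 * event encoding are made up, and perfmonctl() with the PFM_WRITE_PMCS /
 * PFM_WRITE_PMDS commands is assumed from the usual perfmon-2 user ABI.
 */
#if 0
#include <string.h>
#include <perfmon/perfmon.h>	/* pfarg_reg_t, perfmonctl() (assumed) */

static int
program_counter(int ctx_fd, unsigned long event_encoding, unsigned long period)
{
	pfarg_reg_t pc, pd;

	memset(&pc, 0, sizeof(pc));
	memset(&pd, 0, sizeof(pd));

	pc.reg_num   = 4;			/* hypothetical counting PMC/PMD pair */
	pc.reg_value = event_encoding;		/* vetted by the write checker above */
	pc.reg_flags = PFM_REGFL_OVFL_NOTIFY;	/* queue a message on overflow */

	pd.reg_num         = 4;
	pd.reg_value       = 0UL - period;	/* negative seed, see pfm_new_counter_value() */
	pd.reg_long_reset  = 0UL - period;
	pd.reg_short_reset = 0UL - period;

	if (perfmonctl(ctx_fd, PFM_WRITE_PMCS, &pc, 1) < 0)
		return -1;
	return perfmonctl(ctx_fd, PFM_WRITE_PMDS, &pd, 1);
}
#endif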
3058 | ||
3059 | static int | |
3060 | pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3061 | { | |
1da177e4 LT |
3062 | struct task_struct *task; |
3063 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | |
3064 | unsigned long value, hw_value, ovfl_mask; | |
3065 | unsigned int cnum; | |
3066 | int i, can_access_pmu = 0, state; | |
3067 | int is_counting, is_loaded, is_system, expert_mode; | |
3068 | int ret = -EINVAL; | |
3069 | pfm_reg_check_t wr_func; | |
3070 | ||
3071 | ||
3072 | state = ctx->ctx_state; | |
3073 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | |
3074 | is_system = ctx->ctx_fl_system; | |
3075 | ovfl_mask = pmu_conf->ovfl_val; | |
3076 | task = ctx->ctx_task; | |
3077 | ||
3078 | if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL; | |
3079 | ||
3080 | /* | |
3081 | * on both UP and SMP, we can only write to the PMC when the task is | |
3082 | * the owner of the local PMU. | |
3083 | */ | |
3084 | if (likely(is_loaded)) { | |
1da177e4 LT |
3085 | /* |
3086 | * In system wide and when the context is loaded, access can only happen | |
3087 | * when the caller is running on the CPU being monitored by the session. | |
3088 | * It does not have to be the owner (ctx_task) of the context per se. | |
3089 | */ | |
3090 | if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { | |
3091 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
3092 | return -EBUSY; | |
3093 | } | |
3094 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | |
3095 | } | |
3096 | expert_mode = pfm_sysctl.expert_mode; | |
3097 | ||
3098 | for (i = 0; i < count; i++, req++) { | |
3099 | ||
3100 | cnum = req->reg_num; | |
3101 | value = req->reg_value; | |
3102 | ||
3103 | if (!PMD_IS_IMPL(cnum)) { | |
3104 | DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum)); | |
3105 | goto abort_mission; | |
3106 | } | |
3107 | is_counting = PMD_IS_COUNTING(cnum); | |
3108 | wr_func = pmu_conf->pmd_desc[cnum].write_check; | |
3109 | ||
3110 | /* | |
3111 | * execute write checker, if any | |
3112 | */ | |
3113 | if (unlikely(expert_mode == 0 && wr_func)) { | |
3114 | unsigned long v = value; | |
3115 | ||
3116 | ret = (*wr_func)(task, ctx, cnum, &v, regs); | |
3117 | if (ret) goto abort_mission; | |
3118 | ||
3119 | value = v; | |
3120 | ret = -EINVAL; | |
3121 | } | |
3122 | ||
3123 | /* | |
3124 | * no error on this register | |
3125 | */ | |
3126 | PFM_REG_RETFLAG_SET(req->reg_flags, 0); | |
3127 | ||
3128 | /* | |
3129 | * now commit changes to software state | |
3130 | */ | |
3131 | hw_value = value; | |
3132 | ||
3133 | /* | |
3134 | * update virtualized (64bits) counter | |
3135 | */ | |
3136 | if (is_counting) { | |
3137 | /* | |
3138 | * write context state | |
3139 | */ | |
3140 | ctx->ctx_pmds[cnum].lval = value; | |
3141 | ||
3142 | /* | |
3143 | * when the context is loaded we use the split value | |
3144 | */ | |
3145 | if (is_loaded) { | |
3146 | hw_value = value & ovfl_mask; | |
3147 | value = value & ~ovfl_mask; | |
3148 | } | |
3149 | } | |
3150 | /* | |
3151 | * update reset values (not just for counters) | |
3152 | */ | |
3153 | ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset; | |
3154 | ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset; | |
3155 | ||
3156 | /* | |
3157 | * update randomization parameters (not just for counters) | |
3158 | */ | |
3159 | ctx->ctx_pmds[cnum].seed = req->reg_random_seed; | |
3160 | ctx->ctx_pmds[cnum].mask = req->reg_random_mask; | |
3161 | ||
3162 | /* | |
3163 | * update context value | |
3164 | */ | |
3165 | ctx->ctx_pmds[cnum].val = value; | |
3166 | ||
3167 | /* | |
3168 | * Keep track of what we use | |
3169 | * | |
3170 | * We do not keep track of PMC because we have to | |
3171 | * systematically restore ALL of them. | |
3172 | */ | |
3173 | CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum)); | |
3174 | ||
3175 | /* | |
3176 | * mark this PMD register used as well | |
3177 | */ | |
3178 | CTX_USED_PMD(ctx, RDEP(cnum)); | |
3179 | ||
3180 | /* | |
3181 | * make sure we do not try to reset on | |
3182 | * restart because we have established new values | |
3183 | */ | |
3184 | if (is_counting && state == PFM_CTX_MASKED) { | |
3185 | ctx->ctx_ovfl_regs[0] &= ~1UL << cnum; | |
3186 | } | |
3187 | ||
3188 | if (is_loaded) { | |
3189 | /* | |
3190 | * write thread state | |
3191 | */ | |
35589a8f | 3192 | if (is_system == 0) ctx->th_pmds[cnum] = hw_value; |
1da177e4 LT |
3193 | |
3194 | /* | |
3195 | * write hardware register if we can | |
3196 | */ | |
3197 | if (can_access_pmu) { | |
3198 | ia64_set_pmd(cnum, hw_value); | |
3199 | } else { | |
3200 | #ifdef CONFIG_SMP | |
3201 | /* | |
3202 | * we are guaranteed that the task is not running on another CPU, | |
3203 | * so we indicate that this PMD will need to be reloaded if the task | |
3204 | * is rescheduled on the CPU it ran last on. | |
3205 | */ | |
3206 | ctx->ctx_reload_pmds[0] |= 1UL << cnum; | |
3207 | #endif | |
3208 | } | |
3209 | } | |
3210 | ||
3211 | DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx " | |
3212 | "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n", | |
3213 | cnum, | |
3214 | value, | |
3215 | is_loaded, | |
3216 | can_access_pmu, | |
3217 | hw_value, | |
3218 | ctx->ctx_pmds[cnum].val, | |
3219 | ctx->ctx_pmds[cnum].short_reset, | |
3220 | ctx->ctx_pmds[cnum].long_reset, | |
3221 | PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N', | |
3222 | ctx->ctx_pmds[cnum].seed, | |
3223 | ctx->ctx_pmds[cnum].mask, | |
3224 | ctx->ctx_used_pmds[0], | |
3225 | ctx->ctx_pmds[cnum].reset_pmds[0], | |
3226 | ctx->ctx_reload_pmds[0], | |
3227 | ctx->ctx_all_pmds[0], | |
3228 | ctx->ctx_ovfl_regs[0])); | |
3229 | } | |
3230 | ||
3231 | /* | |
3232 | * make changes visible | |
3233 | */ | |
3234 | if (can_access_pmu) ia64_srlz_d(); | |
3235 | ||
3236 | return 0; | |
3237 | ||
3238 | abort_mission: | |
3239 | /* | |
3240 | * for now, we have only one possibility for error | |
3241 | */ | |
3242 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | |
3243 | return ret; | |
3244 | } | |
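/*
 * Illustrative user-space sketch (not part of this file): programming one
 * counting PMD through the PFM_WRITE_PMDS command serviced by
 * pfm_write_pmds() above.  Assumptions: "fd" is a perfmon context file
 * descriptor that has already been created and loaded, perfmonctl() is the
 * libc wrapper around sys_perfmonctl(), the PMD number (4) and the period
 * are arbitrary, and the usual perfmon/string user headers are included.
 */
static int program_sampling_period(int fd, unsigned long period)
{
	pfarg_reg_t pd;

	memset(&pd, 0, sizeof(pd));
	pd.reg_num         = 4;		/* hypothetical counting PMD */
	pd.reg_value       = -period;	/* counts up toward overflow */
	pd.reg_long_reset  = -period;	/* reload value used after a user-level restart */
	pd.reg_short_reset = -period;	/* reload value used for overflows handled in-kernel */

	return perfmonctl(fd, PFM_WRITE_PMDS, &pd, 1);
}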
3245 | ||
3246 | /* | |
3247 | * By way of PROTECT_CTX(), interrupts are masked while we are in this function. | |
3248 | * Therefore we know we do not have to worry about the PMU overflow interrupt. If an | |
3249 | * interrupt is delivered during the call, it will be kept pending until we leave, making | |
3250 | * it appear as if it had been generated at the UNPROTECT_CTX(). At least we are | |
3251 | * guaranteed to return consistent data to the user; it may simply be old. It is not | |
3252 | * trivial to handle the overflow while inside the call because we could end up in | |
3253 | * some module sampling-buffer code and cause deadlocks. | |
3254 | */ | |
3255 | static int | |
3256 | pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3257 | { | |
1da177e4 LT |
3258 | struct task_struct *task; |
3259 | unsigned long val = 0UL, lval, ovfl_mask, sval; | |
3260 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | |
3261 | unsigned int cnum, reg_flags = 0; | |
3262 | int i, can_access_pmu = 0, state; | |
3263 | int is_loaded, is_system, is_counting, expert_mode; | |
3264 | int ret = -EINVAL; | |
3265 | pfm_reg_check_t rd_func; | |
3266 | ||
3267 | /* | |
3268 | * access is possible when loaded only for | |
3269 | * self-monitoring tasks or in UP mode | |
3270 | */ | |
3271 | ||
3272 | state = ctx->ctx_state; | |
3273 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | |
3274 | is_system = ctx->ctx_fl_system; | |
3275 | ovfl_mask = pmu_conf->ovfl_val; | |
3276 | task = ctx->ctx_task; | |
3277 | ||
3278 | if (state == PFM_CTX_ZOMBIE) return -EINVAL; | |
3279 | ||
3280 | if (likely(is_loaded)) { | |
1da177e4 LT |
3281 | /* |
3282 | * In system-wide mode, when the context is loaded, access can only happen | |
3283 | * when the caller is running on the CPU being monitored by the session. | |
3284 | * It does not have to be the owner (ctx_task) of the context per se. | |
3285 | */ | |
3286 | if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { | |
3287 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
3288 | return -EBUSY; | |
3289 | } | |
3290 | /* | |
3291 | * this can be true when not self-monitoring only in UP | |
3292 | */ | |
3293 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | |
3294 | ||
3295 | if (can_access_pmu) ia64_srlz_d(); | |
3296 | } | |
3297 | expert_mode = pfm_sysctl.expert_mode; | |
3298 | ||
3299 | DPRINT(("ld=%d apmu=%d ctx_state=%d\n", | |
3300 | is_loaded, | |
3301 | can_access_pmu, | |
3302 | state)); | |
3303 | ||
3304 | /* | |
3305 | * on both UP and SMP, we can only read the PMD from the hardware register when | |
3306 | * the task is the owner of the local PMU. | |
3307 | */ | |
3308 | ||
3309 | for (i = 0; i < count; i++, req++) { | |
3310 | ||
3311 | cnum = req->reg_num; | |
3312 | reg_flags = req->reg_flags; | |
3313 | ||
3314 | if (unlikely(!PMD_IS_IMPL(cnum))) goto error; | |
3315 | /* | |
3316 | * we can only read the registers that we use. That includes | |
72fdbdce | 3317 | * the ones we explicitly initialize AND the ones we want included |
1da177e4 LT |
3318 | * in the sampling buffer (smpl_regs). |
3319 | * | |
3320 | * Having this restriction allows optimization in the ctxsw routine | |
3321 | * without compromising security (leaks) | |
3322 | */ | |
3323 | if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error; | |
3324 | ||
3325 | sval = ctx->ctx_pmds[cnum].val; | |
3326 | lval = ctx->ctx_pmds[cnum].lval; | |
3327 | is_counting = PMD_IS_COUNTING(cnum); | |
3328 | ||
3329 | /* | |
3330 | * If the task is not the current one, then we check if the | |
3331 | * PMU state is still in the local live register due to lazy ctxsw. | |
3332 | * If true, then we read directly from the registers. | |
3333 | */ | |
3334 | if (can_access_pmu){ | |
3335 | val = ia64_get_pmd(cnum); | |
3336 | } else { | |
3337 | /* | |
3338 | * context has been saved | |
3339 | * if context is zombie, then task does not exist anymore. | |
3340 | * In this case, we use the full value saved in the context (pfm_flush_regs()). | |
3341 | */ | |
35589a8f | 3342 | val = is_loaded ? ctx->th_pmds[cnum] : 0UL; |
1da177e4 LT |
3343 | } |
3344 | rd_func = pmu_conf->pmd_desc[cnum].read_check; | |
3345 | ||
3346 | if (is_counting) { | |
3347 | /* | |
3348 | * XXX: need to check for overflow when loaded | |
3349 | */ | |
3350 | val &= ovfl_mask; | |
3351 | val += sval; | |
3352 | } | |
3353 | ||
3354 | /* | |
3355 | * execute read checker, if any | |
3356 | */ | |
3357 | if (unlikely(expert_mode == 0 && rd_func)) { | |
3358 | unsigned long v = val; | |
3359 | ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs); | |
3360 | if (ret) goto error; | |
3361 | val = v; | |
3362 | ret = -EINVAL; | |
3363 | } | |
3364 | ||
3365 | PFM_REG_RETFLAG_SET(reg_flags, 0); | |
3366 | ||
3367 | DPRINT(("pmd[%u]=0x%lx\n", cnum, val)); | |
3368 | ||
3369 | /* | |
3370 | * update register return value, abort all if problem during copy. | |
3371 | * we only modify the reg_flags field. no check mode is fine because | |
3372 | * access has been verified upfront in sys_perfmonctl(). | |
3373 | */ | |
3374 | req->reg_value = val; | |
3375 | req->reg_flags = reg_flags; | |
3376 | req->reg_last_reset_val = lval; | |
3377 | } | |
3378 | ||
3379 | return 0; | |
3380 | ||
3381 | error: | |
3382 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | |
3383 | return ret; | |
3384 | } | |
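/*
 * Illustrative user-space sketch (not part of this file): reading back the
 * 64-bit virtualized counter that pfm_read_pmds() above assembles from the
 * hardware bits and ctx_pmds[].val.  Same assumptions as the previous
 * sketch (perfmonctl() wrapper, loaded context, headers included); the
 * helper name is made up.
 */
static int read_virtual_counter(int fd, unsigned int pmd, unsigned long *value)
{
	pfarg_reg_t pd;

	memset(&pd, 0, sizeof(pd));
	pd.reg_num = pmd;

	if (perfmonctl(fd, PFM_READ_PMDS, &pd, 1))
		return -1;

	*value = pd.reg_value;
	return 0;
}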
3385 | ||
3386 | int | |
3387 | pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | |
3388 | { | |
3389 | pfm_context_t *ctx; | |
3390 | ||
3391 | if (req == NULL) return -EINVAL; | |
3392 | ||
3393 | ctx = GET_PMU_CTX(); | |
3394 | ||
3395 | if (ctx == NULL) return -EINVAL; | |
3396 | ||
3397 | /* | |
3398 | * for now limit to current task, which is enough when calling | |
3399 | * from overflow handler | |
3400 | */ | |
3401 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | |
3402 | ||
3403 | return pfm_write_pmcs(ctx, req, nreq, regs); | |
3404 | } | |
3405 | EXPORT_SYMBOL(pfm_mod_write_pmcs); | |
3406 | ||
3407 | int | |
3408 | pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | |
3409 | { | |
3410 | pfm_context_t *ctx; | |
3411 | ||
3412 | if (req == NULL) return -EINVAL; | |
3413 | ||
3414 | ctx = GET_PMU_CTX(); | |
3415 | ||
3416 | if (ctx == NULL) return -EINVAL; | |
3417 | ||
3418 | /* | |
3419 | * for now limit to current task, which is enough when calling | |
3420 | * from overflow handler | |
3421 | */ | |
3422 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | |
3423 | ||
3424 | return pfm_read_pmds(ctx, req, nreq, regs); | |
3425 | } | |
3426 | EXPORT_SYMBOL(pfm_mod_read_pmds); | |
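/*
 * Minimal in-kernel sketch of how a custom sampling-buffer format module
 * could use the two pfm_mod_*() helpers above from its overflow handler.
 * The handler name, the extra PMD number and the record layout are
 * hypothetical; only the helper calls and their restriction to the current
 * task (or a system-wide context) come from this file.
 */
static int example_fmt_capture(struct task_struct *task, void *rec,
			       struct pt_regs *regs)
{
	pfarg_reg_t pd;

	if (task != current)
		return -EBUSY;		/* mirrors the check in pfm_mod_read_pmds() */

	memset(&pd, 0, sizeof(pd));
	pd.reg_num = 4;			/* hypothetical extra PMD to record */

	if (pfm_mod_read_pmds(task, &pd, 1, regs))
		return -EINVAL;

	*(unsigned long *)rec = pd.reg_value;
	return 0;
}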
3427 | ||
3428 | /* | |
3429 | * Only call this function when a process is trying to | |
3430 | * write the debug registers (reading is always allowed) | |
3431 | */ | |
3432 | int | |
3433 | pfm_use_debug_registers(struct task_struct *task) | |
3434 | { | |
3435 | pfm_context_t *ctx = task->thread.pfm_context; | |
3436 | unsigned long flags; | |
3437 | int ret = 0; | |
3438 | ||
3439 | if (pmu_conf->use_rr_dbregs == 0) return 0; | |
3440 | ||
19c5870c | 3441 | DPRINT(("called for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
3442 | |
3443 | /* | |
3444 | * do it only once | |
3445 | */ | |
3446 | if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0; | |
3447 | ||
3448 | /* | |
3449 | * Even on SMP, we do not need to use an atomic here because | |
3450 | * the only way in is via ptrace() and this is possible only when the | |
3451 | * process is stopped. Even in the case where the ctxsw out is not totally | |
3452 | * completed by the time we come here, there is no way the 'stopped' process | |
3453 | * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine. | |
3454 | * So this is always safe. | |
3455 | */ | |
3456 | if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1; | |
3457 | ||
3458 | LOCK_PFS(flags); | |
3459 | ||
3460 | /* | |
3461 | * We cannot allow setting breakpoints when system wide monitoring | |
3462 | * sessions are using the debug registers. | |
3463 | */ | |
3464 | if (pfm_sessions.pfs_sys_use_dbregs > 0) | |
3465 | ret = -1; | |
3466 | else | |
3467 | pfm_sessions.pfs_ptrace_use_dbregs++; | |
3468 | ||
3469 | DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n", | |
3470 | pfm_sessions.pfs_ptrace_use_dbregs, | |
3471 | pfm_sessions.pfs_sys_use_dbregs, | |
19c5870c | 3472 | task_pid_nr(task), ret)); |
1da177e4 LT |
3473 | |
3474 | UNLOCK_PFS(flags); | |
3475 | ||
3476 | return ret; | |
3477 | } | |
3478 | ||
3479 | /* | |
3480 | * This function is called for every task that exits with | |
3481 | * IA64_THREAD_DBG_VALID set. This indicates a task which was | |
3482 | * able to use the debug registers for debugging purposes via | |
3483 | * ptrace(). Therefore we know it was not using them for | |
af901ca1 | 3484 | * performance monitoring, so we only decrement the number |
1da177e4 LT |
3485 | * of "ptraced" debug register users to keep the count up to date |
3486 | */ | |
3487 | int | |
3488 | pfm_release_debug_registers(struct task_struct *task) | |
3489 | { | |
3490 | unsigned long flags; | |
3491 | int ret; | |
3492 | ||
3493 | if (pmu_conf->use_rr_dbregs == 0) return 0; | |
3494 | ||
3495 | LOCK_PFS(flags); | |
3496 | if (pfm_sessions.pfs_ptrace_use_dbregs == 0) { | |
19c5870c | 3497 | printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task)); |
1da177e4 LT |
3498 | ret = -1; |
3499 | } else { | |
3500 | pfm_sessions.pfs_ptrace_use_dbregs--; | |
3501 | ret = 0; | |
3502 | } | |
3503 | UNLOCK_PFS(flags); | |
3504 | ||
3505 | return ret; | |
3506 | } | |
3507 | ||
3508 | static int | |
3509 | pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3510 | { | |
3511 | struct task_struct *task; | |
3512 | pfm_buffer_fmt_t *fmt; | |
3513 | pfm_ovfl_ctrl_t rst_ctrl; | |
3514 | int state, is_system; | |
3515 | int ret = 0; | |
3516 | ||
3517 | state = ctx->ctx_state; | |
3518 | fmt = ctx->ctx_buf_fmt; | |
3519 | is_system = ctx->ctx_fl_system; | |
3520 | task = PFM_CTX_TASK(ctx); | |
3521 | ||
3522 | switch(state) { | |
3523 | case PFM_CTX_MASKED: | |
3524 | break; | |
3525 | case PFM_CTX_LOADED: | |
3526 | if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break; | |
3527 | /* fall through */ | |
3528 | case PFM_CTX_UNLOADED: | |
3529 | case PFM_CTX_ZOMBIE: | |
3530 | DPRINT(("invalid state=%d\n", state)); | |
3531 | return -EBUSY; | |
3532 | default: | |
3533 | DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state)); | |
3534 | return -EINVAL; | |
3535 | } | |
3536 | ||
3537 | /* | |
3538 | * In system-wide mode, when the context is loaded, access can only happen | |
3539 | * when the caller is running on the CPU being monitored by the session. | |
3540 | * It does not have to be the owner (ctx_task) of the context per se. | |
3541 | */ | |
3542 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | |
3543 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
3544 | return -EBUSY; | |
3545 | } | |
3546 | ||
3547 | /* sanity check */ | |
3548 | if (unlikely(task == NULL)) { | |
19c5870c | 3549 | printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current)); |
1da177e4 LT |
3550 | return -EINVAL; |
3551 | } | |
3552 | ||
3553 | if (task == current || is_system) { | |
3554 | ||
3555 | fmt = ctx->ctx_buf_fmt; | |
3556 | ||
3557 | DPRINT(("restarting self %d ovfl=0x%lx\n", | |
19c5870c | 3558 | task_pid_nr(task), |
1da177e4 LT |
3559 | ctx->ctx_ovfl_regs[0])); |
3560 | ||
3561 | if (CTX_HAS_SMPL(ctx)) { | |
3562 | ||
3563 | prefetch(ctx->ctx_smpl_hdr); | |
3564 | ||
3565 | rst_ctrl.bits.mask_monitoring = 0; | |
3566 | rst_ctrl.bits.reset_ovfl_pmds = 0; | |
3567 | ||
3568 | if (state == PFM_CTX_LOADED) | |
3569 | ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | |
3570 | else | |
3571 | ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | |
3572 | } else { | |
3573 | rst_ctrl.bits.mask_monitoring = 0; | |
3574 | rst_ctrl.bits.reset_ovfl_pmds = 1; | |
3575 | } | |
3576 | ||
3577 | if (ret == 0) { | |
3578 | if (rst_ctrl.bits.reset_ovfl_pmds) | |
3579 | pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET); | |
3580 | ||
3581 | if (rst_ctrl.bits.mask_monitoring == 0) { | |
19c5870c | 3582 | DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
3583 | |
3584 | if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task); | |
3585 | } else { | |
19c5870c | 3586 | DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
3587 | |
3588 | // cannot use pfm_stop_monitoring(task, regs); | |
3589 | } | |
3590 | } | |
3591 | /* | |
3592 | * clear overflowed PMD mask to remove any stale information | |
3593 | */ | |
3594 | ctx->ctx_ovfl_regs[0] = 0UL; | |
3595 | ||
3596 | /* | |
3597 | * back to LOADED state | |
3598 | */ | |
3599 | ctx->ctx_state = PFM_CTX_LOADED; | |
3600 | ||
3601 | /* | |
3602 | * XXX: not really useful for self monitoring | |
3603 | */ | |
3604 | ctx->ctx_fl_can_restart = 0; | |
3605 | ||
3606 | return 0; | |
3607 | } | |
3608 | ||
3609 | /* | |
3610 | * restart another task | |
3611 | */ | |
3612 | ||
3613 | /* | |
3614 | * When PFM_CTX_MASKED, we cannot issue a restart before the previous | |
3615 | * one is seen by the task. | |
3616 | */ | |
3617 | if (state == PFM_CTX_MASKED) { | |
3618 | if (ctx->ctx_fl_can_restart == 0) return -EINVAL; | |
3619 | /* | |
3620 | * will prevent subsequent restart before this one is | |
3621 | * seen by other task | |
3622 | */ | |
3623 | ctx->ctx_fl_can_restart = 0; | |
3624 | } | |
3625 | ||
3626 | /* | |
3627 | * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e. | |
3628 | * the task is blocked or on its way to block. That's the normal | |
3629 | * restart path. If the monitoring is not masked, then the task | |
3630 | * can be actively monitoring and we cannot directly intervene. | |
3631 | * Therefore we use the trap mechanism to catch the task and | |
3632 | * force it to reset the buffer/reset PMDs. | |
3633 | * | |
3634 | * if non-blocking, then we ensure that the task will go into | |
3635 | * pfm_handle_work() before returning to user mode. | |
3636 | * | |
72fdbdce | 3637 | * We cannot explicitly reset another task; it MUST always |
1da177e4 LT |
3638 | * be done by the task itself. This works for system-wide sessions because |
3639 | * the tool that is controlling the session is logically doing | |
3640 | * "self-monitoring". | |
3641 | */ | |
3642 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { | |
04157e4c | 3643 | DPRINT(("unblocking [%d]\n", task_pid_nr(task))); |
60f1c444 | 3644 | complete(&ctx->ctx_restart_done); |
1da177e4 | 3645 | } else { |
19c5870c | 3646 | DPRINT(("[%d] armed exit trap\n", task_pid_nr(task))); |
1da177e4 LT |
3647 | |
3648 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; | |
3649 | ||
3650 | PFM_SET_WORK_PENDING(task, 1); | |
3651 | ||
f14488cc | 3652 | set_notify_resume(task); |
1da177e4 LT |
3653 | |
3654 | /* | |
3655 | * XXX: send reschedule if task runs on another CPU | |
3656 | */ | |
3657 | } | |
3658 | return 0; | |
3659 | } | |
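/*
 * Illustrative user-space sketch (not part of this file): the notification
 * loop that ends up in pfm_restart() above.  The monitoring tool reads an
 * overflow message from the context file descriptor, consumes the sampling
 * buffer, then issues PFM_RESTART so the masked context is unblocked and
 * monitoring resumes.  pfm_msg_t and perfmonctl() are assumed to come from
 * the usual perfmon headers/libc; message decoding is elided.
 */
static void notification_loop(int fd)
{
	pfm_msg_t msg;

	for (;;) {
		if (read(fd, &msg, sizeof(msg)) != sizeof(msg))
			break;

		/* ... process the sampling buffer here ... */

		if (perfmonctl(fd, PFM_RESTART, NULL, 0))
			break;
	}
}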
3660 | ||
3661 | static int | |
3662 | pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3663 | { | |
3664 | unsigned int m = *(unsigned int *)arg; | |
3665 | ||
3666 | pfm_sysctl.debug = m == 0 ? 0 : 1; | |
3667 | ||
1da177e4 LT |
3668 | printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off"); |
3669 | ||
3670 | if (m == 0) { | |
3671 | memset(pfm_stats, 0, sizeof(pfm_stats)); | |
3672 | for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL; | |
3673 | } | |
3674 | return 0; | |
3675 | } | |
3676 | ||
3677 | /* | |
3678 | * arg can be NULL and count can be zero for this function | |
3679 | */ | |
3680 | static int | |
3681 | pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3682 | { | |
3683 | struct thread_struct *thread = NULL; | |
3684 | struct task_struct *task; | |
3685 | pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg; | |
3686 | unsigned long flags; | |
3687 | dbreg_t dbreg; | |
3688 | unsigned int rnum; | |
3689 | int first_time; | |
3690 | int ret = 0, state; | |
3691 | int i, can_access_pmu = 0; | |
3692 | int is_system, is_loaded; | |
3693 | ||
3694 | if (pmu_conf->use_rr_dbregs == 0) return -EINVAL; | |
3695 | ||
3696 | state = ctx->ctx_state; | |
3697 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | |
3698 | is_system = ctx->ctx_fl_system; | |
3699 | task = ctx->ctx_task; | |
3700 | ||
3701 | if (state == PFM_CTX_ZOMBIE) return -EINVAL; | |
3702 | ||
3703 | /* | |
3704 | * on both UP and SMP, we can only write to the debug registers when the task is | |
3705 | * the owner of the local PMU. | |
3706 | */ | |
3707 | if (is_loaded) { | |
3708 | thread = &task->thread; | |
3709 | /* | |
3711 | * In system-wide mode, when the context is loaded, access can only happen | |
3711 | * when the caller is running on the CPU being monitored by the session. | |
3712 | * It does not have to be the owner (ctx_task) of the context per se. | |
3713 | */ | |
3714 | if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { | |
3715 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
3716 | return -EBUSY; | |
3717 | } | |
3718 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | |
3719 | } | |
3720 | ||
3721 | /* | |
3722 | * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w | |
3723 | * ensuring that no real breakpoint can be installed via this call. | |
3724 | * | |
3725 | * IMPORTANT: regs can be NULL in this function | |
3726 | */ | |
3727 | ||
3728 | first_time = ctx->ctx_fl_using_dbreg == 0; | |
3729 | ||
3730 | /* | |
3731 | * don't bother if we are loaded and task is being debugged | |
3732 | */ | |
3733 | if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) { | |
19c5870c | 3734 | DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
3735 | return -EBUSY; |
3736 | } | |
3737 | ||
3738 | /* | |
3739 | * check for debug registers in system-wide mode | |
3740 | * | |
3741 | * Even though a check is done in pfm_context_load(), | |
3742 | * we must repeat it here, in case the registers are | |
3743 | * written after the context is loaded | |
3744 | */ | |
3745 | if (is_loaded) { | |
3746 | LOCK_PFS(flags); | |
3747 | ||
3748 | if (first_time && is_system) { | |
3749 | if (pfm_sessions.pfs_ptrace_use_dbregs) | |
3750 | ret = -EBUSY; | |
3751 | else | |
3752 | pfm_sessions.pfs_sys_use_dbregs++; | |
3753 | } | |
3754 | UNLOCK_PFS(flags); | |
3755 | } | |
3756 | ||
3757 | if (ret != 0) return ret; | |
3758 | ||
3759 | /* | |
3760 | * mark ourself as user of the debug registers for | |
3761 | * perfmon purposes. | |
3762 | */ | |
3763 | ctx->ctx_fl_using_dbreg = 1; | |
3764 | ||
3765 | /* | |
3766 | * clear hardware registers to make sure we don't | |
3767 | * pick up stale state. | |
3768 | * | |
3769 | * for a system wide session, we do not use | |
3770 | * thread.dbr, thread.ibr because this process | |
3771 | * never leaves the current CPU and the state | |
3772 | * is shared by all processes running on it | |
3773 | */ | |
3774 | if (first_time && can_access_pmu) { | |
19c5870c | 3775 | DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task))); |
1da177e4 LT |
3776 | for (i=0; i < pmu_conf->num_ibrs; i++) { |
3777 | ia64_set_ibr(i, 0UL); | |
3778 | ia64_dv_serialize_instruction(); | |
3779 | } | |
3780 | ia64_srlz_i(); | |
3781 | for (i=0; i < pmu_conf->num_dbrs; i++) { | |
3782 | ia64_set_dbr(i, 0UL); | |
3783 | ia64_dv_serialize_data(); | |
3784 | } | |
3785 | ia64_srlz_d(); | |
3786 | } | |
3787 | ||
3788 | /* | |
3789 | * Now install the values into the registers | |
3790 | */ | |
3791 | for (i = 0; i < count; i++, req++) { | |
3792 | ||
3793 | rnum = req->dbreg_num; | |
3794 | dbreg.val = req->dbreg_value; | |
3795 | ||
3796 | ret = -EINVAL; | |
3797 | ||
3798 | if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) { | |
3799 | DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n", | |
3800 | rnum, dbreg.val, mode, i, count)); | |
3801 | ||
3802 | goto abort_mission; | |
3803 | } | |
3804 | ||
3805 | /* | |
3806 | * make sure we do not install an enabled breakpoint | |
3807 | */ | |
3808 | if (rnum & 0x1) { | |
3809 | if (mode == PFM_CODE_RR) | |
3810 | dbreg.ibr.ibr_x = 0; | |
3811 | else | |
3812 | dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0; | |
3813 | } | |
3814 | ||
3815 | PFM_REG_RETFLAG_SET(req->dbreg_flags, 0); | |
3816 | ||
3817 | /* | |
3818 | * Debug registers, just like the PMCs, can only be modified | |
3819 | * by a kernel call. Moreover, perfmon accesses to those | |
3820 | * registers are centralized in this routine. The hardware | |
3821 | * does not modify the value of these registers, therefore, | |
3822 | * if we save them as they are written, we can avoid having | |
3823 | * to save them on context switch out. This is made possible | |
3824 | * by the fact that when perfmon uses debug registers, ptrace() | |
3825 | * won't be able to modify them concurrently. | |
3826 | */ | |
3827 | if (mode == PFM_CODE_RR) { | |
3828 | CTX_USED_IBR(ctx, rnum); | |
3829 | ||
3830 | if (can_access_pmu) { | |
3831 | ia64_set_ibr(rnum, dbreg.val); | |
3832 | ia64_dv_serialize_instruction(); | |
3833 | } | |
3834 | ||
3835 | ctx->ctx_ibrs[rnum] = dbreg.val; | |
3836 | ||
3837 | DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n", | |
3838 | rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu)); | |
3839 | } else { | |
3840 | CTX_USED_DBR(ctx, rnum); | |
3841 | ||
3842 | if (can_access_pmu) { | |
3843 | ia64_set_dbr(rnum, dbreg.val); | |
3844 | ia64_dv_serialize_data(); | |
3845 | } | |
3846 | ctx->ctx_dbrs[rnum] = dbreg.val; | |
3847 | ||
3848 | DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n", | |
3849 | rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu)); | |
3850 | } | |
3851 | } | |
3852 | ||
3853 | return 0; | |
3854 | ||
3855 | abort_mission: | |
3856 | /* | |
3857 | * in case it was our first attempt, we undo the global modifications | |
3858 | */ | |
3859 | if (first_time) { | |
3860 | LOCK_PFS(flags); | |
3861 | if (ctx->ctx_fl_system) { | |
3862 | pfm_sessions.pfs_sys_use_dbregs--; | |
3863 | } | |
3864 | UNLOCK_PFS(flags); | |
3865 | ctx->ctx_fl_using_dbreg = 0; | |
3866 | } | |
3867 | /* | |
3868 | * install error return flag | |
3869 | */ | |
3870 | PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL); | |
3871 | ||
3872 | return ret; | |
3873 | } | |
3874 | ||
3875 | static int | |
3876 | pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3877 | { | |
3878 | return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs); | |
3879 | } | |
3880 | ||
3881 | static int | |
3882 | pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3883 | { | |
3884 | return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs); | |
3885 | } | |
3886 | ||
3887 | int | |
3888 | pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | |
3889 | { | |
3890 | pfm_context_t *ctx; | |
3891 | ||
3892 | if (req == NULL) return -EINVAL; | |
3893 | ||
3894 | ctx = GET_PMU_CTX(); | |
3895 | ||
3896 | if (ctx == NULL) return -EINVAL; | |
3897 | ||
3898 | /* | |
3899 | * for now limit to current task, which is enough when calling | |
3900 | * from overflow handler | |
3901 | */ | |
3902 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | |
3903 | ||
3904 | return pfm_write_ibrs(ctx, req, nreq, regs); | |
3905 | } | |
3906 | EXPORT_SYMBOL(pfm_mod_write_ibrs); | |
3907 | ||
3908 | int | |
3909 | pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | |
3910 | { | |
3911 | pfm_context_t *ctx; | |
3912 | ||
3913 | if (req == NULL) return -EINVAL; | |
3914 | ||
3915 | ctx = GET_PMU_CTX(); | |
3916 | ||
3917 | if (ctx == NULL) return -EINVAL; | |
3918 | ||
3919 | /* | |
3920 | * for now limit to current task, which is enough when calling | |
3921 | * from overflow handler | |
3922 | */ | |
3923 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | |
3924 | ||
3925 | return pfm_write_dbrs(ctx, req, nreq, regs); | |
3926 | } | |
3927 | EXPORT_SYMBOL(pfm_mod_write_dbrs); | |
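/*
 * Illustrative user-space sketch (not part of this file): installing one
 * code range restriction pair through PFM_WRITE_IBRS, which is serviced by
 * pfm_write_ibr_dbr() above.  Encoding the address/mask pair is normally
 * delegated to a helper library, so the two raw register values are taken
 * as arguments here; note that the kernel clears ibr.x itself so no real
 * breakpoint can be armed this way.  perfmonctl() is the assumed libc
 * wrapper.
 */
static int install_code_range(int fd, unsigned long addr_val, unsigned long mask_val)
{
	pfarg_dbreg_t dbr[2];

	memset(dbr, 0, sizeof(dbr));
	dbr[0].dbreg_num   = 0;		/* even register: address */
	dbr[0].dbreg_value = addr_val;
	dbr[1].dbreg_num   = 1;		/* odd register: mask/plm bits */
	dbr[1].dbreg_value = mask_val;

	return perfmonctl(fd, PFM_WRITE_IBRS, dbr, 2);
}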
3928 | ||
3929 | ||
3930 | static int | |
3931 | pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3932 | { | |
3933 | pfarg_features_t *req = (pfarg_features_t *)arg; | |
3934 | ||
3935 | req->ft_version = PFM_VERSION; | |
3936 | return 0; | |
3937 | } | |
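/*
 * Illustrative user-space sketch (not part of this file): querying the
 * interface revision returned by pfm_get_features() above.  The command is
 * not marked PFM_CMD_FD in the table below, so the file descriptor is
 * ignored and 0 is passed; PFM_VERSION_MAJOR() is assumed to be the usual
 * extraction macro from the perfmon header.
 */
static int perfmon2_available(void)
{
	pfarg_features_t ft;

	memset(&ft, 0, sizeof(ft));
	if (perfmonctl(0, PFM_GET_FEATURES, &ft, 1))
		return 0;

	return PFM_VERSION_MAJOR(ft.ft_version) == 2;
}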
3938 | ||
3939 | static int | |
3940 | pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
3941 | { | |
3942 | struct pt_regs *tregs; | |
3943 | struct task_struct *task = PFM_CTX_TASK(ctx); | |
3944 | int state, is_system; | |
3945 | ||
3946 | state = ctx->ctx_state; | |
3947 | is_system = ctx->ctx_fl_system; | |
3948 | ||
3949 | /* | |
3950 | * context must be attached to issue the stop command (includes LOADED, MASKED, ZOMBIE) | |
3951 | */ | |
3952 | if (state == PFM_CTX_UNLOADED) return -EINVAL; | |
3953 | ||
3954 | /* | |
3956 | * In system-wide mode, when the context is loaded, access can only happen | |
3956 | * when the caller is running on the CPU being monitored by the session. | |
3957 | * It does not have to be the owner (ctx_task) of the context per se. | |
3958 | */ | |
3959 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | |
3960 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
3961 | return -EBUSY; | |
3962 | } | |
3963 | DPRINT(("task [%d] ctx_state=%d is_system=%d\n", | |
19c5870c | 3964 | task_pid_nr(PFM_CTX_TASK(ctx)), |
1da177e4 LT |
3965 | state, |
3966 | is_system)); | |
3967 | /* | |
3968 | * in system mode, we need to update the PMU directly | |
3969 | * and the user level state of the caller, which may not | |
3970 | * necessarily be the creator of the context. | |
3971 | */ | |
3972 | if (is_system) { | |
3973 | /* | |
3974 | * Update local PMU first | |
3975 | * | |
3976 | * disable dcr pp | |
3977 | */ | |
3978 | ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); | |
3979 | ia64_srlz_i(); | |
3980 | ||
3981 | /* | |
3982 | * update local cpuinfo | |
3983 | */ | |
3984 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP); | |
3985 | ||
3986 | /* | |
3987 | * stop monitoring, does srlz.i | |
3988 | */ | |
3989 | pfm_clear_psr_pp(); | |
3990 | ||
3991 | /* | |
3992 | * stop monitoring in the caller | |
3993 | */ | |
3994 | ia64_psr(regs)->pp = 0; | |
3995 | ||
3996 | return 0; | |
3997 | } | |
3998 | /* | |
3999 | * per-task mode | |
4000 | */ | |
4001 | ||
4002 | if (task == current) { | |
4003 | /* stop monitoring at kernel level */ | |
4004 | pfm_clear_psr_up(); | |
4005 | ||
4006 | /* | |
4007 | * stop monitoring at the user level | |
4008 | */ | |
4009 | ia64_psr(regs)->up = 0; | |
4010 | } else { | |
6450578f | 4011 | tregs = task_pt_regs(task); |
1da177e4 LT |
4012 | |
4013 | /* | |
4014 | * stop monitoring at the user level | |
4015 | */ | |
4016 | ia64_psr(tregs)->up = 0; | |
4017 | ||
4018 | /* | |
4019 | * monitoring disabled in kernel at next reschedule | |
4020 | */ | |
4021 | ctx->ctx_saved_psr_up = 0; | |
19c5870c | 4022 | DPRINT(("task=[%d]\n", task_pid_nr(task))); |
1da177e4 LT |
4023 | } |
4024 | return 0; | |
4025 | } | |
4026 | ||
4027 | ||
4028 | static int | |
4029 | pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
4030 | { | |
4031 | struct pt_regs *tregs; | |
4032 | int state, is_system; | |
4033 | ||
4034 | state = ctx->ctx_state; | |
4035 | is_system = ctx->ctx_fl_system; | |
4036 | ||
4037 | if (state != PFM_CTX_LOADED) return -EINVAL; | |
4038 | ||
4039 | /* | |
4041 | * In system-wide mode, when the context is loaded, access can only happen | |
4041 | * when the caller is running on the CPU being monitored by the session. | |
4042 | * It does not have to be the owner (ctx_task) of the context per se. | |
4043 | */ | |
4044 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | |
4045 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | |
4046 | return -EBUSY; | |
4047 | } | |
4048 | ||
4049 | /* | |
4050 | * in system mode, we need to update the PMU directly | |
4051 | * and the user level state of the caller, which may not | |
4052 | * necessarily be the creator of the context. | |
4053 | */ | |
4054 | if (is_system) { | |
4055 | ||
4056 | /* | |
4057 | * set user level psr.pp for the caller | |
4058 | */ | |
4059 | ia64_psr(regs)->pp = 1; | |
4060 | ||
4061 | /* | |
4062 | * now update the local PMU and cpuinfo | |
4063 | */ | |
4064 | PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP); | |
4065 | ||
4066 | /* | |
4067 | * start monitoring at kernel level | |
4068 | */ | |
4069 | pfm_set_psr_pp(); | |
4070 | ||
4071 | /* enable dcr pp */ | |
4072 | ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); | |
4073 | ia64_srlz_i(); | |
4074 | ||
4075 | return 0; | |
4076 | } | |
4077 | ||
4078 | /* | |
4079 | * per-process mode | |
4080 | */ | |
4081 | ||
4082 | if (ctx->ctx_task == current) { | |
4083 | ||
4084 | /* start monitoring at kernel level */ | |
4085 | pfm_set_psr_up(); | |
4086 | ||
4087 | /* | |
4088 | * activate monitoring at user level | |
4089 | */ | |
4090 | ia64_psr(regs)->up = 1; | |
4091 | ||
4092 | } else { | |
6450578f | 4093 | tregs = task_pt_regs(ctx->ctx_task); |
1da177e4 LT |
4094 | |
4095 | /* | |
4096 | * start monitoring at the kernel level the next | |
4097 | * time the task is scheduled | |
4098 | */ | |
4099 | ctx->ctx_saved_psr_up = IA64_PSR_UP; | |
4100 | ||
4101 | /* | |
4102 | * activate monitoring at user level | |
4103 | */ | |
4104 | ia64_psr(tregs)->up = 1; | |
4105 | } | |
4106 | return 0; | |
4107 | } | |
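/*
 * Illustrative user-space sketch (not part of this file): bracketing a
 * measured code section with the PFM_START/PFM_STOP commands implemented
 * by pfm_start()/pfm_stop() above, in the self-monitoring case where the
 * context is already loaded on the calling task.  perfmonctl() is the
 * assumed libc wrapper and workload() is a stand-in for the code being
 * measured.
 */
static int measure_section(int fd)
{
	if (perfmonctl(fd, PFM_START, NULL, 0))
		return -1;

	workload();			/* hypothetical code under measurement */

	return perfmonctl(fd, PFM_STOP, NULL, 0);
}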
4108 | ||
4109 | static int | |
4110 | pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
4111 | { | |
4112 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | |
4113 | unsigned int cnum; | |
4114 | int i; | |
4115 | int ret = -EINVAL; | |
4116 | ||
4117 | for (i = 0; i < count; i++, req++) { | |
4118 | ||
4119 | cnum = req->reg_num; | |
4120 | ||
4121 | if (!PMC_IS_IMPL(cnum)) goto abort_mission; | |
4122 | ||
4123 | req->reg_value = PMC_DFL_VAL(cnum); | |
4124 | ||
4125 | PFM_REG_RETFLAG_SET(req->reg_flags, 0); | |
4126 | ||
4127 | DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value)); | |
4128 | } | |
4129 | return 0; | |
4130 | ||
4131 | abort_mission: | |
4132 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | |
4133 | return ret; | |
4134 | } | |
4135 | ||
4136 | static int | |
4137 | pfm_check_task_exist(pfm_context_t *ctx) | |
4138 | { | |
4139 | struct task_struct *g, *t; | |
4140 | int ret = -ESRCH; | |
4141 | ||
4142 | read_lock(&tasklist_lock); | |
4143 | ||
4144 | do_each_thread (g, t) { | |
4145 | if (t->thread.pfm_context == ctx) { | |
4146 | ret = 0; | |
6794c752 | 4147 | goto out; |
1da177e4 LT |
4148 | } |
4149 | } while_each_thread (g, t); | |
6794c752 | 4150 | out: |
1da177e4 LT |
4151 | read_unlock(&tasklist_lock); |
4152 | ||
4153 | DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx)); | |
4154 | ||
4155 | return ret; | |
4156 | } | |
4157 | ||
4158 | static int | |
4159 | pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
4160 | { | |
4161 | struct task_struct *task; | |
4162 | struct thread_struct *thread; | |
4163 | struct pfm_context_t *old; | |
4164 | unsigned long flags; | |
4165 | #ifndef CONFIG_SMP | |
4166 | struct task_struct *owner_task = NULL; | |
4167 | #endif | |
4168 | pfarg_load_t *req = (pfarg_load_t *)arg; | |
4169 | unsigned long *pmcs_source, *pmds_source; | |
4170 | int the_cpu; | |
4171 | int ret = 0; | |
4172 | int state, is_system, set_dbregs = 0; | |
4173 | ||
4174 | state = ctx->ctx_state; | |
4175 | is_system = ctx->ctx_fl_system; | |
4176 | /* | |
4177 | * can only load from unloaded or terminated state | |
4178 | */ | |
4179 | if (state != PFM_CTX_UNLOADED) { | |
4180 | DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", | |
4181 | req->load_pid, | |
4182 | ctx->ctx_state)); | |
a5a70b75 | 4183 | return -EBUSY; |
1da177e4 LT |
4184 | } |
4185 | ||
4186 | DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); | |
4187 | ||
4188 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) { | |
4189 | DPRINT(("cannot use blocking mode on self\n")); | |
4190 | return -EINVAL; | |
4191 | } | |
4192 | ||
4193 | ret = pfm_get_task(ctx, req->load_pid, &task); | |
4194 | if (ret) { | |
4195 | DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret)); | |
4196 | return ret; | |
4197 | } | |
4198 | ||
4199 | ret = -EINVAL; | |
4200 | ||
4201 | /* | |
4202 | * system-wide mode is self-monitoring only | |
4203 | */ | |
4204 | if (is_system && task != current) { | |
4205 | DPRINT(("system wide is self monitoring only load_pid=%d\n", | |
4206 | req->load_pid)); | |
4207 | goto error; | |
4208 | } | |
4209 | ||
4210 | thread = &task->thread; | |
4211 | ||
4212 | ret = 0; | |
4213 | /* | |
4214 | * cannot load a context which is using range restrictions, | |
4215 | * into a task that is being debugged. | |
4216 | */ | |
4217 | if (ctx->ctx_fl_using_dbreg) { | |
4218 | if (thread->flags & IA64_THREAD_DBG_VALID) { | |
4219 | ret = -EBUSY; | |
4220 | DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid)); | |
4221 | goto error; | |
4222 | } | |
4223 | LOCK_PFS(flags); | |
4224 | ||
4225 | if (is_system) { | |
4226 | if (pfm_sessions.pfs_ptrace_use_dbregs) { | |
19c5870c AD |
4227 | DPRINT(("cannot load [%d] dbregs in use\n", |
4228 | task_pid_nr(task))); | |
1da177e4 LT |
4229 | ret = -EBUSY; |
4230 | } else { | |
4231 | pfm_sessions.pfs_sys_use_dbregs++; | |
19c5870c | 4232 | DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs)); |
1da177e4 LT |
4233 | set_dbregs = 1; |
4234 | } | |
4235 | } | |
4236 | ||
4237 | UNLOCK_PFS(flags); | |
4238 | ||
4239 | if (ret) goto error; | |
4240 | } | |
4241 | ||
4242 | /* | |
4243 | * SMP system-wide monitoring implies self-monitoring. | |
4244 | * | |
4245 | * The programming model expects the task to | |
4246 | * be pinned on a CPU throughout the session. | |
4247 | * Here we take note of the current CPU at the | |
4248 | * time the context is loaded. No call from | |
4249 | * another CPU will be allowed. | |
4250 | * | |
4251 | * The pinning via sched_setaffinity() | |
4252 | * must be done by the calling task prior | |
4253 | * to this call. | |
4254 | * | |
4255 | * systemwide: keep track of CPU this session is supposed to run on | |
4256 | */ | |
4257 | the_cpu = ctx->ctx_cpu = smp_processor_id(); | |
4258 | ||
4259 | ret = -EBUSY; | |
4260 | /* | |
4261 | * now reserve the session | |
4262 | */ | |
4263 | ret = pfm_reserve_session(current, is_system, the_cpu); | |
4264 | if (ret) goto error; | |
4265 | ||
4266 | /* | |
4267 | * task is necessarily stopped at this point. | |
4268 | * | |
4269 | * If the previous context was zombie, then it got removed in | |
4270 | * pfm_save_regs(). Therefore we should not see it here. | |
4271 | * If we see a context, then this is an active context | |
4272 | * | |
4273 | * XXX: needs to be atomic | |
4274 | */ | |
4275 | DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n", | |
4276 | thread->pfm_context, ctx)); | |
4277 | ||
6bf11e8c | 4278 | ret = -EBUSY; |
1da177e4 LT |
4279 | old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *)); |
4280 | if (old != NULL) { | |
4281 | DPRINT(("load_pid [%d] already has a context\n", req->load_pid)); | |
4282 | goto error_unres; | |
4283 | } | |
4284 | ||
4285 | pfm_reset_msgq(ctx); | |
4286 | ||
4287 | ctx->ctx_state = PFM_CTX_LOADED; | |
4288 | ||
4289 | /* | |
4290 | * link context to task | |
4291 | */ | |
4292 | ctx->ctx_task = task; | |
4293 | ||
4294 | if (is_system) { | |
4295 | /* | |
4296 | * we load as stopped | |
4297 | */ | |
4298 | PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE); | |
4299 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP); | |
4300 | ||
4301 | if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE); | |
4302 | } else { | |
4303 | thread->flags |= IA64_THREAD_PM_VALID; | |
4304 | } | |
4305 | ||
4306 | /* | |
4307 | * propagate into thread-state | |
4308 | */ | |
4309 | pfm_copy_pmds(task, ctx); | |
4310 | pfm_copy_pmcs(task, ctx); | |
4311 | ||
35589a8f KA |
4312 | pmcs_source = ctx->th_pmcs; |
4313 | pmds_source = ctx->th_pmds; | |
1da177e4 LT |
4314 | |
4315 | /* | |
4316 | * always the case for system-wide | |
4317 | */ | |
4318 | if (task == current) { | |
4319 | ||
4320 | if (is_system == 0) { | |
4321 | ||
4322 | /* allow user level control */ | |
4323 | ia64_psr(regs)->sp = 0; | |
19c5870c | 4324 | DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
4325 | |
4326 | SET_LAST_CPU(ctx, smp_processor_id()); | |
4327 | INC_ACTIVATION(); | |
4328 | SET_ACTIVATION(ctx); | |
4329 | #ifndef CONFIG_SMP | |
4330 | /* | |
4331 | * push the other task out, if any | |
4332 | */ | |
4333 | owner_task = GET_PMU_OWNER(); | |
4334 | if (owner_task) pfm_lazy_save_regs(owner_task); | |
4335 | #endif | |
4336 | } | |
4337 | /* | |
4338 | * load all PMD from ctx to PMU (as opposed to thread state) | |
4339 | * restore all PMC from ctx to PMU | |
4340 | */ | |
4341 | pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]); | |
4342 | pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]); | |
4343 | ||
4344 | ctx->ctx_reload_pmcs[0] = 0UL; | |
4345 | ctx->ctx_reload_pmds[0] = 0UL; | |
4346 | ||
4347 | /* | |
4348 | * guaranteed safe by earlier check against DBG_VALID | |
4349 | */ | |
4350 | if (ctx->ctx_fl_using_dbreg) { | |
4351 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | |
4352 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | |
4353 | } | |
4354 | /* | |
4355 | * set new ownership | |
4356 | */ | |
4357 | SET_PMU_OWNER(task, ctx); | |
4358 | ||
19c5870c | 4359 | DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
4360 | } else { |
4361 | /* | |
4362 | * when not current, task MUST be stopped, so this is safe | |
4363 | */ | |
6450578f | 4364 | regs = task_pt_regs(task); |
1da177e4 LT |
4365 | |
4366 | /* force a full reload */ | |
4367 | ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; | |
4368 | SET_LAST_CPU(ctx, -1); | |
4369 | ||
4370 | /* initial saved psr (stopped) */ | |
4371 | ctx->ctx_saved_psr_up = 0UL; | |
4372 | ia64_psr(regs)->up = ia64_psr(regs)->pp = 0; | |
4373 | } | |
4374 | ||
4375 | ret = 0; | |
4376 | ||
4377 | error_unres: | |
4378 | if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu); | |
4379 | error: | |
4380 | /* | |
4381 | * we must undo the dbregs setting (for system-wide) | |
4382 | */ | |
4383 | if (ret && set_dbregs) { | |
4384 | LOCK_PFS(flags); | |
4385 | pfm_sessions.pfs_sys_use_dbregs--; | |
4386 | UNLOCK_PFS(flags); | |
4387 | } | |
4388 | /* | |
4389 | * release task, there is now a link with the context | |
4390 | */ | |
4391 | if (is_system == 0 && task != current) { | |
4392 | pfm_put_task(task); | |
4393 | ||
4394 | if (ret == 0) { | |
4395 | ret = pfm_check_task_exist(ctx); | |
4396 | if (ret) { | |
4397 | ctx->ctx_state = PFM_CTX_UNLOADED; | |
4398 | ctx->ctx_task = NULL; | |
4399 | } | |
4400 | } | |
4401 | } | |
4402 | return ret; | |
4403 | } | |
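/*
 * Illustrative user-space sketch (not part of this file): attaching a
 * per-task context to another, already stopped thread, the case handled in
 * pfm_context_load() above.  The target is stopped with ptrace() so the
 * "task must be stopped" checks in pfm_check_task_state() pass; error
 * handling and the eventual PTRACE_DETACH are elided.  perfmonctl() is the
 * assumed libc wrapper.
 */
static int attach_context(int fd, pid_t pid)
{
	pfarg_load_t load;

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL))
		return -1;
	waitpid(pid, NULL, 0);		/* wait for the stop to be reported */

	memset(&load, 0, sizeof(load));
	load.load_pid = pid;

	return perfmonctl(fd, PFM_LOAD_CONTEXT, &load, 1);
}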
4404 | ||
4405 | /* | |
4406 | * in this function, we do not need to increase the use count | |
4407 | * for the task via get_task_struct(), because we hold the | |
4408 | * context lock. If the task were to disappear while having | |
4409 | * a context attached, it would go through pfm_exit_thread() | |
4410 | * which also grabs the context lock and would therefore be blocked | |
4411 | * until we are here. | |
4412 | */ | |
4413 | static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx); | |
4414 | ||
4415 | static int | |
4416 | pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |
4417 | { | |
4418 | struct task_struct *task = PFM_CTX_TASK(ctx); | |
4419 | struct pt_regs *tregs; | |
4420 | int prev_state, is_system; | |
4421 | int ret; | |
4422 | ||
19c5870c | 4423 | DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1)); |
1da177e4 LT |
4424 | |
4425 | prev_state = ctx->ctx_state; | |
4426 | is_system = ctx->ctx_fl_system; | |
4427 | ||
4428 | /* | |
4429 | * unload only when necessary | |
4430 | */ | |
4431 | if (prev_state == PFM_CTX_UNLOADED) { | |
4432 | DPRINT(("ctx_state=%d, nothing to do\n", prev_state)); | |
4433 | return 0; | |
4434 | } | |
4435 | ||
4436 | /* | |
4437 | * clear psr and dcr bits | |
4438 | */ | |
4439 | ret = pfm_stop(ctx, NULL, 0, regs); | |
4440 | if (ret) return ret; | |
4441 | ||
4442 | ctx->ctx_state = PFM_CTX_UNLOADED; | |
4443 | ||
4444 | /* | |
4445 | * in system mode, we need to update the PMU directly | |
4446 | * and the user level state of the caller, which may not | |
4447 | * necessarily be the creator of the context. | |
4448 | */ | |
4449 | if (is_system) { | |
4450 | ||
4451 | /* | |
4452 | * Update cpuinfo | |
4453 | * | |
4454 | * local PMU is taken care of in pfm_stop() | |
4455 | */ | |
4456 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE); | |
4457 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE); | |
4458 | ||
4459 | /* | |
4460 | * save PMDs in context | |
4461 | * release ownership | |
4462 | */ | |
4463 | pfm_flush_pmds(current, ctx); | |
4464 | ||
4465 | /* | |
4466 | * at this point we are done with the PMU | |
4467 | * so we can unreserve the resource. | |
4468 | */ | |
4469 | if (prev_state != PFM_CTX_ZOMBIE) | |
4470 | pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu); | |
4471 | ||
4472 | /* | |
4473 | * disconnect context from task | |
4474 | */ | |
4475 | task->thread.pfm_context = NULL; | |
4476 | /* | |
4477 | * disconnect task from context | |
4478 | */ | |
4479 | ctx->ctx_task = NULL; | |
4480 | ||
4481 | /* | |
4482 | * There is nothing more to cleanup here. | |
4483 | */ | |
4484 | return 0; | |
4485 | } | |
4486 | ||
4487 | /* | |
4488 | * per-task mode | |
4489 | */ | |
6450578f | 4490 | tregs = task == current ? regs : task_pt_regs(task); |
1da177e4 LT |
4491 | |
4492 | if (task == current) { | |
4493 | /* | |
4494 | * cancel user level control | |
4495 | */ | |
4496 | ia64_psr(regs)->sp = 1; | |
4497 | ||
19c5870c | 4498 | DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
4499 | } |
4500 | /* | |
4501 | * save PMDs to context | |
4502 | * release ownership | |
4503 | */ | |
4504 | pfm_flush_pmds(task, ctx); | |
4505 | ||
4506 | /* | |
4507 | * at this point we are done with the PMU | |
4508 | * so we can unreserve the resource. | |
4509 | * | |
4510 | * when state was ZOMBIE, we have already unreserved. | |
4511 | */ | |
4512 | if (prev_state != PFM_CTX_ZOMBIE) | |
4513 | pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu); | |
4514 | ||
4515 | /* | |
4516 | * reset activation counter and psr | |
4517 | */ | |
4518 | ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; | |
4519 | SET_LAST_CPU(ctx, -1); | |
4520 | ||
4521 | /* | |
4522 | * PMU state will not be restored | |
4523 | */ | |
4524 | task->thread.flags &= ~IA64_THREAD_PM_VALID; | |
4525 | ||
4526 | /* | |
4527 | * break links between context and task | |
4528 | */ | |
4529 | task->thread.pfm_context = NULL; | |
4530 | ctx->ctx_task = NULL; | |
4531 | ||
4532 | PFM_SET_WORK_PENDING(task, 0); | |
4533 | ||
4534 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; | |
4535 | ctx->ctx_fl_can_restart = 0; | |
4536 | ctx->ctx_fl_going_zombie = 0; | |
4537 | ||
19c5870c | 4538 | DPRINT(("disconnected [%d] from context\n", task_pid_nr(task))); |
1da177e4 LT |
4539 | |
4540 | return 0; | |
4541 | } | |
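/*
 * Illustrative user-space counterpart to the sketch after
 * pfm_context_load(): tearing the session down again.  PFM_UNLOAD_CONTEXT
 * (handled by pfm_context_unload() above) detaches the context while the
 * target is still stopped; closing the file descriptor then releases the
 * context through the VFS.  Helper names are made up, perfmonctl() is the
 * assumed libc wrapper.
 */
static int detach_context(int fd, pid_t pid)
{
	int ret;

	ret = perfmonctl(fd, PFM_UNLOAD_CONTEXT, NULL, 0);

	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	close(fd);

	return ret;
}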
4542 | ||
4543 | ||
4544 | /* | |
e6464694 JS |
4545 | * called only from exit_thread() |
4546 | * we come here only if the task has a context attached (loaded or masked) | |
1da177e4 LT |
4547 | */ |
4548 | void | |
4549 | pfm_exit_thread(struct task_struct *task) | |
4550 | { | |
4551 | pfm_context_t *ctx; | |
4552 | unsigned long flags; | |
6450578f | 4553 | struct pt_regs *regs = task_pt_regs(task); |
1da177e4 LT |
4554 | int ret, state; |
4555 | int free_ok = 0; | |
4556 | ||
4557 | ctx = PFM_GET_CTX(task); | |
4558 | ||
4559 | PROTECT_CTX(ctx, flags); | |
4560 | ||
19c5870c | 4561 | DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task))); |
1da177e4 LT |
4562 | |
4563 | state = ctx->ctx_state; | |
4564 | switch(state) { | |
4565 | case PFM_CTX_UNLOADED: | |
4566 | /* | |
72fdbdce | 4567 | * only comes to this function if pfm_context is not NULL, i.e., cannot |
1da177e4 LT |
4568 | * be in unloaded state |
4569 | */ | |
19c5870c | 4570 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task)); |
1da177e4 LT |
4571 | break; |
4572 | case PFM_CTX_LOADED: | |
4573 | case PFM_CTX_MASKED: | |
4574 | ret = pfm_context_unload(ctx, NULL, 0, regs); | |
4575 | if (ret) { | |
19c5870c | 4576 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret); |
1da177e4 LT |
4577 | } |
4578 | DPRINT(("ctx unloaded for current state was %d\n", state)); | |
4579 | ||
4580 | pfm_end_notify_user(ctx); | |
4581 | break; | |
4582 | case PFM_CTX_ZOMBIE: | |
4583 | ret = pfm_context_unload(ctx, NULL, 0, regs); | |
4584 | if (ret) { | |
19c5870c | 4585 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret); |
1da177e4 LT |
4586 | } |
4587 | free_ok = 1; | |
4588 | break; | |
4589 | default: | |
19c5870c | 4590 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state); |
1da177e4 LT |
4591 | break; |
4592 | } | |
4593 | UNPROTECT_CTX(ctx, flags); | |
4594 | ||
4595 | { u64 psr = pfm_get_psr(); | |
4596 | BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); | |
4597 | BUG_ON(GET_PMU_OWNER()); | |
4598 | BUG_ON(ia64_psr(regs)->up); | |
4599 | BUG_ON(ia64_psr(regs)->pp); | |
4600 | } | |
4601 | ||
4602 | /* | |
4603 | * All memory free operations (especially for vmalloc'ed memory) | |
4604 | * MUST be done with interrupts ENABLED. | |
4605 | */ | |
4606 | if (free_ok) pfm_context_free(ctx); | |
4607 | } | |
4608 | ||
4609 | /* | |
4610 | * functions MUST be listed in increasing order of their index (see perfmon.h) | |
4611 | */ | |
4612 | #define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz } | |
4613 | #define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL } | |
4614 | #define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP) | |
4615 | #define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW) | |
4616 | #define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL} | |
4617 | ||
4618 | static pfm_cmd_desc_t pfm_cmd_tab[]={ | |
4619 | /* 0 */PFM_CMD_NONE, | |
4620 | /* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | |
4621 | /* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | |
4622 | /* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | |
4623 | /* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS), | |
4624 | /* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS), | |
4625 | /* 6 */PFM_CMD_NONE, | |
4626 | /* 7 */PFM_CMD_NONE, | |
4627 | /* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize), | |
4628 | /* 9 */PFM_CMD_NONE, | |
4629 | /* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW), | |
4630 | /* 11 */PFM_CMD_NONE, | |
4631 | /* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL), | |
4632 | /* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL), | |
4633 | /* 14 */PFM_CMD_NONE, | |
4634 | /* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | |
4635 | /* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL), | |
4636 | /* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS), | |
4637 | /* 18 */PFM_CMD_NONE, | |
4638 | /* 19 */PFM_CMD_NONE, | |
4639 | /* 20 */PFM_CMD_NONE, | |
4640 | /* 21 */PFM_CMD_NONE, | |
4641 | /* 22 */PFM_CMD_NONE, | |
4642 | /* 23 */PFM_CMD_NONE, | |
4643 | /* 24 */PFM_CMD_NONE, | |
4644 | /* 25 */PFM_CMD_NONE, | |
4645 | /* 26 */PFM_CMD_NONE, | |
4646 | /* 27 */PFM_CMD_NONE, | |
4647 | /* 28 */PFM_CMD_NONE, | |
4648 | /* 29 */PFM_CMD_NONE, | |
4649 | /* 30 */PFM_CMD_NONE, | |
4650 | /* 31 */PFM_CMD_NONE, | |
4651 | /* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL), | |
4652 | /* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL) | |
4653 | }; | |
4654 | #define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t)) | |
4655 | ||
4656 | static int | |
4657 | pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags) | |
4658 | { | |
4659 | struct task_struct *task; | |
4660 | int state, old_state; | |
4661 | ||
4662 | recheck: | |
4663 | state = ctx->ctx_state; | |
4664 | task = ctx->ctx_task; | |
4665 | ||
4666 | if (task == NULL) { | |
4667 | DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state)); | |
4668 | return 0; | |
4669 | } | |
4670 | ||
4671 | DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n", | |
4672 | ctx->ctx_fd, | |
4673 | state, | |
19c5870c | 4674 | task_pid_nr(task), |
1da177e4 LT |
4675 | task->state, PFM_CMD_STOPPED(cmd))); |
4676 | ||
4677 | /* | |
4678 | * self-monitoring always ok. | |
4679 | * | |
4680 | * for system-wide sessions the caller can either be the creator of the | |
4681 | * context (the one to which the context is attached) OR | |
4682 | * a task running on the same CPU as the session. | |
4683 | */ | |
4684 | if (task == current || ctx->ctx_fl_system) return 0; | |
4685 | ||
4686 | /* | |
a5a70b75 | 4687 | * we are monitoring another thread |
1da177e4 | 4688 | */ |
a5a70b75 | 4689 | switch(state) { |
4690 | case PFM_CTX_UNLOADED: | |
4691 | /* | |
4692 | * if context is UNLOADED we are safe to go | |
4693 | */ | |
4694 | return 0; | |
4695 | case PFM_CTX_ZOMBIE: | |
4696 | /* | |
4697 | * no command can operate on a zombie context | |
4698 | */ | |
4699 | DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); | |
4700 | return -EINVAL; | |
4701 | case PFM_CTX_MASKED: | |
4702 | /* | |
4703 | * PMU state has been saved to software even though | |
4704 | * the thread may still be running. | |
4705 | */ | |
4706 | if (cmd != PFM_UNLOAD_CONTEXT) return 0; | |
1da177e4 LT |
4707 | } |
4708 | ||
4709 | /* | |
4710 | * context is LOADED or MASKED. Some commands may need to have | |
4711 | * the task stopped. | |
4712 | * | |
4713 | * We could lift this restriction for UP but it would mean that | |
4714 | * the user has no guarantee the task would not run between | |
4715 | * two successive calls to perfmonctl(). That's probably OK. | |
4716 | * If the user wants to ensure the task does not run, then | |
4717 | * the task must be stopped. | |
4718 | */ | |
4719 | if (PFM_CMD_STOPPED(cmd)) { | |
21498223 | 4720 | if (!task_is_stopped_or_traced(task)) { |
19c5870c | 4721 | DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task))); |
1da177e4 LT |
4722 | return -EBUSY; |
4723 | } | |
4724 | /* | |
4725 | * task is now stopped, wait for ctxsw out | |
4726 | * | |
4727 | * This is an interesting point in the code. | |
4728 | * We need to unprotect the context because | |
4729 | * the pfm_save_regs() routine needs to grab | |
4730 | * the same lock. There is a danger in doing | |
4731 | * this because it leaves a window open for | |
4732 | * another task to get access to the context | |
4733 | * and possibly change its state. The one thing | |
4734 | * that is not possible is for the context to disappear | |
4735 | * because we are protected by the VFS layer, i.e., | |
4736 | * get_fd()/put_fd(). | |
4737 | */ | |
4738 | old_state = state; | |
4739 | ||
4740 | UNPROTECT_CTX(ctx, flags); | |
4741 | ||
85ba2d86 | 4742 | wait_task_inactive(task, 0); |
1da177e4 LT |
4743 | |
4744 | PROTECT_CTX(ctx, flags); | |
4745 | ||
4746 | /* | |
4747 | * we must recheck to verify if state has changed | |
4748 | */ | |
4749 | if (ctx->ctx_state != old_state) { | |
4750 | DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state)); | |
4751 | goto recheck; | |
4752 | } | |
4753 | } | |
4754 | return 0; | |
4755 | } | |
4756 | ||
4757 | /* | |
4758 | * system-call entry point (must return long) | |
4759 | */ | |
4760 | asmlinkage long | |
4761 | sys_perfmonctl (int fd, int cmd, void __user *arg, int count) | |
4762 | { | |
2903ff01 | 4763 | struct fd f = {NULL, 0}; |
1da177e4 LT |
4764 | pfm_context_t *ctx = NULL; |
4765 | unsigned long flags = 0UL; | |
4766 | void *args_k = NULL; | |
4767 | long ret; /* will expand int return types */ | |
4768 | size_t base_sz, sz, xtra_sz = 0; | |
4769 | int narg, completed_args = 0, call_made = 0, cmd_flags; | |
4770 | int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); | |
4771 | int (*getsize)(void *arg, size_t *sz); | |
4772 | #define PFM_MAX_ARGSIZE 4096 | |
4773 | ||
4774 | /* | |
4775 | * reject any call if perfmon was disabled at initialization | |
4776 | */ | |
4777 | if (unlikely(pmu_conf == NULL)) return -ENOSYS; | |
4778 | ||
4779 | if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) { | |
4780 | DPRINT(("invalid cmd=%d\n", cmd)); | |
4781 | return -EINVAL; | |
4782 | } | |
4783 | ||
4784 | func = pfm_cmd_tab[cmd].cmd_func; | |
4785 | narg = pfm_cmd_tab[cmd].cmd_narg; | |
4786 | base_sz = pfm_cmd_tab[cmd].cmd_argsize; | |
4787 | getsize = pfm_cmd_tab[cmd].cmd_getsize; | |
4788 | cmd_flags = pfm_cmd_tab[cmd].cmd_flags; | |
4789 | ||
4790 | if (unlikely(func == NULL)) { | |
4791 | DPRINT(("invalid cmd=%d\n", cmd)); | |
4792 | return -EINVAL; | |
4793 | } | |
4794 | ||
4795 | DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n", | |
4796 | PFM_CMD_NAME(cmd), | |
4797 | cmd, | |
4798 | narg, | |
4799 | base_sz, | |
4800 | count)); | |
4801 | ||
4802 | /* | |
4803 | * check if number of arguments matches what the command expects | |
4804 | */ | |
4805 | if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count))) | |
4806 | return -EINVAL; | |
4807 | ||
4808 | restart_args: | |
4809 | sz = xtra_sz + base_sz*count; | |
4810 | /* | |
4811 | * limit abuse to min page size | |
4812 | */ | |
4813 | if (unlikely(sz > PFM_MAX_ARGSIZE)) { | |
19c5870c | 4814 | printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz); |
1da177e4 LT |
4815 | return -E2BIG; |
4816 | } | |
4817 | ||
4818 | /* | |
4819 | * allocate default-sized argument buffer | |
4820 | */ | |
4821 | if (likely(count && args_k == NULL)) { | |
4822 | args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL); | |
4823 | if (args_k == NULL) return -ENOMEM; | |
4824 | } | |
4825 | ||
4826 | ret = -EFAULT; | |
4827 | ||
4828 | /* | |
4829 | * copy arguments | |
4830 | * | |
4831 | * assume sz = 0 for commands without parameters | |
4832 | */ | |
4833 | if (sz && copy_from_user(args_k, arg, sz)) { | |
4834 | DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg)); | |
4835 | goto error_args; | |
4836 | } | |
4837 | ||
4838 | /* | |
4839 | * check if command supports extra parameters | |
4840 | */ | |
4841 | if (completed_args == 0 && getsize) { | |
4842 | /* | |
4843 | * get extra parameters size (based on main argument) | |
4844 | */ | |
4845 | ret = (*getsize)(args_k, &xtra_sz); | |
4846 | if (ret) goto error_args; | |
4847 | ||
4848 | completed_args = 1; | |
4849 | ||
4850 | DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz)); | |
4851 | ||
4852 | /* retry if necessary */ | |
4853 | if (likely(xtra_sz)) goto restart_args; | |
4854 | } | |
4855 | ||
4856 | if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd; | |
4857 | ||
4858 | ret = -EBADF; | |
4859 | ||
2903ff01 AV |
4860 | f = fdget(fd); |
4861 | if (unlikely(f.file == NULL)) { | |
1da177e4 LT |
4862 | DPRINT(("invalid fd %d\n", fd)); |
4863 | goto error_args; | |
4864 | } | |
2903ff01 | 4865 | if (unlikely(PFM_IS_FILE(f.file) == 0)) { |
1da177e4 LT |
4866 | DPRINT(("fd %d not related to perfmon\n", fd)); |
4867 | goto error_args; | |
4868 | } | |
4869 | ||
2903ff01 | 4870 | ctx = f.file->private_data; |
1da177e4 LT |
4871 | if (unlikely(ctx == NULL)) { |
4872 | DPRINT(("no context for fd %d\n", fd)); | |
4873 | goto error_args; | |
4874 | } | |
4875 | prefetch(&ctx->ctx_state); | |
4876 | ||
4877 | PROTECT_CTX(ctx, flags); | |
4878 | ||
4879 | /* | |
4880 | * check task is stopped | |
4881 | */ | |
4882 | ret = pfm_check_task_state(ctx, cmd, flags); | |
4883 | if (unlikely(ret)) goto abort_locked; | |
4884 | ||
4885 | skip_fd: | |
6450578f | 4886 | ret = (*func)(ctx, args_k, count, task_pt_regs(current)); |
1da177e4 LT |
4887 | |
4888 | call_made = 1; | |
4889 | ||
4890 | abort_locked: | |
4891 | if (likely(ctx)) { | |
4892 | DPRINT(("context unlocked\n")); | |
4893 | UNPROTECT_CTX(ctx, flags); | |
1da177e4 LT |
4894 | } |
4895 | ||
4896 | /* copy argument back to user, if needed */ | |
4897 | if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT; | |
4898 | ||
4899 | error_args: | |
2903ff01 AV |
4900 | if (f.file) |
4901 | fdput(f); | |
b8444d00 | 4902 | |
b2325fe1 | 4903 | kfree(args_k); |
1da177e4 LT |
4904 | |
4905 | DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret)); | |
4906 | ||
4907 | return ret; | |
4908 | } | |
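/*
 * Illustrative user-level sketch (not part of the original file) of the
 * calling convention implemented above, here for a self-monitoring task. The
 * commands and pfarg_* structures come from <asm/perfmon.h>; field usage is
 * simplified and the event programming (reg_value for the PMC) is PMU model
 * specific, so treat this only as a sketch:
 *
 *	pfarg_context_t ctx;
 *	pfarg_reg_t pc[1], pd[1];
 *	pfarg_load_t load;
 *
 *	memset(&ctx, 0, sizeof(ctx));
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1);	// fd returned in ctx.ctx_fd
 *
 *	memset(pc, 0, sizeof(pc)); memset(pd, 0, sizeof(pd));
 *	pc[0].reg_num = 4;				// pick a counting PMC/PMD pair
 *	pd[0].reg_num = 4;
 *	perfmonctl(ctx.ctx_fd, PFM_WRITE_PMCS, pc, 1);
 *	perfmonctl(ctx.ctx_fd, PFM_WRITE_PMDS, pd, 1);
 *
 *	memset(&load, 0, sizeof(load));
 *	load.load_pid = getpid();
 *	perfmonctl(ctx.ctx_fd, PFM_LOAD_CONTEXT, &load, 1);
 *	perfmonctl(ctx.ctx_fd, PFM_START, NULL, 0);
 *	// ... workload ...
 *	perfmonctl(ctx.ctx_fd, PFM_STOP, NULL, 0);
 *	perfmonctl(ctx.ctx_fd, PFM_READ_PMDS, pd, 1);	// 64-bit count in pd[0].reg_value
 */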
4909 | ||
4910 | static void | |
4911 | pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs) | |
4912 | { | |
4913 | pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt; | |
4914 | pfm_ovfl_ctrl_t rst_ctrl; | |
4915 | int state; | |
4916 | int ret = 0; | |
4917 | ||
4918 | state = ctx->ctx_state; | |
4919 | /* | |
4920 | * Unlock sampling buffer and reset index atomically | |
4921 | * XXX: not really needed when blocking | |
4922 | */ | |
4923 | if (CTX_HAS_SMPL(ctx)) { | |
4924 | ||
4925 | rst_ctrl.bits.mask_monitoring = 0; | |
4926 | rst_ctrl.bits.reset_ovfl_pmds = 0; | |
4927 | ||
4928 | if (state == PFM_CTX_LOADED) | |
4929 | ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | |
4930 | else | |
4931 | ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | |
4932 | } else { | |
4933 | rst_ctrl.bits.mask_monitoring = 0; | |
4934 | rst_ctrl.bits.reset_ovfl_pmds = 1; | |
4935 | } | |
4936 | ||
4937 | if (ret == 0) { | |
4938 | if (rst_ctrl.bits.reset_ovfl_pmds) { | |
4939 | pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET); | |
4940 | } | |
4941 | if (rst_ctrl.bits.mask_monitoring == 0) { | |
4942 | DPRINT(("resuming monitoring\n")); | |
4943 | if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current); | |
4944 | } else { | |
4945 | DPRINT(("stopping monitoring\n")); | |
4946 | //pfm_stop_monitoring(current, regs); | |
4947 | } | |
4948 | ctx->ctx_state = PFM_CTX_LOADED; | |
4949 | } | |
4950 | } | |
4951 | ||
4952 | /* | |
4953 | * context MUST BE LOCKED when calling | |
4954 | * can only be called for current | |
4955 | */ | |
4956 | static void | |
4957 | pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs) | |
4958 | { | |
4959 | int ret; | |
4960 | ||
19c5870c | 4961 | DPRINT(("entering for [%d]\n", task_pid_nr(current))); |
1da177e4 LT |
4962 | |
4963 | ret = pfm_context_unload(ctx, NULL, 0, regs); | |
4964 | if (ret) { | |
19c5870c | 4965 | printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret); |
1da177e4 LT |
4966 | } |
4967 | ||
4968 | /* | |
4969 | * and wakeup controlling task, indicating we are now disconnected | |
4970 | */ | |
4971 | wake_up_interruptible(&ctx->ctx_zombieq); | |
4972 | ||
4973 | /* | |
4974 | * given that context is still locked, the controlling | |
4975 | * task will only get access when we return from | |
4976 | * pfm_handle_work(). | |
4977 | */ | |
4978 | } | |
4979 | ||
4980 | static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds); | |
0fb232fd | 4981 | |
4944930a SE |
4982 | /* |
4983 | * pfm_handle_work() can be called with interrupts enabled | |
4984 | * (TIF_NEED_RESCHED) or disabled. The wait_for_completion_interruptible() | |
4985 | * call below may sleep, therefore we must re-enable interrupts | |
4986 | * to avoid deadlocks. It is safe to do so because this function | |
0fb232fd | 4987 | * is called ONLY when returning to user level (pUStk=1), in which case |
4944930a SE |
4988 | * there is no risk of kernel stack overflow due to deep |
4989 | * interrupt nesting. | |
4990 | */ | |
1da177e4 LT |
4991 | void |
4992 | pfm_handle_work(void) | |
4993 | { | |
4994 | pfm_context_t *ctx; | |
4995 | struct pt_regs *regs; | |
4944930a | 4996 | unsigned long flags, dummy_flags; |
1da177e4 LT |
4997 | unsigned long ovfl_regs; |
4998 | unsigned int reason; | |
4999 | int ret; | |
5000 | ||
5001 | ctx = PFM_GET_CTX(current); | |
5002 | if (ctx == NULL) { | |
0fb232fd HS |
5003 | printk(KERN_ERR "perfmon: [%d] has no PFM context\n", |
5004 | task_pid_nr(current)); | |
1da177e4 LT |
5005 | return; |
5006 | } | |
5007 | ||
5008 | PROTECT_CTX(ctx, flags); | |
5009 | ||
5010 | PFM_SET_WORK_PENDING(current, 0); | |
5011 | ||
6450578f | 5012 | regs = task_pt_regs(current); |
1da177e4 LT |
5013 | |
5014 | /* | |
5015 | * extract reason for being here and clear | |
5016 | */ | |
5017 | reason = ctx->ctx_fl_trap_reason; | |
5018 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; | |
5019 | ovfl_regs = ctx->ctx_ovfl_regs[0]; | |
5020 | ||
5021 | DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state)); | |
5022 | ||
5023 | /* | |
5024 | * must be done before we check for simple-reset mode | |
5025 | */ | |
0fb232fd HS |
5026 | if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) |
5027 | goto do_zombie; | |
1da177e4 LT |
5028 | |
5029 | //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; | |
0fb232fd HS |
5030 | if (reason == PFM_TRAP_REASON_RESET) |
5031 | goto skip_blocking; | |
1da177e4 | 5032 | |
4944930a SE |
5033 | /* |
5034 | * restore interrupt mask to what it was on entry. | |
5035 | * Could be enabled/disabled. | |
5036 | */ | |
1da177e4 LT |
5037 | UNPROTECT_CTX(ctx, flags); |
5038 | ||
4944930a SE |
5039 | /* |
5040 | * force interrupt enable because wait_for_completion_interruptible() may sleep | |
5041 | */ | |
1da177e4 LT |
5042 | local_irq_enable(); |
5043 | ||
5044 | DPRINT(("before block sleeping\n")); | |
5045 | ||
5046 | /* | |
5047 | * may go through without blocking on SMP systems | |
5048 | * if restart has been received already by the time we call down() | |
5049 | */ | |
60f1c444 | 5050 | ret = wait_for_completion_interruptible(&ctx->ctx_restart_done); |
1da177e4 LT |
5051 | |
5052 | DPRINT(("after block sleeping ret=%d\n", ret)); | |
5053 | ||
5054 | /* | |
4944930a SE |
5055 | * lock context and mask interrupts again |
5056 | * We save flags into a dummy because we may have | |
5057 | * altered the interrupt mask compared to entry to this | |
5058 | * function. | |
1da177e4 | 5059 | */ |
4944930a | 5060 | PROTECT_CTX(ctx, dummy_flags); |
1da177e4 LT |
5061 | |
5062 | /* | |
5063 | * we need to read the ovfl_regs only after wake-up | |
5064 | * because we may have had pfm_write_pmds() in between | |
5065 | * and that can change PMD values and therefore | |
5066 | * ovfl_regs is reset for these new PMD values. | |
5067 | */ | |
5068 | ovfl_regs = ctx->ctx_ovfl_regs[0]; | |
5069 | ||
5070 | if (ctx->ctx_fl_going_zombie) { | |
5071 | do_zombie: | |
5072 | DPRINT(("context is zombie, bailing out\n")); | |
5073 | pfm_context_force_terminate(ctx, regs); | |
5074 | goto nothing_to_do; | |
5075 | } | |
5076 | /* | |
5077 | * in case the wait above was interrupted we don't restart anything | |
5078 | */ | |
0fb232fd HS |
5079 | if (ret < 0) |
5080 | goto nothing_to_do; | |
1da177e4 LT |
5081 | |
5082 | skip_blocking: | |
5083 | pfm_resume_after_ovfl(ctx, ovfl_regs, regs); | |
5084 | ctx->ctx_ovfl_regs[0] = 0UL; | |
5085 | ||
5086 | nothing_to_do: | |
4944930a SE |
5087 | /* |
5088 | * restore flags as they were upon entry | |
5089 | */ | |
1da177e4 LT |
5090 | UNPROTECT_CTX(ctx, flags); |
5091 | } | |
5092 | ||
5093 | static int | |
5094 | pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg) | |
5095 | { | |
5096 | if (ctx->ctx_state == PFM_CTX_ZOMBIE) { | |
5097 | DPRINT(("ignoring overflow notification, owner is zombie\n")); | |
5098 | return 0; | |
5099 | } | |
5100 | ||
5101 | DPRINT(("waking up somebody\n")); | |
5102 | ||
5103 | if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait); | |
5104 | ||
5105 | /* | |
5106 | * safe, we are not in intr handler, nor in ctxsw when | |
5107 | * we come here | |
5108 | */ | |
5109 | kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN); | |
5110 | ||
5111 | return 0; | |
5112 | } | |
5113 | ||
5114 | static int | |
5115 | pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds) | |
5116 | { | |
5117 | pfm_msg_t *msg = NULL; | |
5118 | ||
5119 | if (ctx->ctx_fl_no_msg == 0) { | |
5120 | msg = pfm_get_new_msg(ctx); | |
5121 | if (msg == NULL) { | |
5122 | printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n"); | |
5123 | return -1; | |
5124 | } | |
5125 | ||
5126 | msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL; | |
5127 | msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd; | |
5128 | msg->pfm_ovfl_msg.msg_active_set = 0; | |
5129 | msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds; | |
5130 | msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL; | |
5131 | msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL; | |
5132 | msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL; | |
5133 | msg->pfm_ovfl_msg.msg_tstamp = 0UL; | |
5134 | } | |
5135 | ||
5136 | DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n", | |
5137 | msg, | |
5138 | ctx->ctx_fl_no_msg, | |
5139 | ctx->ctx_fd, | |
5140 | ovfl_pmds)); | |
5141 | ||
5142 | return pfm_notify_user(ctx, msg); | |
5143 | } | |
5144 | ||
5145 | static int | |
5146 | pfm_end_notify_user(pfm_context_t *ctx) | |
5147 | { | |
5148 | pfm_msg_t *msg; | |
5149 | ||
5150 | msg = pfm_get_new_msg(ctx); | |
5151 | if (msg == NULL) { | |
5152 | printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n"); | |
5153 | return -1; | |
5154 | } | |
5155 | /* no leak */ | |
5156 | memset(msg, 0, sizeof(*msg)); | |
5157 | ||
5158 | msg->pfm_end_msg.msg_type = PFM_MSG_END; | |
5159 | msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd; | |
5160 | msg->pfm_ovfl_msg.msg_tstamp = 0UL; | |
5161 | ||
5162 | DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n", | |
5163 | msg, | |
5164 | ctx->ctx_fl_no_msg, | |
5165 | ctx->ctx_fd)); | |
5166 | ||
5167 | return pfm_notify_user(ctx, msg); | |
5168 | } | |
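/*
 * Illustrative sketch (not part of the original file): the messages queued by
 * pfm_ovfl_notify_user()/pfm_end_notify_user() are consumed at user level by
 * reading the context file descriptor, typically after poll()/select(), or
 * asynchronously via SIGIO thanks to the kill_fasync() call above. The snippet
 * only uses the msg_type/pfm_ovfl_msg fields seen in this file; everything
 * else is an assumption made for the example:
 *
 *	pfm_msg_t msg;
 *	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 &&
 *	    read(ctx_fd, &msg, sizeof(msg)) == sizeof(msg)) {
 *		if (msg.pfm_ovfl_msg.msg_type == PFM_MSG_OVFL)
 *			perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);  // unblock/resume
 *	}
 */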
5169 | ||
5170 | /* | |
5171 | * main overflow processing routine. | |
72fdbdce | 5172 | * it can be called from the interrupt path or explicitly during the context switch code |
1da177e4 | 5173 | */ |
e088a4ad MW |
5174 | static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, |
5175 | unsigned long pmc0, struct pt_regs *regs) | |
1da177e4 LT |
5176 | { |
5177 | pfm_ovfl_arg_t *ovfl_arg; | |
5178 | unsigned long mask; | |
5179 | unsigned long old_val, ovfl_val, new_val; | |
5180 | unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds; | |
5181 | unsigned long tstamp; | |
5182 | pfm_ovfl_ctrl_t ovfl_ctrl; | |
5183 | unsigned int i, has_smpl; | |
5184 | int must_notify = 0; | |
5185 | ||
5186 | if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring; | |
5187 | ||
5188 | /* | |
5189 | * sanity test. Should never happen | |
5190 | */ | |
5191 | if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check; | |
5192 | ||
5193 | tstamp = ia64_get_itc(); | |
5194 | mask = pmc0 >> PMU_FIRST_COUNTER; | |
5195 | ovfl_val = pmu_conf->ovfl_val; | |
5196 | has_smpl = CTX_HAS_SMPL(ctx); | |
5197 | ||
5198 | DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s " | |
5199 | "used_pmds=0x%lx\n", | |
5200 | pmc0, | |
19c5870c | 5201 | task ? task_pid_nr(task): -1, |
1da177e4 LT |
5202 | (regs ? regs->cr_iip : 0), |
5203 | CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking", | |
5204 | ctx->ctx_used_pmds[0])); | |
5205 | ||
5206 | ||
5207 | /* | |
5208 | * first we update the virtual counters | |
5209 | * assume there was a prior ia64_srlz_d() issued | |
5210 | */ | |
5211 | for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) { | |
5212 | ||
5213 | /* skip pmd which did not overflow */ | |
5214 | if ((mask & 0x1) == 0) continue; | |
5215 | ||
5216 | /* | |
5217 | * Note that the pmd is not necessarily 0 at this point as qualified events | |
5218 | * may have happened before the PMU was frozen. The residual count is not | |
5219 | * taken into consideration here but will be with any read of the pmd via | |
5220 | * pfm_read_pmds(). | |
5221 | */ | |
5222 | old_val = new_val = ctx->ctx_pmds[i].val; | |
5223 | new_val += 1 + ovfl_val; | |
5224 | ctx->ctx_pmds[i].val = new_val; | |
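/*
 * note: ovfl_val is the mask of bits implemented by the hardware counter,
 * so adding (ovfl_val + 1) accounts for exactly one hardware wrap-around in
 * the 64-bit software image of the counter. A 64-bit (virtual) overflow then
 * shows up as new_val wrapping below old_val, which is what the check below
 * detects.
 */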
5225 | ||
5226 | /* | |
5227 | * check for overflow condition | |
5228 | */ | |
5229 | if (likely(old_val > new_val)) { | |
5230 | ovfl_pmds |= 1UL << i; | |
5231 | if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i; | |
5232 | } | |
5233 | ||
5234 | DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n", | |
5235 | i, | |
5236 | new_val, | |
5237 | old_val, | |
5238 | ia64_get_pmd(i) & ovfl_val, | |
5239 | ovfl_pmds, | |
5240 | ovfl_notify)); | |
5241 | } | |
5242 | ||
5243 | /* | |
5244 | * there was no 64-bit overflow, nothing else to do | |
5245 | */ | |
5246 | if (ovfl_pmds == 0UL) return; | |
5247 | ||
5248 | /* | |
5249 | * reset all control bits | |
5250 | */ | |
5251 | ovfl_ctrl.val = 0; | |
5252 | reset_pmds = 0UL; | |
5253 | ||
5254 | /* | |
5255 | * if a sampling format module exists, then we "cache" the overflow by | |
5256 | * calling the module's handler() routine. | |
5257 | */ | |
5258 | if (has_smpl) { | |
5259 | unsigned long start_cycles, end_cycles; | |
5260 | unsigned long pmd_mask; | |
5261 | int j, k, ret = 0; | |
5262 | int this_cpu = smp_processor_id(); | |
5263 | ||
5264 | pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER; | |
5265 | ovfl_arg = &ctx->ctx_ovfl_arg; | |
5266 | ||
5267 | prefetch(ctx->ctx_smpl_hdr); | |
5268 | ||
5269 | for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) { | |
5270 | ||
5271 | mask = 1UL << i; | |
5272 | ||
5273 | if ((pmd_mask & 0x1) == 0) continue; | |
5274 | ||
5275 | ovfl_arg->ovfl_pmd = (unsigned char )i; | |
5276 | ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0; | |
5277 | ovfl_arg->active_set = 0; | |
5278 | ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */ | |
5279 | ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0]; | |
5280 | ||
5281 | ovfl_arg->pmd_value = ctx->ctx_pmds[i].val; | |
5282 | ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval; | |
5283 | ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid; | |
5284 | ||
5285 | /* | |
5286 | * copy values of pmds of interest. Sampling format may copy them | |
5287 | * into sampling buffer. | |
5288 | */ | |
5289 | if (smpl_pmds) { | |
5290 | for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) { | |
5291 | if ((smpl_pmds & 0x1) == 0) continue; | |
5292 | ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j); | |
5293 | DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1])); | |
5294 | } | |
5295 | } | |
5296 | ||
5297 | pfm_stats[this_cpu].pfm_smpl_handler_calls++; | |
5298 | ||
5299 | start_cycles = ia64_get_itc(); | |
5300 | ||
5301 | /* | |
5302 | * call custom buffer format record (handler) routine | |
5303 | */ | |
5304 | ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp); | |
5305 | ||
5306 | end_cycles = ia64_get_itc(); | |
5307 | ||
5308 | /* | |
5309 | * For those controls, we take the union because they have | |
5310 | * an all or nothing behavior. | |
5311 | */ | |
5312 | ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user; | |
5313 | ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task; | |
5314 | ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring; | |
5315 | /* | |
5316 | * build the bitmask of pmds to reset now | |
5317 | */ | |
5318 | if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask; | |
5319 | ||
5320 | pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles; | |
5321 | } | |
5322 | /* | |
5323 | * when the module cannot handle the rest of the overflows, we abort right here | |
5324 | */ | |
5325 | if (ret && pmd_mask) { | |
5326 | DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n", | |
5327 | pmd_mask<<PMU_FIRST_COUNTER)); | |
5328 | } | |
5329 | /* | |
5330 | * remove the pmds we reset now from the set of pmds to reset in pfm_restart() | |
5331 | */ | |
5332 | ovfl_pmds &= ~reset_pmds; | |
5333 | } else { | |
5334 | /* | |
5335 | * when no sampling module is used, then the default | |
5336 | * is to notify on overflow if requested by user | |
5337 | */ | |
5338 | ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0; | |
5339 | ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0; | |
5340 | ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */ | |
5341 | ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1; | |
5342 | /* | |
5343 | * if needed, we reset all overflowed pmds | |
5344 | */ | |
5345 | if (ovfl_notify == 0) reset_pmds = ovfl_pmds; | |
5346 | } | |
5347 | ||
5348 | DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds)); | |
5349 | ||
5350 | /* | |
5351 | * reset the requested PMD registers using the short reset values | |
5352 | */ | |
5353 | if (reset_pmds) { | |
5354 | unsigned long bm = reset_pmds; | |
5355 | pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET); | |
5356 | } | |
5357 | ||
5358 | if (ovfl_notify && ovfl_ctrl.bits.notify_user) { | |
5359 | /* | |
5360 | * keep track of what to reset when unblocking | |
5361 | */ | |
5362 | ctx->ctx_ovfl_regs[0] = ovfl_pmds; | |
5363 | ||
5364 | /* | |
5365 | * check for blocking context | |
5366 | */ | |
5367 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) { | |
5368 | ||
5369 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK; | |
5370 | ||
5371 | /* | |
5372 | * set the perfmon specific checking pending work for the task | |
5373 | */ | |
5374 | PFM_SET_WORK_PENDING(task, 1); | |
5375 | ||
5376 | /* | |
5377 | * when coming from ctxsw, current still points to the | |
5378 | * previous task, therefore we must work with task and not current. | |
5379 | */ | |
f14488cc | 5380 | set_notify_resume(task); |
1da177e4 LT |
5381 | } |
5382 | /* | |
5383 | * defer until state is changed (shorten spin window). The context is locked | |
5384 | * anyway, so the signal receiver would just spin for nothing. | |
5385 | */ | |
5386 | must_notify = 1; | |
5387 | } | |
5388 | ||
5389 | DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n", | |
19c5870c | 5390 | GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1, |
1da177e4 LT |
5391 | PFM_GET_WORK_PENDING(task), |
5392 | ctx->ctx_fl_trap_reason, | |
5393 | ovfl_pmds, | |
5394 | ovfl_notify, | |
5395 | ovfl_ctrl.bits.mask_monitoring ? 1 : 0)); | |
5396 | /* | |
5397 | * in case monitoring must be stopped, we toggle the psr bits | |
5398 | */ | |
5399 | if (ovfl_ctrl.bits.mask_monitoring) { | |
5400 | pfm_mask_monitoring(task); | |
5401 | ctx->ctx_state = PFM_CTX_MASKED; | |
5402 | ctx->ctx_fl_can_restart = 1; | |
5403 | } | |
5404 | ||
5405 | /* | |
5406 | * send notification now | |
5407 | */ | |
5408 | if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify); | |
5409 | ||
5410 | return; | |
5411 | ||
5412 | sanity_check: | |
5413 | printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n", | |
5414 | smp_processor_id(), | |
19c5870c | 5415 | task ? task_pid_nr(task) : -1, |
1da177e4 LT |
5416 | pmc0); |
5417 | return; | |
5418 | ||
5419 | stop_monitoring: | |
5420 | /* | |
5421 | * in SMP, zombie context is never restored but reclaimed in pfm_load_regs(). | |
5422 | * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can | |
5423 | * come here as zombie only if the task is the current task. In which case, we | |
5424 | * can access the PMU hardware directly. | |
5425 | * | |
5426 | * Note that zombies do have PM_VALID set. So here we do the bare minimum. | |
5427 | * | |
5428 | * In case the context was zombified it could not be reclaimed at the time | |
5429 | * the monitoring program exited. At this point, the PMU reservation has been | |
5430 | * returned, the sampling buffer has been freed. We must convert this call | |
5431 | * into a spurious interrupt. However, we must also avoid infinite overflows | |
5432 | * by stopping monitoring for this task. We can only come here for a per-task | |
5433 | * context. All we need to do is to stop monitoring using the psr bits which | |
5434 | * are always task private. By re-enabling secure monitoring, we ensure that | |
5435 | * the monitored task will not be able to re-activate monitoring. | |
5436 | * The task will eventually be context switched out, at which point the context | |
5437 | * will be reclaimed (that includes releasing ownership of the PMU). | |
5438 | * | |
5439 | * So there might be a window of time where the number of per-task sessions is zero | |
5440 | * yet one PMU might have an owner and get at most one overflow interrupt for a zombie | |
5441 | * context. This is safe because if a per-task session comes in, it will push this one | |
5442 | * out and by virtue of pfm_save_regs(), this one will disappear. If a system wide | |
5443 | * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will | |
5444 | * also push our zombie context out. | |
5445 | * | |
5446 | * Overall pretty hairy stuff.... | |
5447 | */ | |
19c5870c | 5448 | DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1)); |
1da177e4 LT |
5449 | pfm_clear_psr_up(); |
5450 | ia64_psr(regs)->up = 0; | |
5451 | ia64_psr(regs)->sp = 1; | |
5452 | return; | |
5453 | } | |
5454 | ||
5455 | static int | |
9010eff0 | 5456 | pfm_do_interrupt_handler(void *arg, struct pt_regs *regs) |
1da177e4 LT |
5457 | { |
5458 | struct task_struct *task; | |
5459 | pfm_context_t *ctx; | |
5460 | unsigned long flags; | |
5461 | u64 pmc0; | |
5462 | int this_cpu = smp_processor_id(); | |
5463 | int retval = 0; | |
5464 | ||
5465 | pfm_stats[this_cpu].pfm_ovfl_intr_count++; | |
5466 | ||
5467 | /* | |
5468 | * srlz.d done before arriving here | |
5469 | */ | |
5470 | pmc0 = ia64_get_pmc(0); | |
5471 | ||
5472 | task = GET_PMU_OWNER(); | |
5473 | ctx = GET_PMU_CTX(); | |
5474 | ||
5475 | /* | |
5476 | * if we have some pending bits set | |
5477 | * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1 | |
5478 | */ | |
5479 | if (PMC0_HAS_OVFL(pmc0) && task) { | |
5480 | /* | |
5481 | * we assume that pmc0.fr is always set here | |
5482 | */ | |
5483 | ||
5484 | /* sanity check */ | |
5485 | if (!ctx) goto report_spurious1; | |
5486 | ||
5487 | if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0) | |
5488 | goto report_spurious2; | |
5489 | ||
5490 | PROTECT_CTX_NOPRINT(ctx, flags); | |
5491 | ||
5492 | pfm_overflow_handler(task, ctx, pmc0, regs); | |
5493 | ||
5494 | UNPROTECT_CTX_NOPRINT(ctx, flags); | |
5495 | ||
5496 | } else { | |
5497 | pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++; | |
5498 | retval = -1; | |
5499 | } | |
5500 | /* | |
5501 | * keep it unfrozen at all times | |
5502 | */ | |
5503 | pfm_unfreeze_pmu(); | |
5504 | ||
5505 | return retval; | |
5506 | ||
5507 | report_spurious1: | |
5508 | printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n", | |
19c5870c | 5509 | this_cpu, task_pid_nr(task)); |
1da177e4 LT |
5510 | pfm_unfreeze_pmu(); |
5511 | return -1; | |
5512 | report_spurious2: | |
5513 | printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n", | |
5514 | this_cpu, | |
19c5870c | 5515 | task_pid_nr(task)); |
1da177e4 LT |
5516 | pfm_unfreeze_pmu(); |
5517 | return -1; | |
5518 | } | |
5519 | ||
5520 | static irqreturn_t | |
3bbe486b | 5521 | pfm_interrupt_handler(int irq, void *arg) |
1da177e4 LT |
5522 | { |
5523 | unsigned long start_cycles, total_cycles; | |
5524 | unsigned long min, max; | |
5525 | int this_cpu; | |
5526 | int ret; | |
3bbe486b | 5527 | struct pt_regs *regs = get_irq_regs(); |
1da177e4 LT |
5528 | |
5529 | this_cpu = get_cpu(); | |
a1ecf7f6 TL |
5530 | if (likely(!pfm_alt_intr_handler)) { |
5531 | min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min; | |
5532 | max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max; | |
1da177e4 | 5533 | |
a1ecf7f6 | 5534 | start_cycles = ia64_get_itc(); |
1da177e4 | 5535 | |
9010eff0 | 5536 | ret = pfm_do_interrupt_handler(arg, regs); |
1da177e4 | 5537 | |
a1ecf7f6 | 5538 | total_cycles = ia64_get_itc(); |
1da177e4 | 5539 | |
a1ecf7f6 TL |
5540 | /* |
5541 | * don't measure spurious interrupts | |
5542 | */ | |
5543 | if (likely(ret == 0)) { | |
5544 | total_cycles -= start_cycles; | |
1da177e4 | 5545 | |
a1ecf7f6 TL |
5546 | if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles; |
5547 | if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles; | |
1da177e4 | 5548 | |
a1ecf7f6 TL |
5549 | pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles; |
5550 | } | |
5551 | } | |
5552 | else { | |
5553 | (*pfm_alt_intr_handler->handler)(irq, arg, regs); | |
1da177e4 | 5554 | } |
a1ecf7f6 | 5555 | |
8b0b1db0 | 5556 | put_cpu(); |
1da177e4 LT |
5557 | return IRQ_HANDLED; |
5558 | } | |
5559 | ||
5560 | /* | |
5561 | * /proc/perfmon interface, for debug only | |
5562 | */ | |
5563 | ||
fa276f36 | 5564 | #define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1) |
1da177e4 LT |
5565 | |
5566 | static void * | |
5567 | pfm_proc_start(struct seq_file *m, loff_t *pos) | |
5568 | { | |
5569 | if (*pos == 0) { | |
5570 | return PFM_PROC_SHOW_HEADER; | |
5571 | } | |
5572 | ||
5dd3c994 | 5573 | while (*pos <= nr_cpu_ids) { |
1da177e4 LT |
5574 | if (cpu_online(*pos - 1)) { |
5575 | return (void *)*pos; | |
5576 | } | |
5577 | ++*pos; | |
5578 | } | |
5579 | return NULL; | |
5580 | } | |
5581 | ||
5582 | static void * | |
5583 | pfm_proc_next(struct seq_file *m, void *v, loff_t *pos) | |
5584 | { | |
5585 | ++*pos; | |
5586 | return pfm_proc_start(m, pos); | |
5587 | } | |
5588 | ||
5589 | static void | |
5590 | pfm_proc_stop(struct seq_file *m, void *v) | |
5591 | { | |
5592 | } | |
5593 | ||
5594 | static void | |
5595 | pfm_proc_show_header(struct seq_file *m) | |
5596 | { | |
5597 | struct list_head * pos; | |
5598 | pfm_buffer_fmt_t * entry; | |
5599 | unsigned long flags; | |
5600 | ||
5601 | seq_printf(m, | |
5602 | "perfmon version : %u.%u\n" | |
5603 | "model : %s\n" | |
5604 | "fastctxsw : %s\n" | |
5605 | "expert mode : %s\n" | |
5606 | "ovfl_mask : 0x%lx\n" | |
5607 | "PMU flags : 0x%x\n", | |
5608 | PFM_VERSION_MAJ, PFM_VERSION_MIN, | |
5609 | pmu_conf->pmu_name, | |
5610 | pfm_sysctl.fastctxsw > 0 ? "Yes": "No", | |
5611 | pfm_sysctl.expert_mode > 0 ? "Yes": "No", | |
5612 | pmu_conf->ovfl_val, | |
5613 | pmu_conf->flags); | |
5614 | ||
5615 | LOCK_PFS(flags); | |
5616 | ||
5617 | seq_printf(m, | |
5618 | "proc_sessions : %u\n" | |
5619 | "sys_sessions : %u\n" | |
5620 | "sys_use_dbregs : %u\n" | |
5621 | "ptrace_use_dbregs : %u\n", | |
5622 | pfm_sessions.pfs_task_sessions, | |
5623 | pfm_sessions.pfs_sys_sessions, | |
5624 | pfm_sessions.pfs_sys_use_dbregs, | |
5625 | pfm_sessions.pfs_ptrace_use_dbregs); | |
5626 | ||
5627 | UNLOCK_PFS(flags); | |
5628 | ||
5629 | spin_lock(&pfm_buffer_fmt_lock); | |
5630 | ||
5631 | list_for_each(pos, &pfm_buffer_fmt_list) { | |
5632 | entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list); | |
7451adc5 AS |
5633 | seq_printf(m, "format : %16phD %s\n", |
5634 | entry->fmt_uuid, entry->fmt_name); | |
1da177e4 LT |
5635 | } |
5636 | spin_unlock(&pfm_buffer_fmt_lock); | |
5637 | ||
5638 | } | |
5639 | ||
5640 | static int | |
5641 | pfm_proc_show(struct seq_file *m, void *v) | |
5642 | { | |
5643 | unsigned long psr; | |
5644 | unsigned int i; | |
5645 | int cpu; | |
5646 | ||
5647 | if (v == PFM_PROC_SHOW_HEADER) { | |
5648 | pfm_proc_show_header(m); | |
5649 | return 0; | |
5650 | } | |
5651 | ||
5652 | /* show info for CPU (v - 1) */ | |
5653 | ||
5654 | cpu = (long)v - 1; | |
5655 | seq_printf(m, | |
5656 | "CPU%-2d overflow intrs : %lu\n" | |
5657 | "CPU%-2d overflow cycles : %lu\n" | |
5658 | "CPU%-2d overflow min : %lu\n" | |
5659 | "CPU%-2d overflow max : %lu\n" | |
5660 | "CPU%-2d smpl handler calls : %lu\n" | |
5661 | "CPU%-2d smpl handler cycles : %lu\n" | |
5662 | "CPU%-2d spurious intrs : %lu\n" | |
5663 | "CPU%-2d replay intrs : %lu\n" | |
5664 | "CPU%-2d syst_wide : %d\n" | |
5665 | "CPU%-2d dcr_pp : %d\n" | |
5666 | "CPU%-2d exclude idle : %d\n" | |
5667 | "CPU%-2d owner : %d\n" | |
5668 | "CPU%-2d context : %p\n" | |
5669 | "CPU%-2d activations : %lu\n", | |
5670 | cpu, pfm_stats[cpu].pfm_ovfl_intr_count, | |
5671 | cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles, | |
5672 | cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min, | |
5673 | cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max, | |
5674 | cpu, pfm_stats[cpu].pfm_smpl_handler_calls, | |
5675 | cpu, pfm_stats[cpu].pfm_smpl_handler_cycles, | |
5676 | cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count, | |
5677 | cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count, | |
5678 | cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0, | |
5679 | cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0, | |
5680 | cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0, | |
5681 | cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1, | |
5682 | cpu, pfm_get_cpu_data(pmu_ctx, cpu), | |
5683 | cpu, pfm_get_cpu_data(pmu_activation_number, cpu)); | |
5684 | ||
5685 | if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) { | |
5686 | ||
5687 | psr = pfm_get_psr(); | |
5688 | ||
5689 | ia64_srlz_d(); | |
5690 | ||
5691 | seq_printf(m, | |
5692 | "CPU%-2d psr : 0x%lx\n" | |
5693 | "CPU%-2d pmc0 : 0x%lx\n", | |
5694 | cpu, psr, | |
5695 | cpu, ia64_get_pmc(0)); | |
5696 | ||
5697 | for (i=0; PMC_IS_LAST(i) == 0; i++) { | |
5698 | if (PMC_IS_COUNTING(i) == 0) continue; | |
5699 | seq_printf(m, | |
5700 | "CPU%-2d pmc%u : 0x%lx\n" | |
5701 | "CPU%-2d pmd%u : 0x%lx\n", | |
5702 | cpu, i, ia64_get_pmc(i), | |
5703 | cpu, i, ia64_get_pmd(i)); | |
5704 | } | |
5705 | } | |
5706 | return 0; | |
5707 | } | |
5708 | ||
a23fe55e | 5709 | const struct seq_operations pfm_seq_ops = { |
1da177e4 LT |
5710 | .start = pfm_proc_start, |
5711 | .next = pfm_proc_next, | |
5712 | .stop = pfm_proc_stop, | |
5713 | .show = pfm_proc_show | |
5714 | }; | |
5715 | ||
5716 | static int | |
5717 | pfm_proc_open(struct inode *inode, struct file *file) | |
5718 | { | |
5719 | return seq_open(file, &pfm_seq_ops); | |
5720 | } | |
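/*
 * Illustrative note (not part of the original file): reading /proc/perfmon,
 * e.g. with "cat /proc/perfmon", emits the global header produced by
 * pfm_proc_show_header() followed by one block per online CPU in the format
 * of pfm_proc_show() above; it is meant for debugging only.
 */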
5721 | ||
5722 | ||
5723 | /* | |
5724 | * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens | |
5725 | * during pfm_enable() hence before pfm_start(). We cannot assume monitoring | |
5726 | * is active or inactive based on mode. We must rely on the value in | |
5727 | * local_cpu_data->pfm_syst_info | |
5728 | */ | |
5729 | void | |
5730 | pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin) | |
5731 | { | |
5732 | struct pt_regs *regs; | |
5733 | unsigned long dcr; | |
5734 | unsigned long dcr_pp; | |
5735 | ||
5736 | dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0; | |
5737 | ||
5738 | /* | |
5739 | * pid 0 is guaranteed to be the idle task. There is one such task with pid 0 | |
5740 | * on every CPU, so we can rely on the pid to identify the idle task. | |
5741 | */ | |
5742 | if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) { | |
6450578f | 5743 | regs = task_pt_regs(task); |
1da177e4 LT |
5744 | ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0; |
5745 | return; | |
5746 | } | |
5747 | /* | |
5748 | * if monitoring has started | |
5749 | */ | |
5750 | if (dcr_pp) { | |
5751 | dcr = ia64_getreg(_IA64_REG_CR_DCR); | |
5752 | /* | |
5753 | * context switching in? | |
5754 | */ | |
5755 | if (is_ctxswin) { | |
5756 | /* mask monitoring for the idle task */ | |
5757 | ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP); | |
5758 | pfm_clear_psr_pp(); | |
5759 | ia64_srlz_i(); | |
5760 | return; | |
5761 | } | |
5762 | /* | |
5763 | * context switching out | |
5764 | * restore monitoring for next task | |
5765 | * | |
5766 | * Due to inlining this odd if-then-else construction generates | |
5767 | * better code. | |
5768 | */ | |
5769 | ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP); | |
5770 | pfm_set_psr_pp(); | |
5771 | ia64_srlz_i(); | |
5772 | } | |
5773 | } | |
5774 | ||
5775 | #ifdef CONFIG_SMP | |
5776 | ||
5777 | static void | |
5778 | pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs) | |
5779 | { | |
5780 | struct task_struct *task = ctx->ctx_task; | |
5781 | ||
5782 | ia64_psr(regs)->up = 0; | |
5783 | ia64_psr(regs)->sp = 1; | |
5784 | ||
5785 | if (GET_PMU_OWNER() == task) { | |
19c5870c AD |
5786 | DPRINT(("cleared ownership for [%d]\n", |
5787 | task_pid_nr(ctx->ctx_task))); | |
1da177e4 LT |
5788 | SET_PMU_OWNER(NULL, NULL); |
5789 | } | |
5790 | ||
5791 | /* | |
5792 | * disconnect the task from the context and vice-versa | |
5793 | */ | |
5794 | PFM_SET_WORK_PENDING(task, 0); | |
5795 | ||
5796 | task->thread.pfm_context = NULL; | |
5797 | task->thread.flags &= ~IA64_THREAD_PM_VALID; | |
5798 | ||
19c5870c | 5799 | DPRINT(("force cleanup for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
5800 | } |
5801 | ||
5802 | ||
5803 | /* | |
5804 | * in 2.6, interrupts are masked when we come here and the runqueue lock is held | |
5805 | */ | |
5806 | void | |
5807 | pfm_save_regs(struct task_struct *task) | |
5808 | { | |
5809 | pfm_context_t *ctx; | |
1da177e4 LT |
5810 | unsigned long flags; |
5811 | u64 psr; | |
5812 | ||
5813 | ||
5814 | ctx = PFM_GET_CTX(task); | |
5815 | if (ctx == NULL) return; | |
1da177e4 LT |
5816 | |
5817 | /* | |
5818 | * we always come here with interrupts ALREADY disabled by | |
5819 | * the scheduler. So we simply need to protect against concurrent | |
5820 | * access, not CPU concurrency. | |
5821 | */ | |
5822 | flags = pfm_protect_ctx_ctxsw(ctx); | |
5823 | ||
5824 | if (ctx->ctx_state == PFM_CTX_ZOMBIE) { | |
6450578f | 5825 | struct pt_regs *regs = task_pt_regs(task); |
1da177e4 LT |
5826 | |
5827 | pfm_clear_psr_up(); | |
5828 | ||
5829 | pfm_force_cleanup(ctx, regs); | |
5830 | ||
5831 | BUG_ON(ctx->ctx_smpl_hdr); | |
5832 | ||
5833 | pfm_unprotect_ctx_ctxsw(ctx, flags); | |
5834 | ||
5835 | pfm_context_free(ctx); | |
5836 | return; | |
5837 | } | |
5838 | ||
5839 | /* | |
5840 | * save current PSR: needed because we modify it | |
5841 | */ | |
5842 | ia64_srlz_d(); | |
5843 | psr = pfm_get_psr(); | |
5844 | ||
5845 | BUG_ON(psr & (IA64_PSR_I)); | |
5846 | ||
5847 | /* | |
5848 | * stop monitoring: | |
5849 | * This is the last instruction which may generate an overflow | |
5850 | * | |
5851 | * We do not need to set psr.sp because it is irrelevant in the kernel. | |
5852 | * It will be restored from ipsr when going back to user level | |
5853 | */ | |
5854 | pfm_clear_psr_up(); | |
5855 | ||
5856 | /* | |
5857 | * keep a copy of psr.up (for reload) | |
5858 | */ | |
5859 | ctx->ctx_saved_psr_up = psr & IA64_PSR_UP; | |
5860 | ||
5861 | /* | |
5862 | * release ownership of this PMU. | |
5863 | * PM interrupts are masked, so nothing | |
5864 | * can happen. | |
5865 | */ | |
5866 | SET_PMU_OWNER(NULL, NULL); | |
5867 | ||
5868 | /* | |
5869 | * we systematically save the PMDs as we have no | |
5870 | * guarantee we will be scheduled on that same | |
5871 | * CPU again. | |
5872 | */ | |
35589a8f | 5873 | pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]); |
1da177e4 LT |
5874 | |
5875 | /* | |
5876 | * save pmc0 ia64_srlz_d() done in pfm_save_pmds() | |
5877 | * we will need it on the restore path to check | |
5878 | * for pending overflow. | |
5879 | */ | |
35589a8f | 5880 | ctx->th_pmcs[0] = ia64_get_pmc(0); |
1da177e4 LT |
5881 | |
5882 | /* | |
5883 | * unfreeze PMU if had pending overflows | |
5884 | */ | |
35589a8f | 5885 | if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); |
1da177e4 LT |
5886 | |
5887 | /* | |
5888 | * finally, allow context access. | |
5889 | * interrupts will still be masked after this call. | |
5890 | */ | |
5891 | pfm_unprotect_ctx_ctxsw(ctx, flags); | |
5892 | } | |
5893 | ||
5894 | #else /* !CONFIG_SMP */ | |
5895 | void | |
5896 | pfm_save_regs(struct task_struct *task) | |
5897 | { | |
5898 | pfm_context_t *ctx; | |
5899 | u64 psr; | |
5900 | ||
5901 | ctx = PFM_GET_CTX(task); | |
5902 | if (ctx == NULL) return; | |
5903 | ||
5904 | /* | |
5905 | * save current PSR: needed because we modify it | |
5906 | */ | |
5907 | psr = pfm_get_psr(); | |
5908 | ||
5909 | BUG_ON(psr & (IA64_PSR_I)); | |
5910 | ||
5911 | /* | |
5912 | * stop monitoring: | |
5913 | * This is the last instruction which may generate an overflow | |
5914 | * | |
5915 | * We do not need to set psr.sp because it is irrelevant in the kernel. | |
5916 | * It will be restored from ipsr when going back to user level | |
5917 | */ | |
5918 | pfm_clear_psr_up(); | |
5919 | ||
5920 | /* | |
5921 | * keep a copy of psr.up (for reload) | |
5922 | */ | |
5923 | ctx->ctx_saved_psr_up = psr & IA64_PSR_UP; | |
5924 | } | |
5925 | ||
5926 | static void | |
5927 | pfm_lazy_save_regs (struct task_struct *task) | |
5928 | { | |
5929 | pfm_context_t *ctx; | |
1da177e4 LT |
5930 | unsigned long flags; |
5931 | ||
5932 | { u64 psr = pfm_get_psr(); | |
5933 | BUG_ON(psr & IA64_PSR_UP); | |
5934 | } | |
5935 | ||
5936 | ctx = PFM_GET_CTX(task); | |
1da177e4 LT |
5937 | |
5938 | /* | |
5939 | * we need to mask PMU overflow here to | |
5940 | * make sure that we maintain pmc0 until | |
5941 | * we save it. overflow interrupts are | |
5942 | * treated as spurious if there is no | |
5943 | * owner. | |
5944 | * | |
5945 | * XXX: I don't think this is necessary | |
5946 | */ | |
5947 | PROTECT_CTX(ctx,flags); | |
5948 | ||
5949 | /* | |
5950 | * release ownership of this PMU. | |
5951 | * must be done before we save the registers. | |
5952 | * | |
5953 | * after this call any PMU interrupt is treated | |
5954 | * as spurious. | |
5955 | */ | |
5956 | SET_PMU_OWNER(NULL, NULL); | |
5957 | ||
5958 | /* | |
5959 | * save all the pmds we use | |
5960 | */ | |
35589a8f | 5961 | pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]); |
1da177e4 LT |
5962 | |
5963 | /* | |
5964 | * save pmc0 ia64_srlz_d() done in pfm_save_pmds() | |
5965 | * it is needed to check for pending overflow | |
5966 | * on the restore path | |
5967 | */ | |
35589a8f | 5968 | ctx->th_pmcs[0] = ia64_get_pmc(0); |
1da177e4 LT |
5969 | |
5970 | /* | |
5971 | * unfreeze PMU if had pending overflows | |
5972 | */ | |
35589a8f | 5973 | if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); |
1da177e4 LT |
5974 | |
5975 | /* | |
5976 | * now we can unmask PMU interrupts, they will | |
5977 | * be treated as purely spurious and we will not | |
5978 | * lose any information | |
5979 | */ | |
5980 | UNPROTECT_CTX(ctx,flags); | |
5981 | } | |
5982 | #endif /* CONFIG_SMP */ | |
5983 | ||
5984 | #ifdef CONFIG_SMP | |
5985 | /* | |
5986 | * in 2.6, interrupts are masked when we come here and the runqueue lock is held | |
5987 | */ | |
5988 | void | |
5989 | pfm_load_regs (struct task_struct *task) | |
5990 | { | |
5991 | pfm_context_t *ctx; | |
1da177e4 LT |
5992 | unsigned long pmc_mask = 0UL, pmd_mask = 0UL; |
5993 | unsigned long flags; | |
5994 | u64 psr, psr_up; | |
5995 | int need_irq_resend; | |
5996 | ||
5997 | ctx = PFM_GET_CTX(task); | |
5998 | if (unlikely(ctx == NULL)) return; | |
5999 | ||
6000 | BUG_ON(GET_PMU_OWNER()); | |
6001 | ||
1da177e4 LT |
6002 | /* |
6003 | * possible on unload | |
6004 | */ | |
35589a8f | 6005 | if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return; |
1da177e4 LT |
6006 | |
6007 | /* | |
6008 | * we always come here with interrupts ALREADY disabled by | |
6009 | * the scheduler. So we simply need to protect against concurrent | |
6010 | * access, not CPU concurrency. | |
6011 | */ | |
6012 | flags = pfm_protect_ctx_ctxsw(ctx); | |
6013 | psr = pfm_get_psr(); | |
6014 | ||
6015 | need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND; | |
6016 | ||
6017 | BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); | |
6018 | BUG_ON(psr & IA64_PSR_I); | |
6019 | ||
6020 | if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) { | |
6450578f | 6021 | struct pt_regs *regs = task_pt_regs(task); |
1da177e4 LT |
6022 | |
6023 | BUG_ON(ctx->ctx_smpl_hdr); | |
6024 | ||
6025 | pfm_force_cleanup(ctx, regs); | |
6026 | ||
6027 | pfm_unprotect_ctx_ctxsw(ctx, flags); | |
6028 | ||
6029 | /* | |
6030 | * this one (kmalloc'ed) is fine with interrupts disabled | |
6031 | */ | |
6032 | pfm_context_free(ctx); | |
6033 | ||
6034 | return; | |
6035 | } | |
6036 | ||
6037 | /* | |
6038 | * we restore ALL the debug registers to avoid picking up | |
6039 | * stale state. | |
6040 | */ | |
6041 | if (ctx->ctx_fl_using_dbreg) { | |
6042 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | |
6043 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | |
6044 | } | |
6045 | /* | |
6046 | * retrieve saved psr.up | |
6047 | */ | |
6048 | psr_up = ctx->ctx_saved_psr_up; | |
6049 | ||
6050 | /* | |
6051 | * if we were the last user of the PMU on that CPU, | |
6052 | * then nothing to do except restore psr | |
6053 | */ | |
6054 | if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) { | |
6055 | ||
6056 | /* | |
6057 | * retrieve partial reload masks (due to user modifications) | |
6058 | */ | |
6059 | pmc_mask = ctx->ctx_reload_pmcs[0]; | |
6060 | pmd_mask = ctx->ctx_reload_pmds[0]; | |
6061 | ||
6062 | } else { | |
6063 | /* | |
6064 | * To avoid leaking information to the user level when psr.sp=0, | |
6065 | * we must reload ALL implemented pmds (even the ones we don't use). | |
6066 | * In the kernel we only allow PFM_READ_PMDS on registers which | |
6067 | * we initialized or requested (sampling) so there is no risk there. | |
6068 | */ | |
6069 | pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0]; | |
6070 | ||
6071 | /* | |
6072 | * ALL accessible PMCs are systematically reloaded, unused registers | |
6073 | * get their default (from pfm_reset_pmu_state()) values to avoid picking | |
6074 | * up stale configuration. | |
6075 | * | |
6076 | * PMC0 is never in the mask. It is always restored separately. | |
6077 | */ | |
6078 | pmc_mask = ctx->ctx_all_pmcs[0]; | |
6079 | } | |
6080 | /* | |
6081 | * when context is MASKED, we will restore PMC with plm=0 | |
6082 | * and PMD with stale information, but that's ok, nothing | |
6083 | * will be captured. | |
6084 | * | |
6085 | * XXX: optimize here | |
6086 | */ | |
35589a8f KA |
6087 | if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask); |
6088 | if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask); | |
1da177e4 LT |
6089 | |
6090 | /* | |
6091 | * check for pending overflow at the time the state | |
6092 | * was saved. | |
6093 | */ | |
35589a8f | 6094 | if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) { |
1da177e4 LT |
6095 | /* |
6096 | * reload pmc0 with the overflow information | |
6097 | * On McKinley PMU, this will trigger a PMU interrupt | |
6098 | */ | |
35589a8f | 6099 | ia64_set_pmc(0, ctx->th_pmcs[0]); |
1da177e4 | 6100 | ia64_srlz_d(); |
35589a8f | 6101 | ctx->th_pmcs[0] = 0UL; |
1da177e4 LT |
6102 | |
6103 | /* | |
6104 | * will replay the PMU interrupt | |
6105 | */ | |
c0ad90a3 | 6106 | if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR); |
1da177e4 LT |
6107 | |
6108 | pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; | |
6109 | } | |
6110 | ||
6111 | /* | |
6112 | * we just did a reload, so we reset the partial reload fields | |
6113 | */ | |
6114 | ctx->ctx_reload_pmcs[0] = 0UL; | |
6115 | ctx->ctx_reload_pmds[0] = 0UL; | |
6116 | ||
6117 | SET_LAST_CPU(ctx, smp_processor_id()); | |
6118 | ||
6119 | /* | |
6120 | * dump activation value for this PMU | |
6121 | */ | |
6122 | INC_ACTIVATION(); | |
6123 | /* | |
6124 | * record current activation for this context | |
6125 | */ | |
6126 | SET_ACTIVATION(ctx); | |
6127 | ||
6128 | /* | |
6129 | * establish new ownership. | |
6130 | */ | |
6131 | SET_PMU_OWNER(task, ctx); | |
6132 | ||
6133 | /* | |
6134 | * restore the psr.up bit. measurement | |
6135 | * is active again. | |
6136 | * no PMU interrupt can happen at this point | |
6137 | * because we still have interrupts disabled. | |
6138 | */ | |
6139 | if (likely(psr_up)) pfm_set_psr_up(); | |
6140 | ||
6141 | /* | |
6142 | * allow concurrent access to context | |
6143 | */ | |
6144 | pfm_unprotect_ctx_ctxsw(ctx, flags); | |
6145 | } | |
6146 | #else /* !CONFIG_SMP */ | |
6147 | /* | |
6148 | * reload PMU state for UP kernels | |
6149 | * in 2.5 we come here with interrupts disabled | |
6150 | */ | |
6151 | void | |
6152 | pfm_load_regs (struct task_struct *task) | |
6153 | { | |
1da177e4 LT |
6154 | pfm_context_t *ctx; |
6155 | struct task_struct *owner; | |
6156 | unsigned long pmd_mask, pmc_mask; | |
6157 | u64 psr, psr_up; | |
6158 | int need_irq_resend; | |
6159 | ||
6160 | owner = GET_PMU_OWNER(); | |
6161 | ctx = PFM_GET_CTX(task); | |
1da177e4 LT |
6162 | psr = pfm_get_psr(); |
6163 | ||
6164 | BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); | |
6165 | BUG_ON(psr & IA64_PSR_I); | |
6166 | ||
6167 | /* | |
6168 | * we restore ALL the debug registers to avoid picking up | |
6169 | * stale state. | |
6170 | * | |
6171 | * This must be done even when the task is still the owner | |
6172 | * as the registers may have been modified via ptrace() | |
6173 | * (not perfmon) by the previous task. | |
6174 | */ | |
6175 | if (ctx->ctx_fl_using_dbreg) { | |
6176 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | |
6177 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | |
6178 | } | |
6179 | ||
6180 | /* | |
6181 | * retrieve saved psr.up | |
6182 | */ | |
6183 | psr_up = ctx->ctx_saved_psr_up; | |
6184 | need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND; | |
6185 | ||
6186 | /* | |
6187 | * short path, our state is still there, just | |
6188 | * need to restore psr and we go | |
6189 | * | |
6190 | * we touch neither PMCs nor PMDs. The psr is not touched | |
6191 | * by the overflow_handler. So we are safe w.r.t. interrupt | |
6192 | * concurrency even without interrupt masking. | |
6193 | */ | |
6194 | if (likely(owner == task)) { | |
6195 | if (likely(psr_up)) pfm_set_psr_up(); | |
6196 | return; | |
6197 | } | |
6198 | ||
6199 | /* | |
6200 | * someone else is still using the PMU, first push it out and | |
6201 | * then we'll be able to install our stuff ! | |
6202 | * | |
6203 | * Upon return, there will be no owner for the current PMU | |
6204 | */ | |
6205 | if (owner) pfm_lazy_save_regs(owner); | |
6206 | ||
6207 | /* | |
6208 | * To avoid leaking information to the user level when psr.sp=0, | |
6209 | * we must reload ALL implemented pmds (even the ones we don't use). | |
6210 | * In the kernel we only allow PFM_READ_PMDS on registers which | |
6211 | * we initialized or requested (sampling) so there is no risk there. | |
6212 | */ | |
6213 | pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0]; | |
6214 | ||
6215 | /* | |
6216 | * ALL accessible PMCs are systematically reloaded, unused registers | |
6217 | * get their default (from pfm_reset_pmu_state()) values to avoid picking | |
6218 | * up stale configuration. | |
6219 | * | |
6220 | * PMC0 is never in the mask. It is always restored separately | |
6221 | */ | |
6222 | pmc_mask = ctx->ctx_all_pmcs[0]; | |
6223 | ||
35589a8f KA |
6224 | pfm_restore_pmds(ctx->th_pmds, pmd_mask); |
6225 | pfm_restore_pmcs(ctx->th_pmcs, pmc_mask); | |
1da177e4 LT |
6226 | |
6227 | /* | |
6228 | * check for pending overflow at the time the state | |
6229 | * was saved. | |
6230 | */ | |
35589a8f | 6231 | if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) { |
1da177e4 LT |
6232 | /* |
6233 | * reload pmc0 with the overflow information | |
6234 | * On McKinley PMU, this will trigger a PMU interrupt | |
6235 | */ | |
35589a8f | 6236 | ia64_set_pmc(0, ctx->th_pmcs[0]); |
1da177e4 LT |
6237 | ia64_srlz_d(); |
6238 | ||
35589a8f | 6239 | ctx->th_pmcs[0] = 0UL; |
1da177e4 LT |
6240 | |
6241 | /* | |
6242 | * will replay the PMU interrupt | |
6243 | */ | |
c0ad90a3 | 6244 | if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR); |
1da177e4 LT |
6245 | |
6246 | pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; | |
6247 | } | |
6248 | ||
6249 | /* | |
6250 | * establish new ownership. | |
6251 | */ | |
6252 | SET_PMU_OWNER(task, ctx); | |
6253 | ||
6254 | /* | |
6255 | * restore the psr.up bit. measurement | |
6256 | * is active again. | |
6257 | * no PMU interrupt can happen at this point | |
6258 | * because we still have interrupts disabled. | |
6259 | */ | |
6260 | if (likely(psr_up)) pfm_set_psr_up(); | |
6261 | } | |
6262 | #endif /* CONFIG_SMP */ | |
6263 | ||
6264 | /* | |
6265 | * this function assumes monitoring is stopped | |
6266 | */ | |
6267 | static void | |
6268 | pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) | |
6269 | { | |
6270 | u64 pmc0; | |
6271 | unsigned long mask2, val, pmd_val, ovfl_val; | |
6272 | int i, can_access_pmu = 0; | |
6273 | int is_self; | |
6274 | ||
6275 | /* | |
6276 | * is the caller the task being monitored (or which initiated the | |
6277 | * session for system wide measurements) | |
6278 | */ | |
6279 | is_self = ctx->ctx_task == task ? 1 : 0; | |
6280 | ||
6281 | /* | |
6282 | * can access PMU if task is the owner of the PMU state on the current CPU | |
6283 | * or if we are running on the CPU bound to the context in system-wide mode | |
6284 | * (that is not necessarily the task the context is attached to in this mode). | |
6285 | * In system-wide we always have can_access_pmu true because a task running on an | |
6286 | * invalid processor is flagged earlier in the call stack (see pfm_stop). | |
6287 | */ | |
6288 | can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id()); | |
6289 | if (can_access_pmu) { | |
6290 | /* | |
6291 | * Mark the PMU as not owned | |
6292 | * This will cause the interrupt handler to do nothing in case an overflow | |
6293 | * interrupt was in-flight | |
6294 | * This also guarantees that pmc0 will contain the final state | |
6295 | * It virtually gives us full control on overflow processing from that point | |
6296 | * on. | |
6297 | */ | |
6298 | SET_PMU_OWNER(NULL, NULL); | |
6299 | DPRINT(("releasing ownership\n")); | |
6300 | ||
6301 | /* | |
6302 | * read current overflow status: | |
6303 | * | |
6304 | * we are guaranteed to read the final stable state | |
6305 | */ | |
6306 | ia64_srlz_d(); | |
6307 | pmc0 = ia64_get_pmc(0); /* slow */ | |
6308 | ||
6309 | /* | |
6310 | * reset freeze bit, overflow status information destroyed | |
6311 | */ | |
6312 | pfm_unfreeze_pmu(); | |
6313 | } else { | |
35589a8f | 6314 | pmc0 = ctx->th_pmcs[0]; |
1da177e4 LT |
6315 | /* |
6316 | * clear whatever overflow status bits there were | |
6317 | */ | |
35589a8f | 6318 | ctx->th_pmcs[0] = 0; |
1da177e4 LT |
6319 | } |
6320 | ovfl_val = pmu_conf->ovfl_val; | |
6321 | /* | |
6322 | * we save all the used pmds | |
6323 | * we take care of overflows for counting PMDs | |
6324 | * | |
6325 | * XXX: sampling situation is not taken into account here | |
6326 | */ | |
6327 | mask2 = ctx->ctx_used_pmds[0]; | |
6328 | ||
6329 | DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2)); | |
6330 | ||
6331 | for (i = 0; mask2; i++, mask2>>=1) { | |
6332 | ||
6333 | /* skip non used pmds */ | |
6334 | if ((mask2 & 0x1) == 0) continue; | |
6335 | ||
6336 | /* | |
6337 | * can access PMU always true in system wide mode | |
6338 | */ | |
35589a8f | 6339 | val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i]; |
1da177e4 LT |
6340 | |
6341 | if (PMD_IS_COUNTING(i)) { | |
6342 | DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n", | |
19c5870c | 6343 | task_pid_nr(task), |
1da177e4 LT |
6344 | i, |
6345 | ctx->ctx_pmds[i].val, | |
6346 | val & ovfl_val)); | |
6347 | ||
6348 | /* | |
6349 | * we rebuild the full 64 bit value of the counter | |
6350 | */ | |
6351 | val = ctx->ctx_pmds[i].val + (val & ovfl_val); | |
6352 | ||
6353 | /* | |
6354 | * now everything is in ctx_pmds[] and we need | |
6355 | * to clear the saved context from save_regs() such that | |
6356 | * pfm_read_pmds() gets the correct value | |
6357 | */ | |
6358 | pmd_val = 0UL; | |
6359 | ||
6360 | /* | |
6361 | * take care of overflow inline | |
6362 | */ | |
6363 | if (pmc0 & (1UL << i)) { | |
6364 | val += 1 + ovfl_val; | |
19c5870c | 6365 | DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i)); |
1da177e4 LT |
6366 | } |
6367 | } | |
6368 | ||
19c5870c | 6369 | DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val)); |
1da177e4 | 6370 | |
35589a8f | 6371 | if (is_self) ctx->th_pmds[i] = pmd_val; |
1da177e4 LT |
6372 | |
6373 | ctx->ctx_pmds[i].val = val; | |
6374 | } | |
6375 | } | |
6376 | ||
6377 | static struct irqaction perfmon_irqaction = { | |
6378 | .handler = pfm_interrupt_handler, | |
1da177e4 LT |
6379 | .name = "perfmon" |
6380 | }; | |
6381 | ||
a1ecf7f6 TL |
6382 | static void |
6383 | pfm_alt_save_pmu_state(void *data) | |
6384 | { | |
6385 | struct pt_regs *regs; | |
6386 | ||
6450578f | 6387 | regs = task_pt_regs(current); |
a1ecf7f6 TL |
6388 | |
6389 | DPRINT(("called\n")); | |
6390 | ||
6391 | /* | |
6392 | * should not be necessary but | |
6393 | * let's take no risk | |
6394 | */ | |
6395 | pfm_clear_psr_up(); | |
6396 | pfm_clear_psr_pp(); | |
6397 | ia64_psr(regs)->pp = 0; | |
6398 | ||
6399 | /* | |
6400 | * This call is required | |
6401 | * May cause a spurious interrupt on some processors | |
6402 | */ | |
6403 | pfm_freeze_pmu(); | |
6404 | ||
6405 | ia64_srlz_d(); | |
6406 | } | |
6407 | ||
6408 | void | |
6409 | pfm_alt_restore_pmu_state(void *data) | |
6410 | { | |
6411 | struct pt_regs *regs; | |
6412 | ||
6450578f | 6413 | regs = task_pt_regs(current); |
a1ecf7f6 TL |
6414 | |
6415 | DPRINT(("called\n")); | |
6416 | ||
6417 | /* | |
6418 | * put PMU back in state expected | |
6419 | * by perfmon | |
6420 | */ | |
6421 | pfm_clear_psr_up(); | |
6422 | pfm_clear_psr_pp(); | |
6423 | ia64_psr(regs)->pp = 0; | |
6424 | ||
6425 | /* | |
6426 | * perfmon runs with PMU unfrozen at all times | |
6427 | */ | |
6428 | pfm_unfreeze_pmu(); | |
6429 | ||
6430 | ia64_srlz_d(); | |
6431 | } | |
6432 | ||
6433 | int | |
6434 | pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) | |
6435 | { | |
6436 | int ret, i; | |
6437 | int reserve_cpu; | |
6438 | ||
6439 | /* some sanity checks */ | |
6440 | if (hdl == NULL || hdl->handler == NULL) return -EINVAL; | |
6441 | ||
6442 | /* do the easy test first */ | |
6443 | if (pfm_alt_intr_handler) return -EBUSY; | |
6444 | ||
6445 | /* only one install or remove at a time; just fail the others */ | |
6446 | if (!spin_trylock(&pfm_alt_install_check)) { | |
6447 | return -EBUSY; | |
6448 | } | |
6449 | ||
6450 | /* reserve our session */ | |
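 | /* a system-wide session on each online CPU keeps regular perfmon contexts off the PMU */ | |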
6451 | for_each_online_cpu(reserve_cpu) { | |
6452 | ret = pfm_reserve_session(NULL, 1, reserve_cpu); | |
6453 | if (ret) goto cleanup_reserve; | |
6454 | } | |
6455 | ||
6456 | /* save the current system wide pmu states */ | |
15c8b6c1 | 6457 | ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1); |
a1ecf7f6 TL |
6458 | if (ret) { |
6459 | DPRINT(("on_each_cpu() failed: %d\n", ret)); | |
6460 | goto cleanup_reserve; | |
6461 | } | |
6462 | ||
6463 | /* officially change to the alternate interrupt handler */ | |
6464 | pfm_alt_intr_handler = hdl; | |
6465 | ||
6466 | spin_unlock(&pfm_alt_install_check); | |
6467 | ||
6468 | return 0; | |
6469 | ||
6470 | cleanup_reserve: | |
6471 | for_each_online_cpu(i) { | |
6472 | /* don't unreserve more than we reserved */ | |
6473 | if (i >= reserve_cpu) break; | |
6474 | ||
6475 | pfm_unreserve_session(NULL, 1, i); | |
6476 | } | |
6477 | ||
6478 | spin_unlock(&pfm_alt_install_check); | |
6479 | ||
6480 | return ret; | |
6481 | } | |
6482 | EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt); | |
6483 | ||
6484 | int | |
6485 | pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) | |
6486 | { | |
6487 | int i; | |
6488 | int ret; | |
6489 | ||
6490 | if (hdl == NULL) return -EINVAL; | |
6491 | ||
6492 | /* cannot remove someone else's handler! */ | |
6493 | if (pfm_alt_intr_handler != hdl) return -EINVAL; | |
6494 | ||
6495 | /* only one install or remove at a time; just fail the others */ | |
6496 | if (!spin_trylock(&pfm_alt_install_check)) { | |
6497 | return -EBUSY; | |
6498 | } | |
6499 | ||
6500 | pfm_alt_intr_handler = NULL; | |
6501 | ||
15c8b6c1 | 6502 | ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1); |
a1ecf7f6 TL |
6503 | if (ret) { |
6504 | DPRINT(("on_each_cpu() failed: %d\n", ret)); | |
6505 | } | |
6506 | ||
6507 | for_each_online_cpu(i) { | |
6508 | pfm_unreserve_session(NULL, 1, i); | |
6509 | } | |
6510 | ||
6511 | spin_unlock(&pfm_alt_install_check); | |
6512 | ||
6513 | return 0; | |
6514 | } | |
6515 | EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt); | |
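 | /* | |
 | * Illustrative use of the two hooks above; "my_pmu_handler" is only a | |
 | * placeholder for the caller's own interrupt handler: | |
 | * | |
 | *	static pfm_intr_handler_desc_t desc = { .handler = my_pmu_handler }; | |
 | * | |
 | *	if (pfm_install_alt_pmu_interrupt(&desc) == 0) { | |
 | *		... drive the PMU directly ... | |
 | *		pfm_remove_alt_pmu_interrupt(&desc); | |
 | *	} | |
 | */ | |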
6516 | ||
1da177e4 LT |
6517 | /* |
6518 | * perfmon initialization routine, called from the initcall() table | |
6519 | */ | |
6520 | static int init_pfm_fs(void); | |
6521 | ||
6522 | static int __init | |
6523 | pfm_probe_pmu(void) | |
6524 | { | |
6525 | pmu_config_t **p; | |
6526 | int family; | |
6527 | ||
6528 | family = local_cpu_data->family; | |
6529 | p = pmu_confs; | |
6530 | ||
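 | /* | |
 | * an entry matches either through its probe() callback or by CPU | |
 | * family; pmu_family 0xff acts as the catch-all generic entry | |
 | */ | |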
6531 | while(*p) { | |
6532 | if ((*p)->probe) { | |
6533 | if ((*p)->probe() == 0) goto found; | |
6534 | } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) { | |
6535 | goto found; | |
6536 | } | |
6537 | p++; | |
6538 | } | |
6539 | return -1; | |
6540 | found: | |
6541 | pmu_conf = *p; | |
6542 | return 0; | |
6543 | } | |
6544 | ||
5dfe4c96 | 6545 | static const struct file_operations pfm_proc_fops = { |
1da177e4 LT |
6546 | .open = pfm_proc_open, |
6547 | .read = seq_read, | |
6548 | .llseek = seq_lseek, | |
6549 | .release = seq_release, | |
6550 | }; | |
6551 | ||
6552 | int __init | |
6553 | pfm_init(void) | |
6554 | { | |
6555 | unsigned int n, n_counters, i; | |
6556 | ||
6557 | printk("perfmon: version %u.%u IRQ %u\n", | |
6558 | PFM_VERSION_MAJ, | |
6559 | PFM_VERSION_MIN, | |
6560 | IA64_PERFMON_VECTOR); | |
6561 | ||
6562 | if (pfm_probe_pmu()) { | |
6563 | printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n", | |
6564 | local_cpu_data->family); | |
6565 | return -ENODEV; | |
6566 | } | |
6567 | ||
6568 | /* | |
6569 | * compute the number of implemented PMD/PMC from the | |
6570 | * description tables | |
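 | * (impl_pmcs/impl_pmds are bitmaps of 64-bit words: word i>>6, bit i&63) | |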
6571 | */ | |
6572 | n = 0; | |
6573 | for (i=0; PMC_IS_LAST(i) == 0; i++) { | |
6574 | if (PMC_IS_IMPL(i) == 0) continue; | |
6575 | pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63); | |
6576 | n++; | |
6577 | } | |
6578 | pmu_conf->num_pmcs = n; | |
6579 | ||
6580 | n = 0; n_counters = 0; | |
6581 | for (i=0; PMD_IS_LAST(i) == 0; i++) { | |
6582 | if (PMD_IS_IMPL(i) == 0) continue; | |
6583 | pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63); | |
6584 | n++; | |
6585 | if (PMD_IS_COUNTING(i)) n_counters++; | |
6586 | } | |
6587 | pmu_conf->num_pmds = n; | |
6588 | pmu_conf->num_counters = n_counters; | |
6589 | ||
6590 | /* | |
6591 | * sanity checks on the number of debug registers | |
6592 | */ | |
6593 | if (pmu_conf->use_rr_dbregs) { | |
6594 | if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) { | |
6595 | printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs); | |
6596 | pmu_conf = NULL; | |
6597 | return -1; | |
6598 | } | |
6599 | if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) { | |
6600 | printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs); | |
6601 | pmu_conf = NULL; | |
6602 | return -1; | |
6603 | } | |
6604 | } | |
6605 | ||
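 | /* ffz(ovfl_val) is the index of the first zero bit, i.e. the counter width in bits */ | |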
6606 | printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n", | |
6607 | pmu_conf->pmu_name, | |
6608 | pmu_conf->num_pmcs, | |
6609 | pmu_conf->num_pmds, | |
6610 | pmu_conf->num_counters, | |
6611 | ffz(pmu_conf->ovfl_val)); | |
6612 | ||
6613 | /* sanity check */ | |
35589a8f | 6614 | if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) { |
1da177e4 LT |
6615 | printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n"); |
6616 | pmu_conf = NULL; | |
6617 | return -1; | |
6618 | } | |
6619 | ||
6620 | /* | |
6621 | * create /proc/perfmon (mostly for debugging purposes) | |
6622 | */ | |
e2363768 | 6623 | perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops); |
1da177e4 LT |
6624 | if (perfmon_dir == NULL) { |
6625 | printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n"); | |
6626 | pmu_conf = NULL; | |
6627 | return -1; | |
6628 | } | |
1da177e4 LT |
6629 | |
6630 | /* | |
6631 | * create /proc/sys/kernel/perfmon (for debugging purposes) | |
6632 | */ | |
0b4d4147 | 6633 | pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root); |
1da177e4 LT |
6634 | |
6635 | /* | |
6636 | * initialize all our spinlocks | |
6637 | */ | |
6638 | spin_lock_init(&pfm_sessions.pfs_lock); | |
6639 | spin_lock_init(&pfm_buffer_fmt_lock); | |
6640 | ||
6641 | init_pfm_fs(); | |
6642 | ||
6643 | for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL; | |
6644 | ||
6645 | return 0; | |
6646 | } | |
6647 | ||
6648 | __initcall(pfm_init); | |
6649 | ||
6650 | /* | |
6651 | * this function is called before pfm_init() | |
6652 | */ | |
6653 | void | |
6654 | pfm_init_percpu (void) | |
6655 | { | |
ff741906 | 6656 | static int first_time=1; |
1da177e4 LT |
6657 | /* |
6658 | * make sure no measurement is active | |
6659 | * (may inherit programmed PMCs from EFI). | |
6660 | */ | |
6661 | pfm_clear_psr_pp(); | |
6662 | pfm_clear_psr_up(); | |
6663 | ||
6664 | /* | |
6665 | * we run with the PMU not frozen at all times | |
6666 | */ | |
6667 | pfm_unfreeze_pmu(); | |
6668 | ||
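 | /* | |
 | * the irqaction only needs to be registered once (by the first CPU | |
 | * through here); every CPU still programs cr.pmv below | |
 | */ | |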
ff741906 | 6669 | if (first_time) { |
1da177e4 | 6670 | register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction); |
ff741906 AR |
6671 | first_time=0; |
6672 | } | |
1da177e4 LT |
6673 | |
6674 | ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR); | |
6675 | ia64_srlz_d(); | |
6676 | } | |
6677 | ||
6678 | /* | |
6679 | * used for debug purposes only | |
6680 | */ | |
6681 | void | |
6682 | dump_pmu_state(const char *from) | |
6683 | { | |
6684 | struct task_struct *task; | |
1da177e4 LT |
6685 | struct pt_regs *regs; |
6686 | pfm_context_t *ctx; | |
6687 | unsigned long psr, dcr, info, flags; | |
6688 | int i, this_cpu; | |
6689 | ||
6690 | local_irq_save(flags); | |
6691 | ||
6692 | this_cpu = smp_processor_id(); | |
6450578f | 6693 | regs = task_pt_regs(current); |
1da177e4 LT |
6694 | info = PFM_CPUINFO_GET(); |
6695 | dcr = ia64_getreg(_IA64_REG_CR_DCR); | |
6696 | ||
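 | /* nothing to dump if no monitoring is active on this CPU */ | |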
6697 | if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) { | |
6698 | local_irq_restore(flags); | |
6699 | return; | |
6700 | } | |
6701 | ||
6702 | printk("CPU%d from %s() current [%d] iip=0x%lx %s\n", | |
6703 | this_cpu, | |
6704 | from, | |
19c5870c | 6705 | task_pid_nr(current), |
1da177e4 LT |
6706 | regs->cr_iip, |
6707 | current->comm); | |
6708 | ||
6709 | task = GET_PMU_OWNER(); | |
6710 | ctx = GET_PMU_CTX(); | |
6711 | ||
19c5870c | 6712 | printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx); |
1da177e4 LT |
6713 | |
6714 | psr = pfm_get_psr(); | |
6715 | ||
6716 | printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n", | |
6717 | this_cpu, | |
6718 | ia64_get_pmc(0), | |
6719 | psr & IA64_PSR_PP ? 1 : 0, | |
6720 | psr & IA64_PSR_UP ? 1 : 0, | |
6721 | dcr & IA64_DCR_PP ? 1 : 0, | |
6722 | info, | |
6723 | ia64_psr(regs)->up, | |
6724 | ia64_psr(regs)->pp); | |
6725 | ||
6726 | ia64_psr(regs)->up = 0; | |
6727 | ia64_psr(regs)->pp = 0; | |
6728 | ||
1da177e4 LT |
6729 | for (i=1; PMC_IS_LAST(i) == 0; i++) { |
6730 | if (PMC_IS_IMPL(i) == 0) continue; | |
35589a8f | 6731 | printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]); |
1da177e4 LT |
6732 | } |
6733 | ||
6734 | for (i=1; PMD_IS_LAST(i) == 0; i++) { | |
6735 | if (PMD_IS_IMPL(i) == 0) continue; | |
35589a8f | 6736 | printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]); |
1da177e4 LT |
6737 | } |
6738 | ||
6739 | if (ctx) { | |
6740 | printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n", | |
6741 | this_cpu, | |
6742 | ctx->ctx_state, | |
6743 | ctx->ctx_smpl_vaddr, | |
6744 | ctx->ctx_smpl_hdr, | |
6745 | ctx->ctx_msgq_head, | |
6746 | ctx->ctx_msgq_tail, | |
6747 | ctx->ctx_saved_psr_up); | |
6748 | } | |
6749 | local_irq_restore(flags); | |
6750 | } | |
6751 | ||
6752 | /* | |
6753 | * called from process.c:copy_thread(). task is new child. | |
6754 | */ | |
6755 | void | |
6756 | pfm_inherit(struct task_struct *task, struct pt_regs *regs) | |
6757 | { | |
6758 | struct thread_struct *thread; | |
6759 | ||
19c5870c | 6760 | DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task))); |
1da177e4 LT |
6761 | |
6762 | thread = &task->thread; | |
6763 | ||
6764 | /* | |
6765 | * cut links inherited from parent (current) | |
6766 | */ | |
6767 | thread->pfm_context = NULL; | |
6768 | ||
6769 | PFM_SET_WORK_PENDING(task, 0); | |
6770 | ||
6771 | /* | |
6773 | * the psr bits are already set properly in copy_thread() | |
6773 | */ | |
6774 | } | |
6775 | #else /* !CONFIG_PERFMON */ | |
6776 | asmlinkage long | |
6777 | sys_perfmonctl (int fd, int cmd, void *arg, int count) | |
6778 | { | |
6779 | return -ENOSYS; | |
6780 | } | |
6781 | #endif /* CONFIG_PERFMON */ |