/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

/* These are for 32bit counters. For 64bit ones, define them accordingly. */
#define MAX_PERIOD	((1ULL << 32) - 1)
#define VALID_COUNT	0x7fffffff
#define TOTAL_BITS	32
#define HIGHEST_BIT	31
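/*
 * With these 32bit definitions, the hardware counter only ever holds
 * bits 30..0 of a value (VALID_COUNT); bit 31 (HIGHEST_BIT) is the
 * hardware overflow/interrupt bit. The "missing" MSB of each counter
 * is tracked in software in cpu_hw_events::msbs below, which is how
 * the full MAX_PERIOD (2^32 - 1) range is still presented to the
 * perf core.
 */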

#define MIPS_MAX_HWEVENTS 4

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event *events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * The borrowed MSB for the performance counter. A MIPS performance
	 * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
	 * counters) as a factor of determining whether a counter overflow
	 * should be signaled. So here we use a separate MSB for each
	 * counter to make things easy.
	 */
	unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPUs vary in their performance counters, so
	 * different backends use this field differently, and some may
	 * not use it at all.
	 */
	unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates that the counters to be used have
	 * even indexes.
	 */
	unsigned int cntr_mask;
#define CNTR_EVEN	0x55555555
#define CNTR_ODD	0xaaaaaaaa
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T = 0,
		V = 1,
		P = 2,
	} range;
#else
#define T
#define V
#define P
#endif
};
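/*
 * CNTR_EVEN (0x55555555) has bits 0, 2, 4, ... set and CNTR_ODD
 * (0xaaaaaaaa) has bits 1, 3, 5, ... set, so an event tagged CNTR_EVEN
 * may only be scheduled on an even-numbered counter (0 or 2 on a
 * 4-counter CPU) and a CNTR_ODD event only on an odd-numbered one.
 */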

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	const char *name;
	int irq;
	irqreturn_t (*handle_irq)(int irq, void *dev);
	int (*handle_shared_irq)(void);
	void (*start)(void);
	void (*stop)(void);
	int (*alloc_counter)(struct cpu_hw_events *cpuc,
			struct hw_perf_event *hwc);
	u64 (*read_counter)(unsigned int idx);
	void (*write_counter)(unsigned int idx, u64 val);
	void (*enable_event)(struct hw_perf_event *evt, int idx);
	void (*disable_event)(int idx);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int num_counters;
};

static const struct mips_pmu *mipspmu;

static int
mipspmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;
	u64 uleft;
	unsigned long flags;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	local_irq_save(flags);
	uleft = (u64)(-left) & MAX_PERIOD;
	if (uleft > VALID_COUNT)
		set_bit(idx, cpuc->msbs);
	else
		clear_bit(idx, cpuc->msbs);
	mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
	local_irq_restore(flags);

	perf_event_update_userpage(event);

	return ret;
}
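/*
 * Worked example for the preload above: with hwc->sample_period ==
 * 0x1000, left == 0x1000 and (u64)(-left) & MAX_PERIOD == 0xfffff000.
 * That is greater than VALID_COUNT, so the software MSB for this
 * counter is set in cpuc->msbs, and only the low 31 bits (0x7ffff000)
 * are written to the hardware. Counting up from there, hardware bit 31
 * flips after exactly 0x1000 events, raising the overflow interrupt at
 * the requested sample period.
 */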

static void mipspmu_event_update(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int shift = 64 - TOTAL_BITS;
	s64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	local_irq_save(flags);
	/* Make the counter value be a "real" one. */
	new_raw_count = mipspmu->read_counter(idx);
	if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
		new_raw_count &= VALID_COUNT;
		clear_bit(idx, cpuc->msbs);
	} else
		new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
	local_irq_restore(flags);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}
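/*
 * Note on the delta computation above: shift == 64 - TOTAL_BITS == 32,
 * so both raw counts are moved into the upper half of a 64-bit word
 * before subtracting and shifted back down afterwards. The subtraction
 * therefore wraps modulo 2^32, keeping the delta correct even when the
 * 32-bit counter value has wrapped around between two reads.
 */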

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!mipspmu)
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipspmu->enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!mipspmu)
		return;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipspmu->disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipspmu->alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipspmu->disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
	if (mipspmu)
		mipspmu->start();
}

static void mipspmu_disable(struct pmu *pmu)
{
	if (mipspmu)
		mipspmu->stop();
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu->irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu->irq, mipspmu->handle_irq,
			IRQF_DISABLED | IRQF_NOBALANCING,
			"mips_perf_pmu", NULL);
		if (err) {
			pr_warning("Unable to request IRQ%d for MIPS performance counters!\n",
				mipspmu->irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipspmu->handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}
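/*
 * Three cases above: a dedicated PMU interrupt (mipspmu->irq >= 0) is
 * requested directly; cp0_perfcount_irq < 0 means counter overflows
 * arrive on the same line as the CP0 timer, so the shared handler is
 * hooked in via the perf_irq pointer; otherwise the platform never
 * declared how counter interrupts are routed and the PMU is unusable.
 */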

static void mipspmu_free_irq(void)
{
	if (mipspmu->irq >= 0)
		free_irq(mipspmu->irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters, so they
 * have specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu->num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
		(event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
			atomic_dec(&active_events);
			return -ENOSPC;
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
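/*
 * Event accounting above: atomic_inc_not_zero() is the fast path taken
 * while other events are already alive. The first event falls through
 * to the mutex-protected slow path, where the counter IRQ is claimed
 * exactly once via mipspmu_get_irq(); the matching release happens in
 * hw_perf_event_destroy() through atomic_dec_and_mutex_lock().
 */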

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};
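/*
 * These callbacks are how the perf core drives this PMU: event_init
 * validates and sets up an event, add/del install and remove it on a
 * hardware counter, start/stop gate actual counting, read folds the
 * current hardware count into the event, and pmu_enable/pmu_disable
 * bracket rescheduling of the whole counter set.
 */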

static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
	/*
	 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
	 * event_id.
	 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}
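/*
 * Layout of the encoded event (CONFIG_MIPS_MT_SMP shown):
 *
 *   31     24 23            8 7          0
 *  +---------+---------------+------------+
 *  |  range  |   cntr_mask   |  event_id  |
 *  +---------+---------------+------------+
 *
 * Only bits 8..23 of cntr_mask survive the 0xffff00 mask; the range
 * byte is simply absent from non-MT encodings.
 */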

static const struct mips_perf_event *
mipspmu_map_general_event(int idx)
{
	const struct mips_perf_event *pev;

	pev = ((*mipspmu->general_event_map)[idx].event_id ==
		UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
		&(*mipspmu->general_event_map)[idx]);

	return pev;
}

static const struct mips_perf_event *
mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu->cache_event_map)
		[cache_type]
		[cache_op]
		[cache_result]);

	if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}
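/*
 * The decoding above follows the generic perf ABI for cache events:
 * config bits 0..7 select the cache (e.g. PERF_COUNT_HW_CACHE_L1D),
 * bits 8..15 the operation and bits 16..23 the result. For example,
 * L1D read misses are requested as
 * config == C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16).
 */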

static int validate_event(struct cpu_hw_events *cpuc,
			struct perf_event *event)
{
	struct hw_perf_event fake_hwc = event->hw;

	/* Allow mixed event groups, so return 1 to pass validation. */
	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (!validate_event(&fake_cpuc, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_cpuc, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_cpuc, event))
		return -ENOSPC;

	return 0;
}
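/*
 * validate_group() dry-runs counter allocation: the leader, every
 * sibling and the new event are run through alloc_counter() against a
 * zeroed fake cpu_hw_events, so the group is accepted only if all of
 * its events could occupy hardware counters simultaneously. No real
 * counter state is touched.
 */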

/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
	int idx, struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, 0, data, regs))
		mipspmu->disable_event(idx);
}

#include "perf_event_mipsxx.c"

/* Callchain handling code. */

/*
 * Leave userspace callchain empty for now. When we find a way to trace
 * the user stack callchains, we will add it here.
 */
void perf_callchain_user(struct perf_callchain_entry *entry,
		    struct pt_regs *regs)
{
}

static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
	unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)reg29;
	unsigned long addr;

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (__kernel_text_address(addr)) {
			perf_callchain_store(entry, addr);
			if (entry->nr >= PERF_MAX_STACK_DEPTH)
				break;
		}
	}
}
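/*
 * save_raw_perf_callchain() is the fallback when frame-by-frame
 * unwinding is unavailable: it scans every word on the kernel stack
 * and records whatever looks like a kernel text address. That can
 * include stale return addresses still sitting on the stack, which is
 * why perf_callchain_kernel() below prefers unwind_stack() when
 * CONFIG_KALLSYMS is enabled.
 */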

void perf_callchain_kernel(struct perf_callchain_entry *entry,
		      struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		unsigned long stack_page =
			(unsigned long)task_stack_page(current);
		if (stack_page && sp >= stack_page &&
		    sp <= stack_page + THREAD_SIZE - 32)
			save_raw_perf_callchain(entry, sp);
		return;
	}
	do {
		perf_callchain_store(entry, pc);
		if (entry->nr >= PERF_MAX_STACK_DEPTH)
			break;
		pc = unwind_stack(current, &sp, pc, &ra);
	} while (pc);
#else
	save_raw_perf_callchain(entry, sp);
#endif
}