/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(dmainsn);

static void *alloc_dmainsn_page(void)
{
	void *page;

	page = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (page)
		set_memory_x((unsigned long) page, 1);
	return page;
}

static void free_dmainsn_page(void *page)
{
	set_memory_nx((unsigned long) page, 1);
	free_page((unsigned long)page);
}

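/*
 * Slots for probes in the kernel image are allocated with GFP_DMA: on s390
 * the DMA zone lies below 2 GB, so such a slot is guaranteed to be within
 * the same 2 GB area as the kernel text (see s390_get_insn_slot()).
 */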
struct kprobe_insn_cache kprobe_dmainsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
	.alloc = alloc_dmainsn_page,
	.free = free_dmainsn_page,
	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};

static void copy_instruction(struct kprobe *p)
{
	unsigned long ip = (unsigned long) p->addr;
	s64 disp, new_disp;
	u64 addr, new_addr;

	if (ftrace_location(ip) == ip) {
		/*
		 * If kprobes patches the instruction that is morphed by
		 * ftrace, make sure that kprobes always sees the branch
		 * "jg .+24" that skips the mcount block or the "brcl 0,0"
		 * in case of hotpatch.
		 */
		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
		p->ainsn.is_ftrace_insn = 1;
	} else
		memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
	p->opcode = p->ainsn.insn[0];
	if (!probe_is_insn_relative_long(p->ainsn.insn))
		return;
	/*
	 * For pc-relative instructions in RIL-b or RIL-c format patch the
	 * RI2 displacement field. We have already made sure that the insn
	 * slot for the patched instruction is within the same 2GB area
	 * as the original instruction (either kernel image or module area).
	 * Therefore the new displacement will always fit.
	 */
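	/*
	 * Example with hypothetical addresses: a "brasl" at 0x1000 with
	 * displacement 0x100 targets 0x1000 + 0x100 * 2 = 0x1200. If its
	 * copy sits at 0x3000, the new displacement must be
	 * (0x1200 - 0x3000) / 2 = -0xf00 to reach the same target.
	 */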
	disp = *(s32 *)&p->ainsn.insn[1];
	addr = (u64)(unsigned long)p->addr;
	new_addr = (u64)(unsigned long)p->ainsn.insn;
	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
	*(s32 *)&p->ainsn.insn[1] = new_disp;
}
NOKPROBE_SYMBOL(copy_instruction);

static inline int is_kernel_addr(void *addr)
{
	return addr < (void *)_end;
}

static int s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area as the original
	 * instruction. That way instructions with a 32-bit signed displacement
	 * field can be patched and executed within the insn slot.
	 */
	p->ainsn.insn = NULL;
	if (is_kernel_addr(p->addr))
		p->ainsn.insn = get_dmainsn_slot();
	else if (is_module_addr(p->addr))
		p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	if (is_kernel_addr(p->addr))
		free_dmainsn_slot(p->ainsn.insn, 0);
	else
		free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

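/*
 * Reject addresses that cannot be probed: s390 instructions are always
 * halfword aligned, and a few "difficult" opcodes must not be single
 * stepped out of line.
 */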
int arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
	/* Make sure the probe isn't going on a difficult instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

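/*
 * s390 handles kprobes on ftrace call sites itself (see copy_instruction()
 * and swap_instruction()), so skip the generic ftrace location check.
 */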
int arch_check_ftrace_location(struct kprobe *p)
{
	return 0;
}

struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};

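/*
 * swap_instruction() runs as a stop_machine() callback, so no other CPU
 * executes code while the probed location is rewritten. For a plain kprobe
 * only the first halfword is swapped between the original opcode and the
 * breakpoint; for a probe on an ftrace call site the full 6-byte instruction
 * is rewritten and the displacement field keeps the KPROBE_ON_FTRACE_* state.
 */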
static int swap_instruction(void *data)
{
	struct swap_insn_args *args = data;
	struct ftrace_insn new_insn, *insn;
	struct kprobe *p = args->p;
	size_t len;

	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	len = sizeof(new_insn.opc);
	if (!p->ainsn.is_ftrace_insn)
		goto skip_ftrace;
	len = sizeof(new_insn);
	insn = (struct ftrace_insn *) p->addr;
	if (args->arm_kprobe) {
		if (is_ftrace_nop(insn))
			new_insn.disp = KPROBE_ON_FTRACE_NOP;
		else
			new_insn.disp = KPROBE_ON_FTRACE_CALL;
	} else {
		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
		if (insn->disp == KPROBE_ON_FTRACE_NOP)
			ftrace_generate_nop_insn(&new_insn);
	}
skip_ftrace:
	s390_kernel_write(p->addr, &new_insn, len);
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

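/*
 * s390 has no dedicated single-step mode. Instead the PER (Program Event
 * Recording) facility is programmed for instruction-fetch events on exactly
 * the address of the instruction copy, so executing that one instruction
 * raises a PER event, delivered as DIE_SSTEP and handled by
 * post_kprobe_handler().
 */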
static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
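	/* psw.addr points past the 2-byte breakpoint; back up to the probe */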
	p = get_kprobe((void *)(regs->psw.addr - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe save and restore
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs))
				return 1;
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			/*
			 * Continuation after the jprobe completed and
			 * caused the jprobe_return trap. The jprobe
			 * break_handler "returns" to the original
			 * function that still has the kprobe breakpoint
			 * installed. We continue with single stepping.
			 */
			kcb->kprobe_status = KPROBE_HIT_SS;
			enable_singlestep(kcb, regs,
					  (unsigned long) p->ainsn.insn);
			return 1;
		} /* else:
		   * No kprobe at this address and the current kprobe
		   * has no break handler (no jprobe!). The kernel just
		   * exploded, let the standard trap handler pick up the
		   * pieces.
		   */
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address;
	unsigned long trampoline_address;
	kprobe_opcode_t *correct_ret_addr;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	ri = NULL;
	orig_ret_address = 0;
	correct_ret_addr = NULL;
	trampoline_address = (unsigned long) &kretprobe_trampoline;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->psw.addr = orig_ret_address;

	pop_kprobe(get_kprobe_ctlblk());
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	/* Check if the kprobes location is an enabled ftrace caller */
	if (p->ainsn.is_ftrace_insn) {
		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
		struct ftrace_insn call_insn;

		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
		/*
		 * A kprobe on an enabled ftrace call site actually single
		 * stepped an unconditional branch (ftrace nop equivalent).
		 * Now we need to fixup things and pretend that a brasl r0,...
		 * was executed instead.
		 */
		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
		}
	}

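	/*
	 * FIXUP_PSW_NORMAL: rebase the PSW from the insn slot back to the
	 * original instruction. FIXUP_BRANCH_NOT_TAKEN: a not-taken branch
	 * fell through in the slot, continue after the original instruction.
	 * FIXUP_RETURN_REGISTER: a branch-and-save instruction stored a
	 * slot-relative return address, rebase that register as well.
	 */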
	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);
		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);

static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(p);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr);
		if (entry) {
			regs->psw.addr = extable_fixup(entry);
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long) jp->entry;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r15 is the stack pointer */
	stack = (unsigned long) regs->gprs[15];

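	/*
	 * The jprobe handler runs on the same stack at the same stack
	 * pointer, so it can clobber the probed function's stack frame.
	 * Save the topmost part of the stack here; longjmp_break_handler()
	 * restores it together with the saved registers.
	 */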
	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));

	/*
	 * jprobes use jprobe_return(), which skips the normal return
	 * path of the function and thus messes up the accounting of the
	 * function graph tracer.
	 *
	 * Pause function graph tracing while performing the jprobe function.
	 */
	pause_graph_tracing();
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

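/*
 * 0x0002 is BREAKPOINT_INSTRUCTION: executing it raises the same trap as a
 * kprobe breakpoint, so kprobe_handler() invokes the registered
 * break_handler (longjmp_break_handler) to undo setjmp_pre_handler().
 */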
void jprobe_return(void)
{
	asm volatile(".word 0x0002");
}
NOKPROBE_SYMBOL(jprobe_return);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	/* It's OK to start function graph tracing again */
	unpause_graph_tracing();

	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
	preempt_enable_no_resched();
	return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

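/*
 * arch_init_kprobes() registers a kprobe on kretprobe_trampoline itself,
 * so every return that was redirected there ends up in
 * trampoline_probe_handler().
 */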
static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);