/*
 *  Kernel Probes (KProbes)
 *  arch/ia64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr	Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *		<anil.s.keshavamurthy@intel.com> adapted from i386
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/exception.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
enum instruction_type {A, I, M, F, B, L, X, u};

static enum instruction_type bundle_encoding[32][3] = {
	/* table body elided in this excerpt: one { slot0, slot1, slot2 }
	 * entry per bundle template, giving the unit type (M, I, F, B,
	 * L/X, or u for undefined) of each slot */
};
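/*
 * Background: an IA-64 instruction bundle is 128 bits wide and holds a
 * 5-bit template plus three 41-bit instruction slots. The template
 * determines which execution-unit type runs each slot, which is exactly
 * what bundle_encoding records; several checks below key off the slot's
 * unit type (B, X, I, M, F) rather than fully decoding the instruction.
 */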
/* Insert a long branch code */
static void __kprobes set_brl_inst(void *from, void *to)
{
	s64 rel = ((s64) to - (s64) from) >> 4;
	bundle_t *brl;

	brl = (bundle_t *) ((u64) from & ~0xf);
	brl->quad0.template = 0x05;	/* [MLX](stop) */
	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
}
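/*
 * Note on the split above: slot 1 of a bundle straddles the two 64-bit
 * words of the bundle, so its 41-bit instruction is stored as 18 low
 * bits in quad0 (slot1_p0) and 23 high bits in quad1 (slot1_p1) --
 * hence the ">> (64 - 46)" manipulation wherever slot 1 is written in
 * this file.
 */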
/*
 * In this function we check to see if the instruction
 * is an IP-relative instruction and update the kprobe
 * inst flag accordingly
 */
static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
					      uint major_opcode,
					      unsigned long kprobe_inst,
					      struct kprobe *p)
{
	p->ainsn.inst_flag = 0;
	p->ainsn.target_br_reg = 0;
	p->ainsn.slot = slot;

	/* Check for Break instruction
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 32:35 X3 to be zero
	 */
	if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF))) {
		/* is a break instruction */
		p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
		return;
	}

	if (bundle_encoding[template][slot] == B) {
		switch (major_opcode) {
		case INDIRECT_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		case IP_RELATIVE_PREDICT_OPCODE:
		case IP_RELATIVE_BRANCH_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			break;
		case IP_RELATIVE_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	} else if (bundle_encoding[template][slot] == X) {
		switch (major_opcode) {
		case LONG_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	}
}
/*
 * In this function we check to see if the instruction
 * (qp) cmpx.crel.ctype p1,p2=r2,r3
 * on which we are inserting kprobe is a cmp instruction
 * with ctype as unc.
 */
static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
					    uint major_opcode,
					    unsigned long kprobe_inst)
{
	cmp_inst_t cmp_inst;
	uint ctype_unc = 0;

	if (!((bundle_encoding[template][slot] == I) ||
	      (bundle_encoding[template][slot] == M)))
		goto out;

	if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
	      (major_opcode == 0xE)))
		goto out;

	cmp_inst.l = kprobe_inst;
	if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
		/* Integer compare - Register Register (A6 type) */
		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
		    && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	} else if ((cmp_inst.f.x2 == 2) || (cmp_inst.f.x2 == 3)) {
		/* Integer compare - Immediate Register (A8 type) */
		if ((cmp_inst.f.ta == 0) && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	}
out:
	return ctype_unc;
}
/*
 * In this function we check to see if the instruction
 * on which we are inserting kprobe is supported.
 * Returns qp value if supported
 * Returns -EINVAL if unsupported
 */
static int __kprobes unsupported_inst(uint template, uint slot,
				      uint major_opcode,
				      unsigned long kprobe_inst,
				      unsigned long addr)
{
	int qp;

	qp = kprobe_inst & 0x3f;
	if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
		if (slot == 1 && qp) {
			printk(KERN_WARNING "Kprobes on cmp unc "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
			return -EINVAL;
		}
		qp = 0;
	} else if (bundle_encoding[template][slot] == I) {
		if (major_opcode == 0) {
			/*
			 * Check for Integer speculation instruction
			 * - Bit 33-35 to be equal to 0x1
			 */
			if (((kprobe_inst >> 33) & 0x7) == 1) {
				printk(KERN_WARNING
					"Kprobes on speculation inst at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
			/*
			 * IP relative mov instruction
			 * - Bit 27-35 to be equal to 0x30
			 */
			if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
				printk(KERN_WARNING
					"Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
		} else if ((major_opcode == 5) &&
			   !(kprobe_inst & (0xFUl << 33)) &&
			   (kprobe_inst & (0x1UL << 12))) {
			/* test bit instructions, tbit,tnat,tf
			 * bit 33-36 to be equal to 0
			 * bit 12 to be equal to 1
			 */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on test bit "
						"instruction on slot at <0x%lx> "
						"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
	} else if (bundle_encoding[template][slot] == B) {
		if (major_opcode == 7) {
			/* IP-Relative Predict major code is 7 */
			printk(KERN_WARNING "Kprobes on IP-Relative "
					"Predict is not supported\n");
			return -EINVAL;
		} else if (major_opcode == 2) {
			/* Indirect Predict, major code is 2
			 * bit 27-32 to be equal to 10 or 11
			 */
			int x6 = (kprobe_inst >> 27) & 0x3F;
			if ((x6 == 0x10) || (x6 == 0x11)) {
				printk(KERN_WARNING "Kprobes on "
					"Indirect Predict is not supported\n");
				return -EINVAL;
			}
		}
	}
	/* The kernel does not use float instructions; for safety, kprobes
	 * still checks whether this is an fcmp/fclass/float-approximation
	 * instruction
	 */
	else if (unlikely(bundle_encoding[template][slot] == F)) {
		if ((major_opcode == 4 || major_opcode == 5) &&
		    (kprobe_inst & (0x1 << 12))) {
			/* fcmp/fclass unc instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on fcmp/fclass "
					"instruction on slot at <0x%lx> "
					"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
		if ((major_opcode == 0 || major_opcode == 1) &&
		    (kprobe_inst & (0x1UL << 33))) {
			/* float Approximation instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on float Approx "
					"instr at <0x%lx> is not supported\n",
					addr);
				return -EINVAL;
			}
			qp = 0;
		}
	}
	return qp;
}
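/*
 * A note on the slot-1 rejections above: cmp.unc, test-bit, and the
 * fcmp/fclass/float-approximation forms update predicate or FP state
 * even when their qualifying predicate is false, so the probe's break
 * must fire unconditionally (qp forced to 0). For a slot-1 instruction
 * the qp field lives in the lower half of the slot, which arming leaves
 * untouched (see arch_arm_kprobe() below), so that rewrite is not
 * possible and registration is refused instead.
 */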
/*
 * In this function we override the bundle with
 * the break instruction at the given slot.
 */
static void __kprobes prepare_break_inst(uint template, uint slot,
					 uint major_opcode,
					 unsigned long kprobe_inst,
					 struct kprobe *p,
					 int qp)
{
	unsigned long break_inst = BREAK_INST;
	bundle_t *bundle = &p->opcode.bundle;

	/*
	 * Copy the original kprobe_inst qualifying predicate(qp)
	 * to the break instruction
	 */
	break_inst |= qp;

	switch (slot) {
	case 0:
		bundle->quad0.slot0 = break_inst;
		break;
	case 1:
		bundle->quad0.slot1_p0 = break_inst;
		bundle->quad1.slot1_p1 = break_inst >> (64 - 46);
		break;
	case 2:
		bundle->quad1.slot2 = break_inst;
		break;
	}

	/*
	 * Update the instruction flag, so that we can
	 * emulate the instruction properly after we
	 * single step on original instruction
	 */
	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
}
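/*
 * BREAK_INST is a break instruction (major opcode 0) whose 21-bit
 * immediate is the kprobe break number __IA64_BREAK_KPROBE (see the
 * break numbers in asm/break.h); kprobe_exceptions_notify() at the
 * bottom of this file recognizes the resulting fault by that number.
 */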
static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
		unsigned long *kprobe_inst, uint *major_opcode)
{
	unsigned long kprobe_inst_p0, kprobe_inst_p1;
	unsigned int template;

	template = bundle->quad0.template;

	switch (slot) {
	case 0:
		*major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad0.slot0;
		break;
	case 1:
		*major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
		kprobe_inst_p0 = bundle->quad0.slot1_p0;
		kprobe_inst_p1 = bundle->quad1.slot1_p1;
		*kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64 - 46));
		break;
	case 2:
		*major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad1.slot2;
		break;
	}
}
/* Returns non-zero if the addr is in the Interrupt Vector Table */
static int __kprobes in_ivt_functions(unsigned long addr)
{
	return (addr >= (unsigned long)__start_ivt_text
		&& addr < (unsigned long)__end_ivt_text);
}

static int __kprobes valid_kprobe_addr(int template, int slot,
				       unsigned long addr)
{
	if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
		printk(KERN_WARNING "Attempting to insert unaligned kprobe "
				"at 0x%lx\n", addr);
		return -EINVAL;
	}

	if (in_ivt_functions(addr)) {
		printk(KERN_WARNING "Kprobes can't be inserted inside "
				"IVT functions at 0x%lx\n", addr);
		return -EINVAL;
	}

	return 0;
}
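/*
 * Unlike most architectures, which remember a single previous kprobe,
 * ia64 keeps a small stack of them in the per-CPU control block
 * (prev_kprobe[], indexed through the atomic prev_kprobe_index), so
 * nested probe hits are saved and restored in strict LIFO order.
 */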
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;

	i = atomic_add_return(1, &kcb->prev_kprobe_index);
	kcb->prev_kprobe[i-1].kp = kprobe_running();
	kcb->prev_kprobe[i-1].status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;

	i = atomic_read(&kcb->prev_kprobe_index);
	__this_cpu_write(current_kprobe, kcb->prev_kprobe[i-1].kp);
	kcb->kprobe_status = kcb->prev_kprobe[i-1].status;
	atomic_sub(1, &kcb->prev_kprobe_index);
}

static void __kprobes set_current_kprobe(struct kprobe *p,
					 struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
}
static void kretprobe_trampoline(void)
{
}

/*
 * At this point the target function has been tricked into
 * returning into our trampoline. Lookup the associated instance
 * and then:
 *    - call the handler function
 *    - cleanup by marking the instance as unused
 *    - long jump back to the original return address
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		((struct fnptr *)kretprobe_trampoline)->ip;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path
	 * have a return probe installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->cr_iip = orig_ret_address;

	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}
	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
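/*
 * For orientation, a minimal (hypothetical) kretprobe user of this
 * machinery could look like the sketch below; the handler name and
 * probed symbol are illustrative only, not part of this file:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("probed function returned %ld\n", regs->r8);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= my_ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	...
 *	register_kretprobe(&my_kretprobe);
 */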
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->b0;

	/* Replace the return addr with trampoline addr */
	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
}
/* Check whether the instruction in the slot is a break */
static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
{
	unsigned int major_opcode;
	unsigned int template = bundle->quad0.template;
	unsigned long kprobe_inst;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get Kprobe probe instruction at given slot */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	/* For break instruction,
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 32:35 X3 to be zero
	 */
	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
		/* Not a break instruction */
		return 0;
	}

	/* Is a break instruction */
	return 1;
}
/*
 * In this function, we check whether the target bundle modifies IP or
 * it triggers an exception. If so, it cannot be boostable.
 */
static int __kprobes can_boost(bundle_t *bundle, uint slot,
			       unsigned long bundle_addr)
{
	unsigned int template = bundle->quad0.template;

	do {
		if (search_exception_tables(bundle_addr + slot) ||
		    __is_ia64_break_inst(bundle, slot))
			return 0;	/* exception may occur in this bundle */
	} while ((++slot) < 3);

	template &= 0x1e;
	if (template >= 0x10 /* including B unit */ ||
	    template == 0x04 /* including X unit */ ||
	    template == 0x06) /* undefined */
		return 0;

	return 1;
}
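/*
 * "Boosting" lets a probe skip the single-step trap: execution jumps
 * directly into the copied bundle in the instruction slot, and the brl
 * bundle that prepare_booster() places right behind it (built with
 * set_brl_inst() above) branches back to the bundle following the probe
 * point. can_boost() refuses any bundle that might fault, contain a
 * break, or branch, since those would escape that straight-line path.
 */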
/* Prepare a long-jump bundle and disable boosters in earlier slots if needed */
static void __kprobes prepare_booster(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
	unsigned int slot = (unsigned long)p->addr & 0xf;
	struct kprobe *other_kp;

	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
	}

	/* disable boosters in previous slots */
	for (; addr < (unsigned long)p->addr; addr++) {
		other_kp = get_kprobe((void *)addr);
		if (other_kp)
			other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
	}
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long) p->addr;
	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
	unsigned long kprobe_inst = 0;
	unsigned int slot = addr & 0xf, template, major_opcode = 0;
	bundle_t *bundle;
	int qp;

	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
	template = bundle->quad0.template;

	if (valid_kprobe_addr(template, slot, addr))
		return -EINVAL;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get kprobe_inst and major_opcode from the bundle */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
	if (qp < 0)
		return -EINVAL;

	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));

	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);

	prepare_booster(p);

	return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	src = &p->opcode.bundle;

	flush_icache_range((unsigned long)p->ainsn.insn,
			   (unsigned long)p->ainsn.insn +
			   sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);

	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}
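/*
 * Note that arming a slot-1 probe stores only quad1.slot1_p1: the upper
 * half of the slot carries the major opcode and X6 bits that make the
 * instruction a break, and a single aligned 64-bit store is atomic
 * while rewriting both 64-bit halves of the bundle is not. The lower
 * half (slot1_p0) keeps the displaced instruction's bits, which is why
 * kprobe_exceptions_notify() masks the low bits of the break number
 * before comparing.
 */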
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
	src = &p->ainsn.insn->bundle;
	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn,
			       p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
		p->ainsn.insn = NULL;
	}
}
/*
 * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust
 * the ip to point back to the original stack address, handling the cases
 * where we need to fix up a relative IP address and/or a branch register.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
	unsigned long template;
	int slot = ((unsigned long)p->addr & 0xf);

	template = p->ainsn.insn->bundle.quad0.template;

	if (slot == 1 && bundle_encoding[template][1] == L)
		slot = 2;

	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {
		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
			/* Fix relative IP address */
			regs->cr_iip = (regs->cr_iip - bundle_addr) +
					resume_addr;
		}

		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
			/*
			 * Fix target branch register, software convention is
			 * to use either b0 or b6 or b7, so just checking
			 * only those registers
			 */
			switch (p->ainsn.target_br_reg) {
			case 0:
				if ((regs->b0 == bundle_addr) ||
				    (regs->b0 == bundle_addr + 0x10)) {
					regs->b0 = (regs->b0 - bundle_addr) +
							resume_addr;
				}
				break;
			case 6:
				if ((regs->b6 == bundle_addr) ||
				    (regs->b6 == bundle_addr + 0x10)) {
					regs->b6 = (regs->b6 - bundle_addr) +
							resume_addr;
				}
				break;
			case 7:
				if ((regs->b7 == bundle_addr) ||
				    (regs->b7 == bundle_addr + 0x10)) {
					regs->b7 = (regs->b7 - bundle_addr) +
							resume_addr;
				}
				break;
			}
		}
	}

	if (regs->cr_iip == bundle_addr + 0x10) {
		regs->cr_iip = resume_addr + 0x10;
	}

	if (regs->cr_iip == bundle_addr) {
		regs->cr_iip = resume_addr;
	}

	/* Turn off Single Step bit */
	ia64_psr(regs)->ss = 0;
}
static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
	unsigned long slot = (unsigned long)p->addr & 0xf;

	/* single step inline if break instruction */
	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
	else
		regs->cr_iip = bundle_addr & ~0xFULL;

	if (slot > 2)
		slot = 0;

	ia64_psr(regs)->ri = slot;

	/* turn on single stepping */
	ia64_psr(regs)->ss = 1;
}
static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
{
	unsigned int slot = ia64_psr(regs)->ri;
	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
	bundle_t bundle;

	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));

	return __is_ia64_break_inst(&bundle, slot);
}
static int __kprobes pre_kprobes_handler(struct die_args *args)
{
	struct kprobe *p;
	int ret = 0;
	struct pt_regs *regs = args->regs;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Handle recursion cases */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
			    (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
				ia64_psr(regs)->ss = 0;
				goto no_kprobe;
			}
			/* We have reentered the pre_kprobes_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_ss(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else if (!is_ia64_break_inst(regs)) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it, so no further
			 * handling of this interrupt is appropriate
			 */
			ret = 1;
			goto no_kprobe;
		} else {
			/* Not our break */
			goto no_kprobe;
		}
	}

	p = get_kprobe(addr);
	if (!p) {
		if (!is_ia64_break_inst(regs)) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		/* Not one of our breaks, let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
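	/*
	 * Boosting is only used on non-preemptible kernels: a preempted
	 * task could still be executing inside the copied bundle when its
	 * instruction slot is recycled, and there is no cheap way to
	 * detect that here.
	 */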
#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		ia64_psr(regs)->ri = p->ainsn.slot;
		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
		/* turn single stepping off */
		ia64_psr(regs)->ss = 0;

		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
#endif
	prepare_ss(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the instruction pointer points back to
		 * the probe address, and allow the page fault handler
		 * to continue as a normal page fault.
		 */
		regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
		ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this could happen
		 * if a handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (ia64_done_with_exception(regs))
			return 1;

		/*
		 * Let ia64_do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BREAK:
		/* err is break number from ia64_bad_break() */
		if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
			|| args->err == 0)
			if (pre_kprobes_handler(args))
				ret = NOTIFY_STOP;
		break;
	case DIE_FAULT:
		/* err is vector number from ia64_fault() */
		if (args->err == 36)
			if (post_kprobes_handler(args->regs))
				ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
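/*
 * A note on the (args->err >> 12) comparison above: for a slot-1 probe
 * only the upper half of the slot is patched (see arch_arm_kprobe()),
 * so the low 12 bits of the reported break number may still carry bits
 * of the displaced instruction and cannot be matched exactly.
 */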
struct param_bsp_cfm {
	unsigned long ip;
	unsigned long *bsp;
	unsigned long cfm;
};

static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
{
	unsigned long ip;
	struct param_bsp_cfm *lp = arg;

	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;
		if (ip == lp->ip) {
			unw_get_bsp(info, (unsigned long *)&lp->bsp);
			unw_get_cfm(info, (unsigned long *)&lp->cfm);
			return;
		}
	} while (unw_unwind(info) >= 0);
	lp->bsp = NULL;
	lp->cfm = 0;
}
unsigned long arch_deref_entry_point(void *entry)
{
	return ((struct fnptr *)entry)->ip;
}

static struct kprobe trampoline_p = {
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	trampoline_p.addr =
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr ==
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip)
		return 1;

	return 0;
}