/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2003 Intel Co
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Arun Sharma <arun.sharma@intel.com>
 *
 * 12/07/98	S. Eranian	added pt_regs & switch_stack
 * 12/21/98	D. Mosberger	updated to match latest code
 *  6/17/99	D. Mosberger	added second unat member to "struct switch_stack"
 *
 */
#ifndef _ASM_IA64_PTRACE_H
#define _ASM_IA64_PTRACE_H

/* asm-offsets.h is generated FROM this header's consumers; skip it when
 * building asm-offsets.c itself to avoid a circular dependency. */
#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
#include <uapi/asm/ptrace.h>

/*
 * Base-2 logarithm of number of pages to allocate per task structure
 * (including register backing store and memory stack):
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define KERNEL_STACK_SIZE_ORDER	3
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define KERNEL_STACK_SIZE_ORDER	2
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define KERNEL_STACK_SIZE_ORDER	1
#else
# define KERNEL_STACK_SIZE_ORDER	0
#endif

/* Offset of the register backing store within the per-task area:
 * just past task_struct + thread_info, rounded up to 32 bytes. */
#define IA64_RBS_OFFSET		((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
/* Total size of the per-task area; the memory stack grows down from here. */
#define IA64_STK_OFFSET		((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)

#define KERNEL_STACK_SIZE	IA64_STK_OFFSET

#ifndef __ASSEMBLY__

#include <asm/current.h>
#include <asm/page.h>

/*
 * We use the ia64_psr(regs)->ri to determine which of the three
 * instructions in bundle (16 bytes) took the sample. Generate
 * the canonical representation by adding to instruction pointer.
 */
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)

cfb361f1 SL |
55 | static inline unsigned long user_stack_pointer(struct pt_regs *regs) |
56 | { | |
57 | /* FIXME: should this be bspstore + nr_dirty regs? */ | |
58 | return regs->ar_bspstore; | |
59 | } | |
60 | ||
d7e7528b EP |
61 | static inline int is_syscall_success(struct pt_regs *regs) |
62 | { | |
63 | return regs->r10 != -1; | |
64 | } | |
65 | ||
66 | static inline long regs_return_value(struct pt_regs *regs) | |
67 | { | |
68 | if (is_syscall_success(regs)) | |
69 | return regs->r8; | |
70 | else | |
71 | return -regs->r8; | |
72 | } | |
b3f827cb | 73 | |
/* Conserve space in histogram by encoding slot bits in address
 * bits 2 and 3 rather than bits 0 and 1.
 */
#define profile_pc(regs)						\
({									\
	unsigned long __ip = instruction_pointer(regs);			\
	(__ip & ~3UL) + ((__ip & 3UL) << 2);				\
})

/*
 * Why not default? Because user_stack_pointer() on ia64 gives register
 * stack backing store instead...  r12 is the user's memory stack
 * pointer.
 */
#define current_user_stack_pointer() (current_pt_regs()->r12)

/* given a pointer to a task_struct, return the user's pt_regs; they sit
 * at the very top of the per-task area, just below IA64_STK_OFFSET */
# define task_pt_regs(t)	(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
/* view the raw cr_ipsr word as a struct ia64_psr bitfield */
# define ia64_psr(regs)		((struct ia64_psr *) &(regs)->cr_ipsr)
/* privilege level != 0 means the trap came from user mode */
# define user_mode(regs)	(((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
/* true iff regs is the user-level pt_regs frame of this task */
# define user_stack(task,regs)	((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
/* kernel mode but sitting on the user-level frame: fast-syscall path */
# define fsys_mode(task,regs)					\
  ({								\
	  struct task_struct *_task = (task);			\
	  struct pt_regs *_regs = (regs);			\
	  !user_mode(_regs) && user_stack(_task, _regs);	\
  })

/*
 * System call handlers that, upon successful completion, need to return a negative value
 * should call force_successful_syscall_return() right before returning. On architectures
 * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
 * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
 * flag will not get set. On architectures which do not support a separate error flag,
 * the macro is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
 * or something along those lines).
 *
 * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
 */
# define force_successful_syscall_return()	(task_pt_regs(current)->r8 = 0)


struct task_struct;			/* forward decl */
struct unw_frame_info;			/* forward decl */

extern void ia64_do_show_stack (struct unw_frame_info *, void *);
extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
					    unsigned long *);
/* read/write one word of a traced task's memory or register backing store */
extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
		       unsigned long, long *);
extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
		       unsigned long, long);
extern void ia64_flush_fph (struct task_struct *);
extern void ia64_sync_fph (struct task_struct *);
extern void ia64_sync_krbs(void);
extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
				unsigned long, unsigned long);

/* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
/* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);

/* step the instruction pointer forward/backward one slot within a bundle */
extern void ia64_increment_ip (struct pt_regs *pt);
extern void ia64_decrement_ip (struct pt_regs *pt);

/* flush the register backing store to user memory before a ptrace stop,
 * but only when TIF_RESTORE_RSE says there is something to sync */
extern void ia64_ptrace_stop(void);
#define arch_ptrace_stop(code, info) \
	ia64_ptrace_stop()
#define arch_ptrace_stop_needed(code, info) \
	(!test_thread_flag(TIF_RESTORE_RSE))

/* sync the user RBS when a tracer attaches to an already-running task */
extern void ptrace_attach_sync_user_rbs (struct task_struct *);
#define arch_ptrace_attach(child) \
	ptrace_attach_sync_user_rbs(child)

#define arch_has_single_step()  (1)
#define arch_has_block_step()   (1)

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PTRACE_H */