kernel/trace/trace_probe_tmpl.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Traceprobe fetch helper inlines
 */

static nokprobe_inline void
fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf)
{
	switch (code->size) {
	case 1:
		*(u8 *)buf = (u8)val;
		break;
	case 2:
		*(u16 *)buf = (u16)val;
		break;
	case 4:
		*(u32 *)buf = (u32)val;
		break;
	case 8:
		//TBD: 32bit signed
		*(u64 *)buf = (u64)val;
		break;
	default:
		*(unsigned long *)buf = val;
	}
}

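/*
 * fetch_apply_bitfield() below extracts a bitfield by shifting left to
 * drop the bits above the field, then shifting right to drop the bits
 * below it and align the field at bit 0. For example, a field of width
 * 8 at bit offset 4 in a u32 would use lshift = 32 - (4 + 8) = 20 and
 * rshift = 32 - 8 = 24.
 */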
static nokprobe_inline void
fetch_apply_bitfield(struct fetch_insn *code, void *buf)
{
	switch (code->basesize) {
	case 1:
		*(u8 *)buf <<= code->lshift;
		*(u8 *)buf >>= code->rshift;
		break;
	case 2:
		*(u16 *)buf <<= code->lshift;
		*(u16 *)buf >>= code->rshift;
		break;
	case 4:
		*(u32 *)buf <<= code->lshift;
		*(u32 *)buf >>= code->rshift;
		break;
	case 8:
		*(u64 *)buf <<= code->lshift;
		*(u64 *)buf >>= code->rshift;
		break;
	}
}

/*
 * These functions must be defined for each callsite.
 * Return the consumed dynamic data size (>= 0), or an error (< 0).
 * If dest is NULL, don't store the result; just return the required
 * dynamic data size.
 */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs,
		   void *dest, void *base);
static nokprobe_inline int fetch_store_strlen(unsigned long addr);
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base);
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size);

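/*
 * Example (a sketch, not part of this header): a kprobe callsite can
 * satisfy probe_mem_read() by wrapping probe_kernel_read(), roughly as
 * kernel/trace/trace_kprobe.c does:
 *
 *	static nokprobe_inline int
 *	probe_mem_read(void *dest, void *src, size_t size)
 *	{
 *		return probe_kernel_read(dest, src, size);
 *	}
 */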
/* From the 2nd stage on, the routine is the same */
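/*
 * Overview of the stages below:
 *   stage2: follow FETCH_OP_DEREF instructions, dereferencing memory
 *           until a store opcode is reached.
 *   stage3: store the fetched value into the buffer (raw value, memory
 *           copy, or string), or only measure the string length when
 *           dest is NULL.
 *   stage4: optionally apply a bitfield modifier to the stored value.
 *   array:  on FETCH_OP_LP_ARRAY, loop back to stage3 (fixed-size
 *           elements) or stage2 (string elements) until 'param'
 *           entries have been processed.
 */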
static nokprobe_inline int
process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
			  void *dest, void *base)
{
	struct fetch_insn *s3 = NULL;
	int total = 0, ret = 0, i = 0;
	u32 loc = 0;
	unsigned long lval = val;

stage2:
	/* 2nd stage: dereference memory if needed */
	while (code->op == FETCH_OP_DEREF) {
		lval = val;
		ret = probe_mem_read(&val, (void *)val + code->offset,
				     sizeof(val));
		if (ret)
			return ret;
		code++;
	}

	s3 = code;
stage3:
	/* 3rd stage: store value to buffer */
	if (unlikely(!dest)) {
		if (code->op == FETCH_OP_ST_STRING) {
			ret += fetch_store_strlen(val + code->offset);
			code++;
			goto array;
		} else
			return -EILSEQ;
	}

	switch (code->op) {
	case FETCH_OP_ST_RAW:
		fetch_store_raw(val, code, dest);
		break;
	case FETCH_OP_ST_MEM:
		probe_mem_read(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_STRING:
		loc = *(u32 *)dest;
		ret = fetch_store_string(val + code->offset, dest, base);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 4th stage: modify stored value if needed */
	if (code->op == FETCH_OP_MOD_BF) {
		fetch_apply_bitfield(code, dest);
		code++;
	}

array:
	/* The last stage: loop over the array */
	if (code->op == FETCH_OP_LP_ARRAY) {
		total += ret;
		if (++i < code->param) {
			code = s3;
			if (s3->op != FETCH_OP_ST_STRING) {
				dest += s3->size;
				val += s3->size;
				goto stage3;
			}
			code--;
			val = lval + sizeof(char *);
			if (dest) {
				dest += sizeof(u32);
				*(u32 *)dest = update_data_loc(loc, ret);
			}
			goto stage2;
		}
		code++;
		ret = total;
	}

	return code->op == FETCH_OP_END ? ret : -EILSEQ;
}

/* Sum up the total data length for dynamic arrays (strings) */
static nokprobe_inline int
__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
{
	struct probe_arg *arg;
	int i, len, ret = 0;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		if (unlikely(arg->dynamic)) {
			len = process_fetch_insn(arg->code, regs, NULL, NULL);
			if (len > 0)
				ret += len;
		}
	}

	return ret;
}

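/*
 * Note: a data location (u32) packs a dynamic item's length and its
 * offset from the event entry; make_data_loc() and update_data_loc()
 * in trace_probe.h keep the length in the upper 16 bits and the
 * offset in the lower 16 bits.
 */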
/* Store the value of each argument */
static nokprobe_inline void
store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
		 int header_size, int maxlen)
{
	struct probe_arg *arg;
	void *base = data - header_size;
	void *dyndata = data + tp->size;
	u32 *dl;	/* Data location */
	int ret, i;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		dl = data + arg->offset;
		/* Point to the dynamic data area if needed */
		if (unlikely(arg->dynamic))
			*dl = make_data_loc(maxlen, dyndata - base);
		ret = process_fetch_insn(arg->code, regs, dl, base);
		if (unlikely(ret < 0 && arg->dynamic)) {
			*dl = make_data_loc(0, dyndata - base);
		} else {
			dyndata += ret;
			maxlen -= ret;
		}
	}
}

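/*
 * Print each argument as " name=value"; an array argument is printed
 * as " name={v0,v1,...}" by looping over its elements.
 */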
static inline int
print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
		 u8 *data, void *field)
{
	void *p;
	int i, j;

	for (i = 0; i < nr_args; i++) {
		struct probe_arg *a = args + i;

		trace_seq_printf(s, " %s=", a->name);
		if (likely(!a->count)) {
			if (!a->type->print(s, data + a->offset, field))
				return -ENOMEM;
			continue;
		}
		trace_seq_putc(s, '{');
		p = data + a->offset;
		for (j = 0; j < a->count; j++) {
			if (!a->type->print(s, p, field))
				return -ENOMEM;
			trace_seq_putc(s, j == a->count - 1 ? '}' : ',');
			p += a->type->size;
		}
	}
	return 0;
}