// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
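
/*
 * klp_ops has one entry per patched function address.  All patches that
 * patch the same old function share a single klp_ops (and thus a single
 * ftrace_ops); their klp_func structs are stacked on ops->func_stack,
 * and the entry at the front of that list wins.  See the struct
 * definition in patch.h.
 */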
static LIST_HEAD(klp_ops);
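
/*
 * Map an old function address back to its klp_ops entry, if any.  Also
 * used by the stack checking code in transition.c.
 */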
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * The ftrace_test_recursion_trylock() will disable preemption,
	 * which is required for the variant of synchronize_rcu() that is
	 * used to allow patching functions where RCU is not watching.
	 * See klp_synchronize_transition() for more details.
	 */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

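	/*
	 * Example (hypothetical names): suppose patches P1 and P2 both
	 * patch foo(), so ops->func_stack is P2's func -> P1's func.
	 * While P2 is transitioning, a task still in KLP_UNPATCHED runs
	 * P1's version via the stack_node.next step above; with only P1
	 * applied, that step would hit the list head and the original
	 * foo() would run instead.
	 */
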
	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(fregs, (unsigned long)func->new_func);

unlock:
	ftrace_test_recursion_unlock(bit);
}

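/*
 * For reference, the redirection above is what makes a minimal patch
 * module work.  A sketch modeled on samples/livepatch/livepatch-sample.c
 * (the function names come from that sample):
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,	// name == NULL means vmlinux
 *		}, { }
 *	};
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 * Calling klp_enable_patch(&patch) from the module's init function
 * eventually reaches klp_patch_func() below, which hooks the patched
 * function's ftrace site with klp_ftrace_handler().
 */
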
/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
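/*
 * (powerpc, for example, provides its own klp_get_ftrace_location():
 * there the ftrace site sits a few instructions past the function's
 * entry point, so faddr alone is not enough.)
 */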

static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

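		/*
		 * Flag choice, per their documentation in <linux/ftrace.h>:
		 * DYNAMIC because this ftrace_ops is allocated and freed at
		 * runtime; SAVE_REGS (only where the arch lacks
		 * DYNAMIC_FTRACE_WITH_ARGS) so the handler gets registers it
		 * can modify; IPMODIFY because the handler rewrites the
		 * instruction pointer; PERMANENT so the ops survives toggling
		 * the ftrace_enabled sysctl.
		 */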
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
				  FTRACE_OPS_FL_SAVE_REGS |
#endif
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
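
/*
 * Note the ordering in klp_patch_func(): for a new klp_ops the func is
 * added to ops->func_stack before register_ftrace_function() arms the
 * ftrace site, so klp_ftrace_handler() can never observe an empty stack
 * for a live call site.
 */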

static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}