// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

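/*
 * klp_find_ops() - find the klp_ops tracking an original function
 *
 * Each klp_ops owns the single ftrace_ops registered on one patched
 * function; its func_stack lists the klp_func variants from all loaded
 * patches of that function, most recently added first.  Returns NULL
 * when no patch currently touches @old_func.
 */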
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

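/*
 * klp_ftrace_handler() - ftrace handler installed on every patched function
 *
 * Runs on each entry to a patched function.  It normally redirects
 * execution to the newest klp_func on the func_stack; while a transition
 * is in progress it consults the current task's patch state to choose
 * between the old and new variant.
 */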
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(fregs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

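/*
 * klp_unpatch_func() - stop redirecting one patched function
 *
 * Remove @func from its func_stack.  If it was the only remaining variant
 * for that function, also unregister the ftrace handler, clear the filter
 * and free the klp_ops.
 */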
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

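/*
 * klp_patch_func() - start redirecting a function to its new implementation
 *
 * For the first patch of a function this allocates a klp_ops, points the
 * ftrace filter at the function's ftrace location and registers the
 * handler; later patches only push their klp_func onto the existing
 * func_stack.
 */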
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
				  FTRACE_OPS_FL_SAVE_REGS |
#endif
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

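/*
 * __klp_unpatch_object() - unpatch all patched functions of an object
 *
 * With @nops_only set, only the dynamically allocated NOP functions are
 * removed and a statically defined object keeps its patched state.
 */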
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

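/* Unpatch every patched function in @obj, including the NOPs. */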
void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

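/*
 * klp_patch_object() - patch every function in an object
 *
 * On the first failure the whole object is unpatched again and the error
 * is returned.
 */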
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}

	obj->patched = true;

	return 0;
}

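/* Unpatch every patched object of @patch, optionally limited to NOPs. */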
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

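/* Completely unpatch every patched object of @patch. */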
void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

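/* Unpatch only the dynamically allocated NOP functions of @patch. */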
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}