// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/static_call.h>
#include <linux/bug.h>
#include <linux/smp.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/processor.h>
#include <asm/sections.h>
13 extern struct static_call_site __start_static_call_sites
[],
14 __stop_static_call_sites
[];
15 extern struct static_call_tramp_key __start_static_call_tramp_key
[],
16 __stop_static_call_tramp_key
[];
18 static bool static_call_initialized
;
20 /* mutex to protect key modules/sites */
21 static DEFINE_MUTEX(static_call_mutex
);
23 static void static_call_lock(void)
25 mutex_lock(&static_call_mutex
);
28 static void static_call_unlock(void)
30 mutex_unlock(&static_call_mutex
);
33 static inline void *static_call_addr(struct static_call_site
*site
)
35 return (void *)((long)site
->addr
+ (long)&site
->addr
);
38 static inline unsigned long __static_call_key(const struct static_call_site
*site
)
40 return (long)site
->key
+ (long)&site
->key
;
43 static inline struct static_call_key
*static_call_key(const struct static_call_site
*site
)
45 return (void *)(__static_call_key(site
) & ~STATIC_CALL_SITE_FLAGS
);
48 /* These assume the key is word-aligned. */
49 static inline bool static_call_is_init(struct static_call_site
*site
)
51 return __static_call_key(site
) & STATIC_CALL_SITE_INIT
;
54 static inline bool static_call_is_tail(struct static_call_site
*site
)
56 return __static_call_key(site
) & STATIC_CALL_SITE_TAIL
;
59 static inline void static_call_set_init(struct static_call_site
*site
)
61 site
->key
= (__static_call_key(site
) | STATIC_CALL_SITE_INIT
) -
/* sort() comparator: order sites by their key pointer so that all
 * sites of one key become adjacent. */
static int static_call_site_cmp(const void *_a, const void *_b)
{
	const struct static_call_site *a = _a;
	const struct static_call_site *b = _b;
	const struct static_call_key *key_a = static_call_key(a);
	const struct static_call_key *key_b = static_call_key(b);

	if (key_a < key_b)
		return -1;

	if (key_a > key_b)
		return 1;

	return 0;
}
81 static void static_call_site_swap(void *_a
, void *_b
, int size
)
83 long delta
= (unsigned long)_a
- (unsigned long)_b
;
84 struct static_call_site
*a
= _a
;
85 struct static_call_site
*b
= _b
;
86 struct static_call_site tmp
= *a
;
88 a
->addr
= b
->addr
- delta
;
89 a
->key
= b
->key
- delta
;
91 b
->addr
= tmp
.addr
+ delta
;
92 b
->key
= tmp
.key
+ delta
;
95 static inline void static_call_sort_entries(struct static_call_site
*start
,
96 struct static_call_site
*stop
)
98 sort(start
, stop
- start
, sizeof(struct static_call_site
),
99 static_call_site_cmp
, static_call_site_swap
);
102 static inline bool static_call_key_has_mods(struct static_call_key
*key
)
104 return !(key
->type
& 1);
107 static inline struct static_call_mod
*static_call_key_next(struct static_call_key
*key
)
109 if (!static_call_key_has_mods(key
))
115 static inline struct static_call_site
*static_call_key_sites(struct static_call_key
*key
)
117 if (static_call_key_has_mods(key
))
120 return (struct static_call_site
*)(key
->type
& ~1);
123 void __static_call_update(struct static_call_key
*key
, void *tramp
, void *func
)
125 struct static_call_site
*site
, *stop
;
126 struct static_call_mod
*site_mod
, first
;
131 if (key
->func
== func
)
136 arch_static_call_transform(NULL
, tramp
, func
, false);
139 * If uninitialized, we'll not update the callsites, but they still
140 * point to the trampoline and we just patched that.
142 if (WARN_ON_ONCE(!static_call_initialized
))
145 first
= (struct static_call_mod
){
146 .next
= static_call_key_next(key
),
148 .sites
= static_call_key_sites(key
),
151 for (site_mod
= &first
; site_mod
; site_mod
= site_mod
->next
) {
152 bool init
= system_state
< SYSTEM_RUNNING
;
153 struct module
*mod
= site_mod
->mod
;
155 if (!site_mod
->sites
) {
157 * This can happen if the static call key is defined in
158 * a module which doesn't use it.
160 * It also happens in the has_mods case, where the
161 * 'first' entry has no sites associated with it.
166 stop
= __stop_static_call_sites
;
168 #ifdef CONFIG_MODULES
170 stop
= mod
->static_call_sites
+
171 mod
->num_static_call_sites
;
172 init
= mod
->state
== MODULE_STATE_COMING
;
176 for (site
= site_mod
->sites
;
177 site
< stop
&& static_call_key(site
) == key
; site
++) {
178 void *site_addr
= static_call_addr(site
);
180 if (!init
&& static_call_is_init(site
))
183 if (!kernel_text_address((unsigned long)site_addr
)) {
185 * This skips patching built-in __exit, which
186 * is part of init_section_contains() but is
187 * not part of kernel_text_address().
189 * Skipping built-in __exit is fine since it
190 * will never be executed.
192 WARN_ONCE(!static_call_is_init(site
),
193 "can't patch static call site at %pS",
198 arch_static_call_transform(site_addr
, NULL
, func
,
199 static_call_is_tail(site
));
204 static_call_unlock();
207 EXPORT_SYMBOL_GPL(__static_call_update
);
209 static int __static_call_init(struct module
*mod
,
210 struct static_call_site
*start
,
211 struct static_call_site
*stop
)
213 struct static_call_site
*site
;
214 struct static_call_key
*key
, *prev_key
= NULL
;
215 struct static_call_mod
*site_mod
;
220 static_call_sort_entries(start
, stop
);
222 for (site
= start
; site
< stop
; site
++) {
223 void *site_addr
= static_call_addr(site
);
225 if ((mod
&& within_module_init((unsigned long)site_addr
, mod
)) ||
226 (!mod
&& init_section_contains(site_addr
, 1)))
227 static_call_set_init(site
);
229 key
= static_call_key(site
);
230 if (key
!= prev_key
) {
234 * For vmlinux (!mod) avoid the allocation by storing
235 * the sites pointer in the key itself. Also see
236 * __static_call_update()'s @first.
238 * This allows architectures (eg. x86) to call
239 * static_call_init() before memory allocation works.
247 site_mod
= kzalloc(sizeof(*site_mod
), GFP_KERNEL
);
252 * When the key has a direct sites pointer, extract
253 * that into an explicit struct static_call_mod, so we
254 * can have a list of modules.
256 if (static_call_key_sites(key
)) {
257 site_mod
->mod
= NULL
;
258 site_mod
->next
= NULL
;
259 site_mod
->sites
= static_call_key_sites(key
);
261 key
->mods
= site_mod
;
263 site_mod
= kzalloc(sizeof(*site_mod
), GFP_KERNEL
);
269 site_mod
->sites
= site
;
270 site_mod
->next
= static_call_key_next(key
);
271 key
->mods
= site_mod
;
275 arch_static_call_transform(site_addr
, NULL
, key
->func
,
276 static_call_is_tail(site
));
282 static int addr_conflict(struct static_call_site
*site
, void *start
, void *end
)
284 unsigned long addr
= (unsigned long)static_call_addr(site
);
286 if (addr
<= (unsigned long)end
&&
287 addr
+ CALL_INSN_SIZE
> (unsigned long)start
)
293 static int __static_call_text_reserved(struct static_call_site
*iter_start
,
294 struct static_call_site
*iter_stop
,
295 void *start
, void *end
)
297 struct static_call_site
*iter
= iter_start
;
299 while (iter
< iter_stop
) {
300 if (addr_conflict(iter
, start
, end
))
308 #ifdef CONFIG_MODULES
310 static int __static_call_mod_text_reserved(void *start
, void *end
)
316 mod
= __module_text_address((unsigned long)start
);
317 WARN_ON_ONCE(__module_text_address((unsigned long)end
) != mod
);
318 if (!try_module_get(mod
))
325 ret
= __static_call_text_reserved(mod
->static_call_sites
,
326 mod
->static_call_sites
+ mod
->num_static_call_sites
,
334 static unsigned long tramp_key_lookup(unsigned long addr
)
336 struct static_call_tramp_key
*start
= __start_static_call_tramp_key
;
337 struct static_call_tramp_key
*stop
= __stop_static_call_tramp_key
;
338 struct static_call_tramp_key
*tramp_key
;
340 for (tramp_key
= start
; tramp_key
!= stop
; tramp_key
++) {
343 tramp
= (long)tramp_key
->tramp
+ (long)&tramp_key
->tramp
;
345 return (long)tramp_key
->key
+ (long)&tramp_key
->key
;
351 static int static_call_add_module(struct module
*mod
)
353 struct static_call_site
*start
= mod
->static_call_sites
;
354 struct static_call_site
*stop
= start
+ mod
->num_static_call_sites
;
355 struct static_call_site
*site
;
357 for (site
= start
; site
!= stop
; site
++) {
358 unsigned long s_key
= __static_call_key(site
);
359 unsigned long addr
= s_key
& ~STATIC_CALL_SITE_FLAGS
;
363 * Is the key is exported, 'addr' points to the key, which
364 * means modules are allowed to call static_call_update() on
367 * Otherwise, the key isn't exported, and 'addr' points to the
368 * trampoline so we need to lookup the key.
370 * We go through this dance to prevent crazy modules from
371 * abusing sensitive static calls.
373 if (!kernel_text_address(addr
))
376 key
= tramp_key_lookup(addr
);
378 pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
379 static_call_addr(site
));
383 key
|= s_key
& STATIC_CALL_SITE_FLAGS
;
384 site
->key
= key
- (long)&site
->key
;
387 return __static_call_init(mod
, start
, stop
);
390 static void static_call_del_module(struct module
*mod
)
392 struct static_call_site
*start
= mod
->static_call_sites
;
393 struct static_call_site
*stop
= mod
->static_call_sites
+
394 mod
->num_static_call_sites
;
395 struct static_call_key
*key
, *prev_key
= NULL
;
396 struct static_call_mod
*site_mod
, **prev
;
397 struct static_call_site
*site
;
399 for (site
= start
; site
< stop
; site
++) {
400 key
= static_call_key(site
);
406 for (prev
= &key
->mods
, site_mod
= key
->mods
;
407 site_mod
&& site_mod
->mod
!= mod
;
408 prev
= &site_mod
->next
, site_mod
= site_mod
->next
)
414 *prev
= site_mod
->next
;
419 static int static_call_module_notify(struct notifier_block
*nb
,
420 unsigned long val
, void *data
)
422 struct module
*mod
= data
;
429 case MODULE_STATE_COMING
:
430 ret
= static_call_add_module(mod
);
432 WARN(1, "Failed to allocate memory for static calls");
433 static_call_del_module(mod
);
436 case MODULE_STATE_GOING
:
437 static_call_del_module(mod
);
441 static_call_unlock();
444 return notifier_from_errno(ret
);
447 static struct notifier_block static_call_module_nb
= {
448 .notifier_call
= static_call_module_notify
,
453 static inline int __static_call_mod_text_reserved(void *start
, void *end
)
458 #endif /* CONFIG_MODULES */
460 int static_call_text_reserved(void *start
, void *end
)
462 int ret
= __static_call_text_reserved(__start_static_call_sites
,
463 __stop_static_call_sites
, start
, end
);
468 return __static_call_mod_text_reserved(start
, end
);
471 int __init
static_call_init(void)
475 if (static_call_initialized
)
480 ret
= __static_call_init(NULL
, __start_static_call_sites
,
481 __stop_static_call_sites
);
482 static_call_unlock();
486 pr_err("Failed to allocate memory for static_call!\n");
490 static_call_initialized
= true;
492 #ifdef CONFIG_MODULES
493 register_module_notifier(&static_call_module_nb
);
497 early_initcall(static_call_init
);
/* Generic "return 0" target usable with static_call_update(). */
long __static_call_return0(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__static_call_return0);
504 #ifdef CONFIG_STATIC_CALL_SELFTEST
/* Selftest target A: returns its argument plus one. */
static int func_a(int x)
{
	return x+1;
}
/* Selftest target B: returns its argument plus two. */
static int func_b(int x)
{
	return x+2;
}
DEFINE_STATIC_CALL(sc_selftest, func_a);
/*
 * Selftest vector: optionally re-target sc_selftest to ->func, then
 * check static_call(sc_selftest)(->val) == ->expect.
 * NOTE(review): entries reconstructed to match func_a (x+1) / func_b (x+2);
 * a NULL ->func means "keep the previous target".
 */
static struct static_call_data {
	int (*func)(int);
	int val;
	int expect;
} static_call_data [] __initdata = {
	{ NULL,   2, 3 },
	{ func_a, 2, 3 },
	{ func_b, 2, 4 },
	{ func_a, 2, 3 }
};
528 static int __init
test_static_call_init(void)
532 for (i
= 0; i
< ARRAY_SIZE(static_call_data
); i
++ ) {
533 struct static_call_data
*scd
= &static_call_data
[i
];
536 static_call_update(sc_selftest
, scd
->func
);
538 WARN_ON(static_call(sc_selftest
)(scd
->val
) != scd
->expect
);
543 early_initcall(test_static_call_init
);
545 #endif /* CONFIG_STATIC_CALL_SELFTEST */