// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}
static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In the batching mode, entries should also be sorted by the code
	 * inside the already sorted list of entries, enabling a bsearch in
	 * the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}
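/*
 * Sorting primarily by key means all entries belonging to one static_key
 * end up contiguous in the table; static_key_set_entries() can then point
 * a key at its first entry, and __jump_label_update() simply walks forward
 * until it sees a different key.
 */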
static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}
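/*
 * Why the delta adjustment: with CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, the
 * code/target/key members hold offsets relative to the address of the
 * member itself rather than absolute pointers. Swapping two entries moves
 * the base each offset is computed against, so a plain byte swap would
 * leave every offset pointing delta bytes off target; rebasing by +/-
 * delta keeps them correct at their new locations.
 */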
static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	int size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}
static void jump_label_update(struct static_key *key);
/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's OK
 * to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);
void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}
static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}
void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);
void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}
void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
		return 1;

	return 0;
}
static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end, bool init)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}
/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}
static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}
/*
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an
 * access function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
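/*
 * Concretely, the JUMP_TYPE_* bits (defined in linux/jump_label.h) are
 * laid out in the low bits of key->type as:
 *
 *	bit 0: JUMP_TYPE_TRUE   - the key's initial branch direction
 *	bit 1: JUMP_TYPE_LINKED - the pointer is a static_key_mod list
 *	                          rather than a jump_entry table
 *
 * which is why both pointer targets must be at least 4-byte aligned, as
 * the WARN_ON_ONCE() above checks.
 */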
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}
#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif
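/*
 * The batch variant above lets an architecture queue a run of site
 * updates and commit them with a single arch_jump_label_transform_apply()
 * call, which can amortize whatever synchronization the architecture
 * needs for text patching (on x86, for instance, batching pays the
 * expensive breakpoint/IPI dance once per batch instead of per site).
 */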
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}
#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}
/*
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}
static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}
static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}
/**
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		in_init = within_module_init(jump_entry_code(iter), mod);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}

		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;

		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}

		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */
/**
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
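/*
 * Example caller (hypothetical, for illustration): a facility that
 * patches kernel text would check for a collision with jump label
 * sites first, under the jump label lock:
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + len - 1))
 *		ret = -EBUSY;
 *	jump_label_unlock();
 */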
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	bool init = system_state < SYSTEM_RUNNING;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod) {
		stop = mod->jump_entries + mod->num_jump_entries;
		init = mod->state == MODULE_STATE_COMING;
	}
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, init);
}
#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */