/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
		/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, and pulling that in is
 * problematic for some kernel headers such as kernel.h. Since
 * static_key_count() is not used in branch statements here, as it is for the
 * !HAVE_JUMP_LABEL case, it is OK to have it be a function. The same goes for
 * 'static_key_enable()' and 'static_key_disable()', which require bug.h. This
 * should allow jump_label.h to be included from most/all places for
 * HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_enable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (!count)
		static_key_slow_inc(key);
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (count)
		static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_disable);
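
/*
 * Usage sketch (illustrative, not part of this file; 'my_key' and
 * do_unlikely_thing() are hypothetical names). Fast-path code tests the
 * key, slow-path code flips it, mirroring the selftest at the bottom of
 * this file:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_key);
 *
 *	if (static_branch_unlikely(&my_key))	// NOP until enabled
 *		do_unlikely_thing();
 *	...
 *	static_branch_enable(&my_key);		// patch all sites to jump
 */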

void static_key_slow_inc(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	cpus_read_lock();
	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		atomic_set(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
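
/*
 * Illustrative timeline of the ordering enforced above, for two CPUs
 * racing to enable an initially-disabled key:
 *
 *	CPU0					CPU1
 *	static_key_slow_inc()
 *	  enabled == 0, take slow path
 *	  jump_label_lock()
 *	  enabled = -1				static_key_slow_inc()
 *	  jump_label_update()			  enabled < 0, take slow path
 *	  enabled = 1				  blocks on jump_label_lock()
 *	  jump_label_unlock()			  enabled == 1, atomic_inc() -> 2
 *
 * CPU1 therefore cannot return before CPU0 has finished patching.
 */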

static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	cpus_read_lock();
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		cpus_read_unlock();
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
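
/*
 * Deferred-key usage sketch (illustrative; 'my_dkey' is a hypothetical
 * name). Rate limiting batches the expensive unpatching work:
 *
 *	static struct static_key_deferred my_dkey;
 *
 *	jump_label_rate_limit(&my_dkey, HZ);	// defer dec's for up to 1s
 *	static_key_slow_inc(&my_dkey.key);
 *	...
 *	static_key_slow_dec_deferred(&my_dkey);	// patched later, from a
 *						// workqueue, via
 *						// jump_label_update_timeout()
 */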

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; the accessor
 * functions below preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
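
/*
 * Truth table for the XOR above; the low bit of entry->key ('branch')
 * inverts the sense of the patched instruction relative to the key's
 * enabled state (see the fuller table in linux/jump_label.h):
 *
 *	enabled  branch   instruction
 *	   0        0     NOP  (JUMP_LABEL_NOP)
 *	   0        1     JMP  (JUMP_LABEL_JMP)
 *	   1        0     JMP
 *	   1        1     NOP
 */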

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * entry->code set to 0 invalidates module init text sections.
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
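
/*
 * Sketch of the layout once a key becomes 'linked' (illustrative): for a
 * key defined in the core kernel and used by module A,
 * jump_label_add_module() builds
 *
 *	key->next --> static_key_mod        --> static_key_mod
 *	              .mod     = A              .mod     = NULL (core kernel)
 *	              .entries = A's table      .entries = key's own table
 *	              .next    = -------^       .next    = NULL
 *
 * New modules are prepended, so the head of the list is the most
 * recently loaded user of the key.
 */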

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text range between @start and @end overlaps with any of
 * the jump label patch addresses. Code that wants to modify kernel text
 * should first verify that it does not overlap with any of the jump label
 * addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */