// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
4ce5b903 IM |
12 | #include <linux/completion.h> |
13 | #include <linux/interrupt.h> | |
9b1d82fa | 14 | #include <linux/notifier.h> |
f9411ebe | 15 | #include <linux/rcupdate_wait.h> |
4ce5b903 | 16 | #include <linux/kernel.h> |
9984de1a | 17 | #include <linux/export.h> |
9b1d82fa | 18 | #include <linux/mutex.h> |
4ce5b903 IM |
19 | #include <linux/sched.h> |
20 | #include <linux/types.h> | |
21 | #include <linux/init.h> | |
9b1d82fa | 22 | #include <linux/time.h> |
4ce5b903 | 23 | #include <linux/cpu.h> |
268bb0ce | 24 | #include <linux/prefetch.h> |
77a40f97 | 25 | #include <linux/slab.h> |
9b1d82fa | 26 | |
29c00b4a PM |
27 | #include "rcu.h" |
28 | ||
6d48152e PM |
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};

/*
 * Definition for rcupdate control block.  Both tail pointers start out
 * aimed at the list head, so an empty list is recognized by
 * donetail == curtail == &rcucblist.
 */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
};
41 | ||
/*
 * Wait for all previously queued RCU callbacks to be invoked.  On this
 * single-CPU build, posting a callback via wait_rcu_gp(call_rcu) and
 * waiting for it suffices, because callbacks are invoked in queue order.
 */
void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);
9b1d82fa | 47 | |
/* Record an rcu quiescent state. */
void rcu_qs(void)
{
	unsigned long flags;

	/* Irqs off: the tail pointers are also updated from irq context. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		/*
		 * Callbacks were queued since the last quiescent state:
		 * mark them all as "done" and kick RCU_SOFTIRQ so that
		 * rcu_process_callbacks() invokes them.
		 */
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	local_irq_restore(flags);
}
60 | ||
61 | /* | |
62 | * Check to see if the scheduling-clock interrupt came from an extended | |
9b2e4f18 PM |
63 | * quiescent state, and, if so, tell RCU about it. This function must |
64 | * be called from hardirq context. It is normally called from the | |
65 | * scheduling-clock interrupt. | |
9b1d82fa | 66 | */ |
c98cac60 | 67 | void rcu_sched_clock_irq(int user) |
9b1d82fa | 68 | { |
c5bacd94 | 69 | if (user) { |
709fdce7 | 70 | rcu_qs(); |
c5bacd94 PM |
71 | } else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) { |
72 | set_tsk_need_resched(current); | |
73 | set_preempt_need_resched(); | |
74 | } | |
9b1d82fa PM |
75 | } |
76 | ||
77a40f97 JFG |
77 | /* |
78 | * Reclaim the specified callback, either by invoking it for non-kfree cases or | |
79 | * freeing it directly (for kfree). Return true if kfreeing, false otherwise. | |
80 | */ | |
81 | static inline bool rcu_reclaim_tiny(struct rcu_head *head) | |
82 | { | |
83 | rcu_callback_t f; | |
84 | unsigned long offset = (unsigned long)head->func; | |
85 | ||
86 | rcu_lock_acquire(&rcu_callback_map); | |
87 | if (__is_kfree_rcu_offset(offset)) { | |
88 | trace_rcu_invoke_kfree_callback("", head, offset); | |
89 | kfree((void *)head - offset); | |
90 | rcu_lock_release(&rcu_callback_map); | |
91 | return true; | |
92 | } | |
93 | ||
94 | trace_rcu_invoke_callback("", head); | |
95 | f = head->func; | |
96 | WRITE_ONCE(head->func, (rcu_callback_t)0L); | |
97 | f(head); | |
98 | rcu_lock_release(&rcu_callback_map); | |
99 | return false; | |
100 | } | |
101 | ||
/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;	/* Detach the "done" sublist. */
	/* If all queued callbacks were done, the list is now empty, so
	 * curtail must be reset to the list head as well. */
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list, each with BH disabled. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}
134 | ||
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent
 * state, and so on a UP system, synchronize_rcu() need do nothing.
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	/* Lockdep-only check: a grace period needs no work here, but
	 * calling from within any flavor of read-side critical section
	 * is a usage bug worth warning about. */
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
9b1d82fa | 153 | |
/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	/* Append to the tail of the global list; irqs off because the
	 * list is also manipulated from irq/softirq context. */
	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
9dc5ad32 | 178 | |
/*
 * Boot-time initialization: register the softirq handler that invokes
 * ready callbacks, then run the early-boot self-tests and SRCU setup.
 */
void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
	srcu_init();
}