/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

/* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; }

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

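/*
 * Example (illustrative sketch, not part of this header): the polled
 * grace-period interface lets an updater snapshot grace-period state and
 * later block only if a full grace period has not already elapsed.  Under
 * Tiny RCU the cookie is always zero and cond_synchronize_rcu() reduces to
 * might_sleep(), because a single-CPU kernel passes through a quiescent
 * state at every context switch.  The foo type, global_foo, and foo_lock
 * names below are hypothetical.
 *
 *	struct foo { int data; };
 *	static struct foo __rcu *global_foo;
 *
 *	// Caller holds foo_lock, serializing updaters.
 *	static void replace_foo(struct foo *new_foo)
 *	{
 *		struct foo *old_foo;
 *		unsigned long cookie;
 *
 *		old_foo = rcu_dereference_protected(global_foo,
 *						    lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(global_foo, new_foo);
 *		cookie = get_state_synchronize_rcu();
 *		// ... other work while pre-existing readers drain ...
 *		cond_synchronize_rcu(cookie);	// blocks only if needed
 *		kfree(old_foo);
 *	}
 */
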
extern void rcu_barrier(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}

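/*
 * Example (illustrative sketch, not part of this header): the classic
 * remove-then-free update pattern that synchronize_rcu_expedited() (and
 * plain synchronize_rcu()) supports.  With only one CPU there is nothing
 * to expedite, so the expedited form above simply calls synchronize_rcu().
 * The foo type, foo_lock, and list linkage are hypothetical and assume
 * updaters are serialized by foo_lock.
 *
 *	static void remove_foo(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&p->list);		// unpublish from new readers
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu_expedited();	// wait for pre-existing readers
 *		kfree(p);			// no reader can still hold p
 *	}
 */
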
static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_rcu(head, func);
}

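/*
 * Example (illustrative sketch, not part of this header): kfree_call_rcu()
 * is the backend used by kfree_rcu(), which queues an object to be freed
 * after a grace period without making the caller wait.  Under Tiny RCU it
 * degenerates to call_rcu().  The foo type below is hypothetical; the
 * embedded rcu_head gives the callback machinery somewhere to live.
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	static void release_foo(struct foo *p)
 *	{
 *		list_del_rcu(&p->list);	// updater lock assumed held
 *		kfree_rcu(p, rcu);	// freed after a grace period elapses
 *	}
 */
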
void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

/* Report a context switch as an RCU quiescent state (and to RCU Tasks as appropriate). */
#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void rcu_irq_exit_preempt(void) { }
static inline void rcu_irq_exit_check_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifndef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline bool __rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

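/*
 * Example (illustrative sketch, not part of this header): the read-side
 * pattern that the stubs above keep nearly free on a uniprocessor.
 * rcu_read_lock()/rcu_read_unlock() cost little more than preemption
 * control here, and the barrier() in rcu_all_qs() merely keeps the
 * compiler from letting a read-side critical section leak across the
 * quiescent state.  global_foo is the hypothetical pointer from the
 * earlier sketches.
 *
 *	static int read_foo_data(void)
 *	{
 *		struct foo *p;
 *		int val = -1;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(global_foo);
 *		if (p)
 *			val = p->data;
 *		rcu_read_unlock();
 *		return val;
 *	}
 */
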
/* RCUtree hotplug events */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_TINY_H */