/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/irq_poll.h>
#include <linux/delay.h>

static unsigned int irq_poll_budget __read_mostly = 256;

static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

/**
 * irq_poll_sched - Schedule a run of the iopoll handler
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Add this irq_poll structure to the pending poll list and trigger the
 *     raise of the blk iopoll softirq. The driver must already have gotten a
 *     successful return from irq_poll_sched_prep() before calling this.
 **/
void irq_poll_sched(struct irq_poll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_sched);
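/*
 * Example (illustrative sketch, not part of this file): a driver would
 * typically schedule polling from its hard interrupt handler after
 * masking the device interrupt. struct my_dev, my_dev_irq_disable() and
 * the iopoll member are hypothetical, and this assumes the blk-iopoll
 * convention that irq_poll_sched_prep() returns zero when the instance
 * was successfully claimed for scheduling:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		if (!irq_poll_sched_prep(&dev->iopoll)) {
 *			my_dev_irq_disable(dev);
 *			irq_poll_sched(&dev->iopoll);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */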
/**
 * __irq_poll_complete - Mark this @iop as un-polled again
 * @iop: The parent iopoll structure
 *
 * Description:
 *     See irq_poll_complete(). This function must be called with interrupts
 *     disabled.
 **/
void __irq_poll_complete(struct irq_poll *iop)
{
	list_del(&iop->list);
	smp_mb__before_atomic();
	clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(__irq_poll_complete);

/**
 * irq_poll_complete - Mark this @iop as un-polled again
 * @iop: The parent iopoll structure
 *
 * Description:
 *     If a driver consumes less than the assigned budget in its run of the
 *     iopoll handler, it'll end the polled mode by calling this function. The
 *     iopoll handler will not be invoked again before irq_poll_sched_prep()
 *     is called.
 **/
void irq_poll_complete(struct irq_poll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	__irq_poll_complete(iop);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_complete);
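/*
 * Example poll handler (illustrative sketch; struct my_dev and its
 * helpers my_dev_reap_completions() and my_dev_irq_enable() are
 * hypothetical). If less than the budget was consumed, all pending work
 * is drained: irq_poll_complete() ends polled mode and the device
 * interrupt is unmasked again:
 *
 *	static int my_poll(struct irq_poll *iop, int budget)
 *	{
 *		struct my_dev *dev = container_of(iop, struct my_dev, iopoll);
 *		int done = my_dev_reap_completions(dev, budget);
 *
 *		if (done < budget) {
 *			irq_poll_complete(iop);
 *			my_dev_irq_enable(dev);
 *		}
 *		return done;
 *	}
 */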
static void irq_poll_softirq(struct softirq_action *h)
{
	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
	int rearm = 0, budget = irq_poll_budget;
	unsigned long start_time = jiffies;

	local_irq_disable();

	while (!list_empty(list)) {
		struct irq_poll *iop;
		int work, weight;

		/*
		 * If the softirq window is exhausted - either the budget is
		 * consumed or we have been in here for more than one jiffy -
		 * then punt and re-arm the softirq.
		 */
		if (budget <= 0 || time_after(jiffies, start_time)) {
			rearm = 1;
			break;
		}

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		iop = list_entry(list->next, struct irq_poll, list);

		weight = iop->weight;
		work = 0;
		if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
			work = iop->poll(iop, weight);

		budget -= work;

		local_irq_disable();

		/*
		 * Drivers must not modify the iopoll state if they
		 * consume their assigned weight (or more, some drivers can't
		 * easily just stop processing, they have to complete an
		 * entire mask of commands). In such cases this code
		 * still "owns" the iopoll instance and therefore can
		 * move the instance around on the list at will.
		 */
		if (work >= weight) {
			if (irq_poll_disable_pending(iop))
				__irq_poll_complete(iop);
			else
				list_move_tail(&iop->list, list);
		}
	}

	if (rearm)
		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);

	local_irq_enable();
}
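/*
 * Descriptive note on the per-instance state machine, as implemented
 * above: IRQ_POLL_F_SCHED is set when an instance is claimed for polling
 * (via irq_poll_sched_prep()) and cleared by __irq_poll_complete() when
 * polled mode ends; while it is set, further scheduling attempts fail,
 * so an instance sits on at most one CPU's list at a time.
 * IRQ_POLL_F_DISABLE marks an instance as being torn down: once its
 * budget is exhausted, the softirq completes it instead of re-queueing
 * it at the tail of the list.
 */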
/**
 * irq_poll_disable - Disable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Disable io polling and wait for any pending callbacks to have completed.
 **/
void irq_poll_disable(struct irq_poll *iop)
{
	set_bit(IRQ_POLL_F_DISABLE, &iop->state);
	while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
		msleep(1);
	clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(irq_poll_disable);
/**
 * irq_poll_enable - Enable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that this does not schedule a
 *     handler run, it only marks the instance as active again.
 **/
void irq_poll_enable(struct irq_poll *iop)
{
	BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
	smp_mb__before_atomic();
	clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(irq_poll_enable);
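/*
 * Example quiesce/resume sequence (illustrative sketch; dev->iopoll is
 * hypothetical). irq_poll_disable() exits its wait loop by setting
 * IRQ_POLL_F_SCHED itself, which is exactly the state irq_poll_enable()
 * expects, so the two pair naturally:
 *
 *	irq_poll_disable(&dev->iopoll);		(waits out any running poll)
 *	... reconfigure the device ...
 *	irq_poll_enable(&dev->iopoll);		(allows scheduling again)
 */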
/**
 * irq_poll_init - Initialize this @iop
 * @iop: The parent iopoll structure
 * @weight: The default weight (or command completion budget)
 * @poll_fn: The handler to invoke
 *
 * Description:
 *     Initialize and enable this irq_poll structure.
 **/
void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
{
	memset(iop, 0, sizeof(*iop));
	INIT_LIST_HEAD(&iop->list);
	iop->weight = weight;
	iop->poll = poll_fn;
}
EXPORT_SYMBOL(irq_poll_init);
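/*
 * Example setup (illustrative sketch; dev->iopoll, MY_IOPOLL_WEIGHT and
 * my_poll are hypothetical names):
 *
 *	irq_poll_init(&dev->iopoll, MY_IOPOLL_WEIGHT, my_poll);
 *
 * Since the state word is zeroed, the instance is immediately eligible
 * for scheduling; no separate enable call is needed.
 */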
static int irq_poll_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
				 this_cpu_ptr(&blk_cpu_iopoll));
		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block irq_poll_cpu_notifier = {
	.notifier_call	= irq_poll_cpu_notify,
};

static __init int irq_poll_setup(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

	open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
	register_hotcpu_notifier(&irq_poll_cpu_notifier);
	return 0;
}
subsys_initcall(irq_poll_setup);