/*
 * Functions related to softirq rq completions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>

#include "blk.h"

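/* Per-CPU list of requests queued for completion via BLOCK_SOFTIRQ. */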
static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

/*
 * Softirq action handler - move entries to a local list and loop over them
 * while passing them to the queue's registered completion handler.
 */
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	/* Claim this CPU's pending list atomically, with irqs disabled. */
	local_irq_disable();
	cpu_list = this_cpu_ptr(&blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq;

		rq = list_entry(local_list.next, struct request, ipi_list);
		list_del_init(&rq->ipi_list);
		rq->q->softirq_done_fn(rq);
	}
}

#ifdef CONFIG_SMP
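/*
 * Runs on the chosen completion CPU via smp_call_function_single_async():
 * queue the request on that CPU's done list and raise BLOCK_SOFTIRQ if the
 * list was previously empty (a non-empty list means the softirq is already
 * pending there).
 */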
static void trigger_softirq(void *data)
{
	struct request *rq = data;
	unsigned long flags;
	struct list_head *list;

	local_irq_save(flags);
	list = this_cpu_ptr(&blk_cpu_done);
	list_add_tail(&rq->ipi_list, list);

	if (list->next == &rq->ipi_list)
		raise_softirq_irqoff(BLOCK_SOFTIRQ);

	local_irq_restore(flags);
}

/*
 * Set up and invoke a run of 'trigger_softirq' on the given CPU. Returns
 * 0 on success, 1 if the CPU is offline and the caller must complete the
 * request locally instead.
 */
static int raise_blk_irq(int cpu, struct request *rq)
{
	if (cpu_online(cpu)) {
		struct call_single_data *data = &rq->csd;

		data->func = trigger_softirq;
		data->info = rq;
		data->flags = 0;

		smp_call_function_single_async(cpu, data);
		return 0;
	}

	return 1;
}
#else /* CONFIG_SMP */
static int raise_blk_irq(int cpu, struct request *rq)
{
	return 1;
}
#endif

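/* CPU hotplug callback, registered below for CPUHP_BLOCK_SOFTIRQ_DEAD. */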
static int blk_softirq_cpu_dead(unsigned int cpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	local_irq_disable();
	list_splice_init(&per_cpu(blk_cpu_done, cpu),
			 this_cpu_ptr(&blk_cpu_done));
	raise_softirq_irqoff(BLOCK_SOFTIRQ);
	local_irq_enable();

	return 0;
}

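/*
 * Complete @req out of hard irq context: pick a completion CPU, queue the
 * request on that CPU's blk_cpu_done list, and make sure BLOCK_SOFTIRQ
 * runs there. Falls back to local completion if the requested CPU is
 * offline.
 */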
void __blk_complete_request(struct request *req)
{
	int ccpu, cpu;
	struct request_queue *q = req->q;
	unsigned long flags;
	bool shared = false;

	BUG_ON(!q->softirq_done_fn);

	local_irq_save(flags);
	cpu = smp_processor_id();

	/*
	 * Select completion CPU
	 */
	if (req->cpu != -1) {
		ccpu = req->cpu;
		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
			shared = cpus_share_cache(cpu, ccpu);
	} else
		ccpu = cpu;

	/*
	 * If the current CPU and the requested completion CPU share a
	 * cache, run the softirq on the current CPU. This may look
	 * equivalent to QUEUE_FLAG_SAME_FORCE, but it is not:
	 * blk_complete_request() runs from the interrupt handler, and
	 * since the I/O controller does not support multiple interrupts,
	 * all completions arrive on a single CPU anyway. Completing
	 * locally avoids sending an IPI from that CPU to the first CPU
	 * of the cache-sharing group.
	 */
	if (ccpu == cpu || shared) {
		struct list_head *list;
do_local:
		list = this_cpu_ptr(&blk_cpu_done);
		list_add_tail(&req->ipi_list, list);

		/*
		 * If the list only contains our just-added request,
		 * raise the softirq; if there are already entries, the
		 * softirq was raised earlier and simply hasn't run yet.
		 */
		if (list->next == &req->ipi_list)
			raise_softirq_irqoff(BLOCK_SOFTIRQ);
	} else if (raise_blk_irq(ccpu, req))
		goto do_local;

	local_irq_restore(flags);
}

/**
 * blk_complete_request - end I/O on a request
 * @req: the request being processed
 *
 * Description:
 *     Ends all I/O on a request. It does not handle partial completions,
 *     unless the driver actually implements this in its completion callback
 *     through requeueing. The actual completion happens out-of-order,
 *     through a softirq handler. The user must have registered a completion
 *     callback through blk_queue_softirq_done().
 **/
void blk_complete_request(struct request *req)
{
	if (unlikely(blk_should_fake_timeout(req->q)))
		return;
	if (!blk_mark_rq_complete(req))
		__blk_complete_request(req);
}
EXPORT_SYMBOL(blk_complete_request);

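/*
 * Usage sketch (illustrative only; the mydrv_* names are hypothetical and
 * not part of this file). A driver registers its completion callback at
 * queue setup time and calls blk_complete_request() from its hard irq
 * handler; the callback then runs later from BLOCK_SOFTIRQ context:
 *
 *	static void mydrv_softirq_done(struct request *rq)
 *	{
 *		blk_end_request_all(rq, BLK_STS_OK);
 *	}
 *
 *	blk_queue_softirq_done(q, mydrv_softirq_done);
 *
 *	static irqreturn_t mydrv_irq(int irq, void *dev_id)
 *	{
 *		struct request *rq = mydrv_fetch_completed(dev_id);
 *
 *		blk_complete_request(rq);
 *		return IRQ_HANDLED;
 *	}
 */

/*
 * Set up the per-CPU completion lists, register the softirq handler and
 * the CPU hotplug callback that drains a dead CPU's list.
 */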
static __init int blk_softirq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
				  "block/softirq:dead", NULL,
				  blk_softirq_cpu_dead);
	return 0;
}
subsys_initcall(blk_softirq_init);