/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

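/*
 * Illustrative pairing only (a sketch, not a call site in this file):
 * code that stashes an ioc pointer beyond the reference it was found
 * under takes its own reference and drops it when done, e.g.
 *
 *	get_io_context(ioc);
 *	...associate ioc with a request or icq...
 *	put_io_context(ioc);
 *
 * put_io_context() is defined further down in this file.
 */
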
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
 * mq.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/* Release an icq. Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

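	/*
	 * ioc_create_icq() below nests ioc->lock inside queue_lock, while
	 * this loop already holds ioc->lock.  Taking queue_lock
	 * unconditionally here could deadlock, so trylock it and, on
	 * failure, drop ioc->lock, relax and retry from the list head.
	 */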
	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}
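
/*
 * Rough summary of the two counters used above: ->refcount keeps the
 * io_context structure itself alive, while ->active_ref tracks users
 * that may still issue IO against it.  Dropping the last active
 * reference exits the icq's so io schedulers are told no further IO
 * will arrive from this context; dropping the last plain reference
 * frees the structure (see put_io_context()).
 */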

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
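
/*
 * Typical lookup pattern (sketch only; the GFP flags and node below are
 * the caller's choice, not mandated by this file):
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc) {
 *		...read or adjust ioc state...
 *		put_io_context(ioc);
 *	}
 *
 * The returned reference must be dropped with put_io_context().
 */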

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
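
/*
 * How an io scheduler typically uses the two helpers above (a sketch,
 * not a verbatim caller): with q->queue_lock held it first tries
 * ioc_lookup_icq(ioc, q); only on a miss does it drop the lock and call
 * ioc_create_icq(ioc, q, gfp_mask), which re-takes both locks, links the
 * new icq, and falls back to another lookup if someone else won the race.
 */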

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
					sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);