/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
				  cic_list);
		cic->dtor(ioc);
	}
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

	rcu_read_lock();
	cfq_dtor(ioc);
	rcu_read_unlock();

	kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

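/*
 * Illustrative sketch (not part of the original file): the intended
 * get/put pairing around the two exported helpers above.  The function
 * name and body are hypothetical; only get_io_context() and
 * put_io_context() are real interfaces defined in this file.
 */
static inline void example_use_ioc(struct io_context *ioc)
{
	if (!ioc)
		return;

	get_io_context(ioc);		/* pin @ioc for the duration of use */

	/* ... inspect or update @ioc here, possibly sleeping ... */

	put_io_context(ioc);		/* drop our reference; may free @ioc */
}
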
static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
				  cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	/* PF_EXITING prevents new io_context from being attached to @task */
	WARN_ON_ONCE(!(current->flags & PF_EXITING));

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks))
		cfq_exit(ioc);

	put_io_context(ioc);
}

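/*
 * Illustrative sketch (not part of the original file): where the counts
 * dropped by exit_io_context() come from.  Each task sharing an ioc holds
 * one ->refcount and one ->nr_tasks; the helper below is hypothetical and
 * only spells out that pairing.  The real sharing path (CLONE_IO in
 * copy_io()) uses ioc_task_link() from <linux/iocontext.h>, which
 * additionally refuses to share an ioc whose refcount already hit zero.
 */
static inline void example_share_ioc(struct task_struct *child,
				     struct io_context *ioc)
{
	get_io_context(ioc);		/* one reference per sharing task */
	atomic_inc(&ioc->nr_tasks);	/* one "active task" per sharing task */
	child->io_context = ioc;	/* both are dropped in exit_io_context() */
}
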
static struct io_context *create_task_io_context(struct task_struct *task,
						 gfp_t gfp_flags, int node,
						 bool take_ref)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->cic_list);

	/* try to install, somebody might already have beaten us to it */
	task_lock(task);

	if (!task->io_context && !(task->flags & PF_EXITING)) {
		task->io_context = ioc;
	} else {
		kmem_cache_free(iocontext_cachep, ioc);
		ioc = task->io_context;
	}

	if (ioc && take_ref)
		get_io_context(ioc);

	task_unlock(task);
	return ioc;
}

/**
 * current_io_context - get io_context of %current
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of %current. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context does NOT have its
 * reference count incremented. Because io_context is exited only on task
 * exit, %current can be sure that the returned io_context is valid and
 * alive as long as it is executing.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (current->io_context)
		return current->io_context;

	return create_task_io_context(current, gfp_flags, node, false);
}
EXPORT_SYMBOL(current_io_context);

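/*
 * Illustrative sketch (not part of the original file): as the comment
 * above says, current_io_context() returns an unreferenced pointer that is
 * only guaranteed to stay alive while %current runs.  A caller that wants
 * to stash the pointer beyond that (e.g. in a request) takes its own
 * reference first.  The helper name below is hypothetical.
 */
static inline struct io_context *example_pin_current_ioc(gfp_t gfp_flags,
							 int node)
{
	struct io_context *ioc;

	ioc = current_io_context(gfp_flags, node);
	if (ioc)
		get_io_context(ioc);	/* safe to hold past %current's execution */
	return ioc;			/* balanced by put_io_context() */
}
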
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * current_io_context() + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	task_lock(task);
	ioc = task->io_context;
	if (likely(ioc)) {
		get_io_context(ioc);
		task_unlock(task);
		return ioc;
	}
	task_unlock(task);

	return create_task_io_context(task, gfp_flags, node, true);
}
EXPORT_SYMBOL(get_task_io_context);

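/*
 * Illustrative sketch (not part of the original file): acting on another
 * task's io_context, e.g. when its io priority changes.  The helper name
 * and the GFP_ATOMIC/NUMA_NO_NODE choices are hypothetical; the point is
 * that get_task_io_context() returns a *referenced* ioc (or NULL on
 * allocation failure), so the caller must drop it with put_io_context().
 */
static inline int example_set_task_ioprio(struct task_struct *task, int ioprio)
{
	struct io_context *ioc;

	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (!ioc)
		return -ENOMEM;

	ioc_ioprio_changed(ioc, ioprio);	/* record and flag the change */
	put_io_context(ioc);			/* drop the reference we were given */
	return 0;
}
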
void ioc_set_changed(struct io_context *ioc, int which)
{
	struct cfq_io_context *cic;
	struct hlist_node *n;

	hlist_for_each_entry(cic, n, &ioc->cic_list, cic_list)
		set_bit(which, &cic->changed);
}

/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio. Set %CIC_IOPRIO_CHANGED for all
 * cic's. iosched is responsible for checking the bit and applying it on
 * request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_changed(ioc, CIC_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed. Set %CIC_CGROUP_CHANGED for all cic's.
 * iosched is responsible for checking the bit and applying it on request
 * issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_changed(ioc, CIC_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

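/*
 * Illustrative sketch (not part of the original file): how an I/O
 * scheduler is expected to consume the bits set by ioc_set_changed() on
 * its request issue path.  The helper name and the two re-evaluation
 * placeholders are hypothetical; cic->changed, %CIC_IOPRIO_CHANGED and
 * %CIC_CGROUP_CHANGED are the state used above.
 */
static inline void example_check_cic_changed(struct cfq_io_context *cic)
{
	if (likely(!cic->changed))
		return;

	if (test_and_clear_bit(CIC_IOPRIO_CHANGED, &cic->changed)) {
		/* re-read ioc->ioprio and reapply class/level to the queue */
	}
	if (test_and_clear_bit(CIC_CGROUP_CHANGED, &cic->changed)) {
		/* drop the cached queue so it is looked up in the new group */
	}
}
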
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);