/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

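/*
 * Per-card worker thread: fetch requests from the block layer queue and
 * hand them to mq->issue_fn(), sleeping whenever the queue is empty.
 */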
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = blk_fetch_request(q)) != NULL)
			__blk_end_request_all(req, -EIO);
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

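/*
 * Typical usage (sketch, names illustrative): a caller such as the card
 * block driver initialises the queue and then installs its own request
 * handler, roughly:
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock);
 *	if (ret)
 *		return ret;
 *	md->queue.issue_fn = my_issue_fn;
 *
 * where "md", "my_issue_fn" and the lock belong to the caller.
 */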
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

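	/*
	 * Hosts that can only handle a single hardware segment get a
	 * contiguous bounce buffer (at most MMC_QUEUE_BOUNCESZ bytes), so
	 * that multi-segment requests can still be issued as one transfer
	 * instead of being broken up into tiny pieces.
	 */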
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

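/*
 * The helpers below implement the bounce-buffer data path.  A caller
 * (typically the card block driver) is expected to map the request with
 * mmc_queue_map_sg(), call mmc_queue_bounce_pre() before handing the
 * request to the host, and mmc_queue_bounce_post() once it completes.
 */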
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}