/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

#define MMC_REQ_SPECIAL_MASK	(REQ_DISCARD | REQ_FLUSH)

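/*
 * Requests carrying any of these flags are completed by issue_fn
 * itself, so the queue thread must not carry them over as the
 * "previous" request (see mmc_queue_thread() below).
 */
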
/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

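/*
 * mmc_prep_request() is registered with blk_queue_prep_rq() in
 * mmc_init_queue() below, so the block layer runs it on each request
 * before blk_fetch_request() hands that request to the queue thread.
 */
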
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;
		unsigned int cmd_flags = 0;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

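/*
 * Note on the two mmc_queue_req slots above: mqrq_cur holds the request
 * being prepared while mqrq_prev holds the one the host may still be
 * processing.  Swapping them at the end of each iteration is what lets
 * issue_fn prepare the next transfer while the previous one is in
 * flight.
 */
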
/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

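/*
 * Error reporting goes through *err rather than the return value so
 * that every allocation site in mmc_init_queue() can share the same
 * "if (ret) goto cleanup_queue" pattern.
 */
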
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	/* pref_erase is in 512-byte sectors; convert to bytes */
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to "
					"allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

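/*
 * Illustrative sketch of a caller, loosely modeled on how
 * drivers/mmc/card/block.c drives this API (md, mmc_blk_issue_rq and
 * the lock placement are stand-ins, not defined in this file):
 *
 *	spin_lock_init(&md->lock);
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		goto err_putdisk;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 *	...
 *	mmc_cleanup_queue(&md->queue);
 */
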
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

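/*
 * Illustrative only: a block driver is expected to pair these around
 * host suspend, calling mmc_queue_suspend(mq) from its pm suspend
 * handler before the card loses power and mmc_queue_resume(mq) once
 * power returns.  The MMC_QUEUE_SUSPENDED flag makes repeated calls
 * harmless.
 */
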
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

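/*
 * In the bounce case the request is mapped onto bounce_sg purely to
 * learn its total length; the host driver is then handed a single
 * scatterlist entry covering the contiguous bounce buffer, hence the
 * unconditional return value of 1.
 */
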
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
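
/*
 * Illustrative sketch of the expected call sequence when issuing a
 * request, loosely modeled on drivers/mmc/card/block.c (command and
 * brq setup are elided; mqrq and card are assumed to be prepared by
 * the caller):
 *
 *	mqrq->brq.mrq.data = &mqrq->brq.data;
 *	mqrq->brq.data.sg = mqrq->sg;
 *	mqrq->brq.data.sg_len = mmc_queue_map_sg(mq, mqrq);
 *	mmc_queue_bounce_pre(mqrq);
 *	mmc_wait_for_req(card->host, &mqrq->brq.mrq);
 *	mmc_queue_bounce_post(mqrq);
 */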