/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

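/*
 * Worker thread for a single mmc queue: fetches requests from the block
 * layer dispatch queue and hands them to the block driver, sleeping
 * whenever the queue is empty and there is nothing in flight.
 */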
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->qcnt)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		spin_unlock_irq(q->queue_lock);

		if (req || mq->qcnt) {
			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, BLK_STS_IOERR);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

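/*
 * Allocate and initialise a scatterlist of sg_len entries.
 * Returns NULL on allocation failure.
 */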
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

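/*
 * Configure the queue's discard limits from the card's erase
 * capabilities, enabling secure erase/trim where supported.
 */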
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

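/*
 * Calculate the size of the bounce buffer used to turn scattered I/O
 * into a single contiguous transfer for hosts that can only handle one
 * segment. Returns 0 if no bounce buffer should be used.
 */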
static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
		return 0;

	if (bouncesz > host->max_req_size)
		bouncesz = host->max_req_size;
	if (bouncesz > host->max_seg_size)
		bouncesz = host->max_seg_size;
	if (bouncesz > host->max_blk_count * 512)
		bouncesz = host->max_blk_count * 512;

	if (bouncesz <= 512)
		return 0;

	return bouncesz;
}

/**
 * mmc_init_request() - initialize the MMC-specific per-request data
 * @q: the request queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int mmc_init_request(struct request_queue *q, struct request *req,
			    gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	if (card->bouncesz) {
		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
		if (!mq_rq->bounce_buf)
			return -ENOMEM;
		if (card->bouncesz > 512) {
			mq_rq->sg = mmc_alloc_sg(1, gfp);
			if (!mq_rq->sg)
				return -ENOMEM;
			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
							gfp);
			if (!mq_rq->bounce_sg)
				return -ENOMEM;
		}
	} else {
		mq_rq->bounce_buf = NULL;
		mq_rq->bounce_sg = NULL;
		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
		if (!mq_rq->sg)
			return -ENOMEM;
	}

	return 0;
}

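/**
 * mmc_exit_request() - tear down the MMC-specific per-request data
 * @q: the request queue
 * @req: the request
 */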
static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	/* It is OK to kfree(NULL) so this will be smooth */
	kfree(mq_rq->bounce_sg);
	mq_rq->bounce_sg = NULL;

	kfree(mq_rq->bounce_buf);
	mq_rq->bounce_buf = NULL;

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mq->queue)
		return -ENOMEM;
	mq->queue->queue_lock = lock;
	mq->queue->request_fn = mmc_request_fn;
	mq->queue->init_rq_fn = mmc_init_request;
	mq->queue->exit_rq_fn = mmc_exit_request;
	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
	mq->queue->queuedata = mq;
	mq->qcnt = 0;
	ret = blk_init_allocated_queue(mq->queue);
	if (ret) {
		blk_cleanup_queue(mq->queue);
		return ret;
	}

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	card->bouncesz = mmc_queue_calc_bouncesz(host);
	if (card->bouncesz) {
		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
		blk_queue_max_segment_size(mq->queue, card->bouncesz);
	} else {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}

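/*
 * Tear down a queue: resume it if suspended, stop the worker thread,
 * and drain any remaining requests before detaching from the card.
 */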
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended = true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	struct request *req = mmc_queue_req_to_req(mqrq);
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			  mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			    mqrq->bounce_buf, mqrq->sg[0].length);
}