]>
Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 | 2 | * Copyright (C) 2003 Russell King, All Rights Reserved. |
98ac2162 | 3 | * Copyright 2006-2007 Pierre Ossman |
1da177e4 LT |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | */ | |
5a0e3ad6 | 10 | #include <linux/slab.h> |
1da177e4 LT |
11 | #include <linux/module.h> |
12 | #include <linux/blkdev.h> | |
83144186 | 13 | #include <linux/freezer.h> |
87598a2b | 14 | #include <linux/kthread.h> |
45711f1a | 15 | #include <linux/scatterlist.h> |
8e0cb8a1 | 16 | #include <linux/dma-mapping.h> |
1da177e4 LT |
17 | |
18 | #include <linux/mmc/card.h> | |
19 | #include <linux/mmc/host.h> | |
29eb7bd0 | 20 | |
98ac2162 | 21 | #include "queue.h" |
29eb7bd0 | 22 | #include "block.h" |
55244c56 | 23 | #include "core.h" |
4facdde1 | 24 | #include "card.h" |
1da177e4 | 25 | |
98ccf149 PO |
26 | #define MMC_QUEUE_BOUNCESZ 65536 |
27 | ||
1da177e4 | 28 | /* |
9c9f2d63 | 29 | * Prepare a MMC request. This just filters out odd stuff. |
1da177e4 LT |
30 | */ |
31 | static int mmc_prep_request(struct request_queue *q, struct request *req) | |
32 | { | |
a8ad82cc SRT |
33 | struct mmc_queue *mq = q->queuedata; |
34 | ||
4e93b9a6 | 35 | if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) |
a8ad82cc SRT |
36 | return BLKPREP_KILL; |
37 | ||
e8064021 | 38 | req->rq_flags |= RQF_DONTPREP; |
1da177e4 | 39 | |
9c9f2d63 | 40 | return BLKPREP_OK; |
1da177e4 LT |
41 | } |
42 | ||
/*
 * Per-queue worker thread: fetches requests from the block layer dispatch
 * queue and hands them to mmc_blk_issue_rq(), sleeping when there is
 * nothing to do.
 */
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;
        struct mmc_context_info *cntx = &mq->card->host->context_info;

        /*
         * Allow use of memory reserves: this thread may need to allocate
         * while writing out dirty pages to the very device it serves.
         */
        current->flags |= PF_MEMALLOC;

        /*
         * thread_sem is held whenever this thread is actively processing;
         * mmc_queue_suspend() takes it to wait for the thread to go idle.
         */
        down(&mq->thread_sem);
        do {
                struct request *req;

                spin_lock_irq(q->queue_lock);
                /* Set state before fetching so a concurrent wake is not lost. */
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->asleep = false;
                cntx->is_waiting_last_req = false;
                cntx->is_new_req = false;
                if (!req) {
                        /*
                         * Dispatch queue is empty so set flags for
                         * mmc_request_fn() to wake us up.
                         */
                        if (mq->qcnt)
                                cntx->is_waiting_last_req = true;
                        else
                                mq->asleep = true;
                }
                spin_unlock_irq(q->queue_lock);

                /* Issue if we fetched a request or one is still in flight. */
                if (req || mq->qcnt) {
                        set_current_state(TASK_RUNNING);
                        mmc_blk_issue_rq(mq, req);
                        cond_resched();
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        /*
                         * Drop thread_sem while sleeping so suspend can
                         * proceed; reacquire before processing again.
                         */
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}
91 | ||
/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        struct mmc_context_info *cntx;

        if (!mq) {
                /*
                 * Queue is being torn down (mmc_cleanup_queue() cleared
                 * queuedata): quietly fail every pending request.
                 */
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->rq_flags |= RQF_QUIET;
                        __blk_end_request_all(req, BLK_STS_IOERR);
                }
                return;
        }

        cntx = &mq->card->host->context_info;

        if (cntx->is_waiting_last_req) {
                /*
                 * The queue thread is waiting on the previous request;
                 * signal that a new request has arrived.
                 */
                cntx->is_new_req = true;
                wake_up_interruptible(&cntx->wait);
        }

        /* Kick the queue thread if it went to sleep on an empty queue. */
        if (mq->asleep)
                wake_up_process(mq->thread);
}
122 | ||
304419d8 | 123 | static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp) |
97868a2b PF |
124 | { |
125 | struct scatterlist *sg; | |
126 | ||
304419d8 | 127 | sg = kmalloc_array(sg_len, sizeof(*sg), gfp); |
7b410d07 | 128 | if (sg) |
97868a2b | 129 | sg_init_table(sg, sg_len); |
97868a2b PF |
130 | |
131 | return sg; | |
132 | } | |
133 | ||
e056a1b5 AH |
134 | static void mmc_queue_setup_discard(struct request_queue *q, |
135 | struct mmc_card *card) | |
136 | { | |
137 | unsigned max_discard; | |
138 | ||
139 | max_discard = mmc_calc_max_discard(card); | |
140 | if (!max_discard) | |
141 | return; | |
142 | ||
143 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); | |
2bb4cd5c | 144 | blk_queue_max_discard_sectors(q, max_discard); |
e056a1b5 AH |
145 | q->limits.discard_granularity = card->pref_erase << 9; |
146 | /* granularity must not be greater than max. discard */ | |
147 | if (card->pref_erase > max_discard) | |
148 | q->limits.discard_granularity = 0; | |
775a9362 | 149 | if (mmc_can_secure_erase_trim(card)) |
288dab8a | 150 | queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); |
e056a1b5 AH |
151 | } |
152 | ||
7b410d07 AH |
153 | static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host) |
154 | { | |
155 | unsigned int bouncesz = MMC_QUEUE_BOUNCESZ; | |
156 | ||
c3dccb74 | 157 | if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF)) |
7b410d07 AH |
158 | return 0; |
159 | ||
160 | if (bouncesz > host->max_req_size) | |
161 | bouncesz = host->max_req_size; | |
162 | if (bouncesz > host->max_seg_size) | |
163 | bouncesz = host->max_seg_size; | |
164 | if (bouncesz > host->max_blk_count * 512) | |
165 | bouncesz = host->max_blk_count * 512; | |
166 | ||
167 | if (bouncesz <= 512) | |
168 | return 0; | |
169 | ||
170 | return bouncesz; | |
171 | } | |
f2b8b522 | 172 | |
304419d8 LW |
173 | /** |
174 | * mmc_init_request() - initialize the MMC-specific per-request data | |
175 | * @q: the request queue | |
176 | * @req: the request | |
177 | * @gfp: memory allocation policy | |
178 | */ | |
179 | static int mmc_init_request(struct request_queue *q, struct request *req, | |
180 | gfp_t gfp) | |
64e29e42 | 181 | { |
304419d8 LW |
182 | struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); |
183 | struct mmc_queue *mq = q->queuedata; | |
184 | struct mmc_card *card = mq->card; | |
185 | struct mmc_host *host = card->host; | |
64e29e42 | 186 | |
304419d8 LW |
187 | if (card->bouncesz) { |
188 | mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp); | |
189 | if (!mq_rq->bounce_buf) | |
190 | return -ENOMEM; | |
191 | if (card->bouncesz > 512) { | |
192 | mq_rq->sg = mmc_alloc_sg(1, gfp); | |
193 | if (!mq_rq->sg) | |
194 | return -ENOMEM; | |
195 | mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512, | |
196 | gfp); | |
197 | if (!mq_rq->bounce_sg) | |
198 | return -ENOMEM; | |
199 | } | |
200 | } else { | |
201 | mq_rq->bounce_buf = NULL; | |
202 | mq_rq->bounce_sg = NULL; | |
203 | mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp); | |
204 | if (!mq_rq->sg) | |
7b410d07 | 205 | return -ENOMEM; |
c5bda0ca | 206 | } |
64e29e42 | 207 | |
c5bda0ca AH |
208 | return 0; |
209 | } | |
64e29e42 | 210 | |
304419d8 | 211 | static void mmc_exit_request(struct request_queue *q, struct request *req) |
c5bda0ca | 212 | { |
304419d8 | 213 | struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); |
7b410d07 | 214 | |
304419d8 LW |
215 | /* It is OK to kfree(NULL) so this will be smooth */ |
216 | kfree(mq_rq->bounce_sg); | |
217 | mq_rq->bounce_sg = NULL; | |
7b410d07 | 218 | |
304419d8 LW |
219 | kfree(mq_rq->bounce_buf); |
220 | mq_rq->bounce_buf = NULL; | |
7b410d07 | 221 | |
304419d8 LW |
222 | kfree(mq_rq->sg); |
223 | mq_rq->sg = NULL; | |
c09949cf AH |
224 | } |
225 | ||
1da177e4 LT |
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.  Returns 0 on success or a
 * negative errno; on failure the block queue is cleaned up again.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret = -ENOMEM;

        /* Derive the bounce limit from the host's DMA mask when it has one. */
        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

        /*
         * mmc_init_request() depends on card->bouncesz so it must be calculated
         * before blk_init_allocated_queue() starts allocating requests.
         */
        card->bouncesz = mmc_queue_calc_bouncesz(host);

        mq->card = card;
        mq->queue = blk_alloc_queue(GFP_KERNEL);
        if (!mq->queue)
                return -ENOMEM;
        mq->queue->queue_lock = lock;
        mq->queue->request_fn = mmc_request_fn;
        mq->queue->init_rq_fn = mmc_init_request;
        mq->queue->exit_rq_fn = mmc_exit_request;
        /* Reserve per-request space for struct mmc_queue_req. */
        mq->queue->cmd_size = sizeof(struct mmc_queue_req);
        mq->queue->queuedata = mq;
        mq->qcnt = 0;
        ret = blk_init_allocated_queue(mq->queue);
        if (ret) {
                blk_cleanup_queue(mq->queue);
                return ret;
        }

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

        if (card->bouncesz) {
                /* Bounce buffer in use: limit transfers to its size. */
                blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
                blk_queue_max_segments(mq->queue, card->bouncesz / 512);
                blk_queue_max_segment_size(mq->queue, card->bouncesz);
        } else {
                /* No bouncing: expose the host's own transfer limits. */
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");

        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto cleanup_queue;
        }

        return 0;

cleanup_queue:
        blk_cleanup_queue(mq->queue);
        return ret;
}
1da177e4 LT |
302 | |
/*
 * Tear down a queue created by mmc_init_queue(): stop the worker thread
 * and fail any requests still pending.  The teardown order below is
 * significant.
 */
void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /*
         * Empty the queue: with queuedata cleared, mmc_request_fn()
         * fails all remaining requests with an I/O error.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
323 | ||
324 | /** | |
325 | * mmc_queue_suspend - suspend a MMC request queue | |
326 | * @mq: MMC queue to suspend | |
327 | * | |
328 | * Stop the block request queue, and wait for our thread to | |
329 | * complete any outstanding requests. This ensures that we | |
330 | * won't suspend while a request is being processed. | |
331 | */ | |
332 | void mmc_queue_suspend(struct mmc_queue *mq) | |
333 | { | |
165125e1 | 334 | struct request_queue *q = mq->queue; |
1da177e4 LT |
335 | unsigned long flags; |
336 | ||
9491be5f LW |
337 | if (!mq->suspended) { |
338 | mq->suspended |= true; | |
1da177e4 LT |
339 | |
340 | spin_lock_irqsave(q->queue_lock, flags); | |
341 | blk_stop_queue(q); | |
342 | spin_unlock_irqrestore(q->queue_lock, flags); | |
343 | ||
344 | down(&mq->thread_sem); | |
345 | } | |
346 | } | |
1da177e4 LT |
347 | |
348 | /** | |
349 | * mmc_queue_resume - resume a previously suspended MMC request queue | |
350 | * @mq: MMC queue to resume | |
351 | */ | |
352 | void mmc_queue_resume(struct mmc_queue *mq) | |
353 | { | |
165125e1 | 354 | struct request_queue *q = mq->queue; |
1da177e4 LT |
355 | unsigned long flags; |
356 | ||
9491be5f LW |
357 | if (mq->suspended) { |
358 | mq->suspended = false; | |
1da177e4 LT |
359 | |
360 | up(&mq->thread_sem); | |
361 | ||
362 | spin_lock_irqsave(q->queue_lock, flags); | |
363 | blk_start_queue(q); | |
364 | spin_unlock_irqrestore(q->queue_lock, flags); | |
365 | } | |
366 | } | |
98ac2162 | 367 | |
2ff1fa67 PO |
368 | /* |
369 | * Prepare the sg list(s) to be handed of to the host driver | |
370 | */ | |
97868a2b | 371 | unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) |
98ccf149 PO |
372 | { |
373 | unsigned int sg_len; | |
2ff1fa67 PO |
374 | size_t buflen; |
375 | struct scatterlist *sg; | |
67e69d52 | 376 | struct request *req = mmc_queue_req_to_req(mqrq); |
2ff1fa67 | 377 | int i; |
98ccf149 | 378 | |
03d640ae | 379 | if (!mqrq->bounce_buf) |
67e69d52 | 380 | return blk_rq_map_sg(mq->queue, req, mqrq->sg); |
98ccf149 | 381 | |
67e69d52 | 382 | sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg); |
98ccf149 | 383 | |
97868a2b | 384 | mqrq->bounce_sg_len = sg_len; |
98ccf149 | 385 | |
2ff1fa67 | 386 | buflen = 0; |
97868a2b | 387 | for_each_sg(mqrq->bounce_sg, sg, sg_len, i) |
2ff1fa67 | 388 | buflen += sg->length; |
98ccf149 | 389 | |
97868a2b | 390 | sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen); |
98ccf149 PO |
391 | |
392 | return 1; | |
393 | } | |
394 | ||
2ff1fa67 PO |
395 | /* |
396 | * If writing, bounce the data to the buffer before the request | |
397 | * is sent to the host driver | |
398 | */ | |
97868a2b | 399 | void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq) |
98ccf149 | 400 | { |
97868a2b | 401 | if (!mqrq->bounce_buf) |
98ccf149 PO |
402 | return; |
403 | ||
67e69d52 | 404 | if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE) |
98ccf149 PO |
405 | return; |
406 | ||
97868a2b PF |
407 | sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, |
408 | mqrq->bounce_buf, mqrq->sg[0].length); | |
98ccf149 PO |
409 | } |
410 | ||
2ff1fa67 PO |
411 | /* |
412 | * If reading, bounce the data from the buffer after the request | |
413 | * has been handled by the host driver | |
414 | */ | |
97868a2b | 415 | void mmc_queue_bounce_post(struct mmc_queue_req *mqrq) |
98ccf149 | 416 | { |
97868a2b | 417 | if (!mqrq->bounce_buf) |
98ccf149 PO |
418 | return; |
419 | ||
67e69d52 | 420 | if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ) |
98ccf149 PO |
421 | return; |
422 | ||
97868a2b PF |
423 | sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, |
424 | mqrq->bounce_buf, mqrq->sg[0].length); | |
98ccf149 | 425 | } |