/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

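/*
 * Note: BLKPREP_KILL tells the block layer to fail the request with an
 * I/O error, while REQ_DONTPREP marks an accepted request as prepared
 * so this callback is not run again if the request is requeued.
 */
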
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->mqrq_cur->req)
		wake_up_process(mq->thread);
}

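/*
 * The handoff works roughly like this (illustrative summary): the
 * block layer calls mmc_request() with q->queue_lock held; if mmcqd is
 * idle (mqrq_cur->req == NULL) it is woken with wake_up_process().
 * mmc_queue_thread() then fetches requests under the same lock and
 * feeds them to issue_fn, going back to sleep via schedule() once
 * blk_fetch_request() returns NULL.  thread_sem is released around the
 * sleep, so mmc_queue_suspend() can take it to wait for the thread to
 * finish the request it is currently processing.
 */
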
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0)
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

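/*
 * Worked example (hypothetical numbers): for a card with
 * pref_erase = 1024 sectors, the discard granularity above becomes
 * 1024 << 9 = 512 KiB.  If mmc_calc_max_discard() returned fewer than
 * 1024 sectors, the granularity is dropped back to 0 so that it never
 * exceeds max_discard_sectors.
 */
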
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
	mq->mqrq_cur = mqrq_cur;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
		}

		if (mqrq_cur->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

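	/*
	 * Sizing example (hypothetical host limits): with
	 * max_req_size = 128 KiB, max_seg_size = 32 KiB and
	 * max_blk_count = 128, bouncesz is clamped from 64 KiB down to
	 * min(131072, 32768, 128 * 512) = 32 KiB, i.e. a 64-sector
	 * bounce buffer and a 64-entry bounce_sg list.
	 */
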
	if (!mqrq_cur->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

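/*
 * Sketch of how the block driver wires this up (based on the card
 * allocation path in block.c; illustrative, not part of this file):
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		goto err_putdisk;
 *
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 */
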
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

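/*
 * Typical caller (sketch based on the PM hooks in block.c;
 * illustrative, not part of this file):
 *
 *	static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
 *	{
 *		struct mmc_blk_data *md = mmc_get_drvdata(card);
 *
 *		if (md)
 *			mmc_queue_suspend(&md->queue);
 *		return 0;
 *	}
 */
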
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

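/*
 * Note: in the bounce case the request is mapped twice.
 * blk_rq_map_sg() fills bounce_sg with the request's real (possibly
 * highmem) pages, and sg_init_one() then presents the host driver with
 * a single segment covering the contiguous bounce buffer, which is why
 * the function returns a segment count of 1.
 */
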
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
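
/*
 * Caller's view of the bounce helpers (sketch of the read/write issue
 * path in block.c; illustrative, not part of this file):
 *
 *	brq->data.sg = mqrq->sg;
 *	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 *
 *	mmc_queue_bounce_pre(mqrq);
 *	mmc_wait_for_req(card->host, &brq->mrq);
 *	mmc_queue_bounce_post(mqrq);
 */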