/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ 65536

#define MMC_QUEUE_SUSPENDED (1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = __blk_end_request(req, -EIO,
						blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach to this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	kfree(mq->sg);
	mq->sg = NULL;
	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}