/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

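/*
 * Note on REQ_DONTPREP: marking the request here keeps the block layer
 * from running the prep function on it again should the request be
 * requeued and reprocessed.
 */
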
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

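/*
 * The thread_sem handshake above is what makes queue suspension safe:
 * mmc_queue_thread() holds thread_sem while issuing a request and only
 * drops it while sleeping, so a down(&mq->thread_sem) elsewhere (see
 * mmc_queue_suspend() below) blocks until any in-flight request has
 * completed.
 */
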
/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

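/*
 * Note that the thread is only woken when it is idle (!mq->req): if a
 * request is already being issued, mmc_queue_thread() will pick up the
 * newly queued work itself on its next loop iteration.
 */
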
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach to this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

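	/*
	 * Hosts that can only handle a single segment benefit from a
	 * bounce buffer: scattered pages are copied into (or out of) one
	 * physically contiguous buffer, so larger transfers can still be
	 * issued as a single segment.
	 */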
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif
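	/*
	 * Worked example of the clamping above (figures are illustrative,
	 * not from any particular host): with the default
	 * MMC_QUEUE_BOUNCESZ of 65536, a host reporting max_blk_count = 8
	 * caps bouncesz at 8 * 512 = 4096 bytes, so the bounce path is
	 * configured with 4096 / 512 = 8 sectors, 8 segments, and an
	 * 8-entry bounce_sg table.
	 */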

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

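/*
 * Illustrative sketch (not from this file): the MMC block driver of
 * this era wires a queue up roughly as follows, with a struct
 * mmc_blk_data "md" embedding the mmc_queue and mmc_blk_issue_rq as
 * the issue_fn:
 *
 *	spin_lock_init(&md->lock);
 *	ret = mmc_init_queue(&md->queue, card, &md->lock);
 *	if (ret)
 *		goto err;
 *
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 */
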
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

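/*
 * Clearing q->queuedata above is what arms the "dead queue" path in
 * mmc_request(): any request arriving after cleanup has started is
 * failed with -EIO instead of being handed to a thread that no longer
 * exists.
 */
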
/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

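/*
 * Illustrative sketch (not from this file): the card driver pairs the
 * two helpers above in its power-management hooks, roughly as the MMC
 * block driver of this era does:
 *
 *	static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
 *	{
 *		struct mmc_blk_data *md = mmc_get_drvdata(card);
 *
 *		if (md)
 *			mmc_queue_suspend(&md->queue);
 *		return 0;
 *	}
 *
 * with the matching resume hook calling mmc_queue_resume(&md->queue).
 */
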
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

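/*
 * When bouncing, the request's real layout is kept in bounce_sg while
 * the host driver is handed a single-entry list (mq->sg) covering the
 * contiguous bounce buffer; hence the unconditional return of 1 above.
 */
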
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

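/*
 * Illustrative sketch (not from this file): putting the helpers
 * together, an issue_fn implementation maps and bounces a request
 * roughly like this, following the MMC block driver of this era:
 *
 *	brq.data.sg = mq->sg;
 *	brq.data.sg_len = mmc_queue_map_sg(mq);
 *
 *	mmc_queue_bounce_pre(mq);
 *	mmc_wait_for_req(card->host, &brq.mrq);
 *	mmc_queue_bounce_post(mq);
 *
 * i.e. writes are copied into the bounce buffer before the transfer,
 * and reads are copied back out after it completes.
 */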