/* drivers/mmc/core/queue.c */
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

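/*
 * Worker thread for the queue: fetch requests from the block layer and
 * issue them to the card via mmc_blk_issue_rq(). The thread sleeps when
 * there is nothing to do and is woken by mmc_request_fn().
 */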
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->mqrq_prev->req)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

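/*
 * Allocate and initialise a scatterlist of @sg_len entries. On failure
 * *err is set to -ENOMEM and NULL is returned, otherwise *err is 0.
 */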
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

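/*
 * Advertise the card's discard/erase capabilities to the block layer:
 * maximum discard size, discard granularity derived from the card's
 * preferred erase size, and, where the card supports it, secure erase.
 */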
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
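/*
 * Allocate one contiguous bounce buffer of @bouncesz bytes per queue slot.
 * On failure, free whatever was already allocated, warn and return false.
 */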
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
					unsigned int bouncesz)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}

	return true;

out_err:
	while (--i >= 0) {
		kfree(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	pr_warn("%s: unable to allocate bounce buffers\n",
		mmc_card_name(mq->card));
	return false;
}

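/*
 * For each queue slot, allocate a single-entry scatterlist used to describe
 * the bounce buffer itself, plus a bounce scatterlist of bouncesz / 512
 * entries used to map the pages of the original request.
 */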
static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
				      unsigned int bouncesz)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
		if (ret)
			return ret;

		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

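/*
 * Allocate a scatterlist of @max_segs entries for each queue slot, used
 * when no bounce buffer is needed.
 */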
static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

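/*
 * Free the scatterlists and bounce buffer of a single queue slot.
 */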
static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}

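/*
 * Free the per-slot buffers for every entry in the queue.
 */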
static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
{
	int i;

	for (i = 0; i < mq->qdepth; i++)
		mmc_queue_req_free_bufs(&mq->mqrq[i]);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

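	/*
	 * Hosts that can only handle a single segment per request get a
	 * contiguous bounce buffer, so larger requests can still be issued
	 * as one transfer. Otherwise the host's own segment limits are
	 * applied to the queue directly.
	 */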
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

 cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}

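/**
 * mmc_cleanup_queue - tear down an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Make sure the queue is not suspended, stop the worker thread, empty
 * the request queue and free the per-request buffers.
 */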
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}