/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

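/*
 * Size in bytes of the bounce buffer used when CONFIG_MMC_BLOCK_BOUNCE is
 * enabled and the host controller only supports a single segment; see the
 * bounce buffer setup in mmc_init_queue() below.
 */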
#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

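/*
 * Worker thread that feeds requests to the host driver. It fetches
 * requests off the block layer queue and hands them to mq->issue_fn(),
 * swapping the current and previous mmc_queue_req slots so that a new
 * request can be prepared while the previous one completes.
 */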
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;
		unsigned int cmd_flags = 0;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * The current request becomes the previous
			 * request and vice versa. A special request has
			 * already been completed, so do not carry it
			 * over as the previous request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new request arrived while the MMC thread may be
		 * blocked waiting for the previous request to complete,
		 * with no current request fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

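/*
 * Allocate and initialize a scatterlist with sg_len entries, reporting
 * -ENOMEM through *err on allocation failure.
 */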
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

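/*
 * Advertise the card's discard capabilities to the block layer: maximum
 * discard size, discard granularity (based on the preferred erase size),
 * and whether secure discard is supported.
 */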
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

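	/*
	 * For hosts that can only handle a single segment, fall back to
	 * bounce buffers: requests are linearized into one contiguous
	 * buffer so larger transfers can still be issued in one command.
	 */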
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warn("%s: unable to allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
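/**
 * mmc_cleanup_queue - tear down an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Resume the queue if it was suspended, stop the worker thread, drain
 * any remaining requests, and free the per-request resources.
 */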
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

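/*
 * Allocate the mmc_packed state used for packed command support on both
 * request slots; on failure, return -ENOMEM with neither slot allocated.
 */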
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

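/* Free the packed command state allocated by mmc_packed_init(). */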
void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

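/*
 * Map a packed command's requests into a scatterlist. For packed writes,
 * the packed command header is prepended as its own segment(s) ahead of
 * the data of each request on the packed list.
 */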
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			(__sg++)->page_link &= ~0x02;
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		(__sg++)->page_link &= ~0x02;
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}