/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

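/*
 * Default size of the bounce buffer used for hosts that can only do a
 * single segment per request; clamped further in mmc_init_queue().
 */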
#define MMC_QUEUE_BOUNCESZ 65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
	    req_op(req) != REQ_OP_SECURE_ERASE) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

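/*
 * Worker thread that fetches requests from the block queue and hands
 * them to the issue function. Two mmc_queue_req slots (current and
 * previous) are swapped each iteration so a new request can be
 * prepared while the previous one is still being processed.
 */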
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * The current request becomes the previous one,
			 * and vice versa. A special request has already
			 * been completed, so do not carry it over as the
			 * previous request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new MMC request arrived while the queue thread may
		 * be blocked waiting for the previous request to
		 * complete, with no current request fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

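/*
 * Allocate and initialise a scatterlist with sg_len entries. Returns
 * NULL and sets *err to -ENOMEM on allocation failure.
 */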
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

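/*
 * Advertise the card's discard (erase/trim) capabilities on the block
 * queue, including the maximum discard size and granularity.
 */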
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

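	/*
	 * For hosts that can only handle a single segment per request,
	 * use a contiguous bounce buffer so larger transfers can still
	 * be performed in one go.
	 */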
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			} else {
				mqrq_prev->bounce_buf =
					kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_prev->bounce_buf) {
					pr_warn("%s: unable to allocate bounce prev buffer\n",
						mmc_card_name(card));
					kfree(mqrq_cur->bounce_buf);
					mqrq_cur->bounce_buf = NULL;
				}
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

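	/*
	 * No bounce buffers in use: expose the host controller's own
	 * DMA and segment limits directly on the block queue.
	 */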
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

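/*
 * Tear down everything mmc_init_queue() set up: resume and stop the
 * worker thread, drain the block queue, and free the scatterlists and
 * bounce buffers of both request slots.
 */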
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

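/*
 * Allocate the packed-command bookkeeping for both request slots.
 * On partial failure the already allocated slot is freed again.
 */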
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

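/*
 * Map a packed command onto a scatterlist. For packed writes the
 * packed command header is mapped first, followed by the data of
 * every request on the packed list.
 */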
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			sg_unmark_end(__sg++);
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		sg_unmark_end(__sg++);
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}