/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536
/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
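/*
 * Note: mmc_prep_request() above is wired in via blk_queue_prep_rq() in
 * mmc_init_queue() and follows the usual legacy prep contract: BLKPREP_OK
 * lets the request through, BLKPREP_KILL fails it.  REQ_DONTPREP marks the
 * request so the block layer does not run prep again if it is requeued.
 */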
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (mmc_req_is_special(req))
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
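/*
 * Note on the two request slots used by mmc_queue_thread(): mqrq_cur holds
 * the request just fetched, mqrq_prev the one the host may still be
 * finishing.  Swapping the two after issue_fn() is what lets the block
 * driver prepare the next transfer while the previous one is still in
 * flight (asynchronous requests).  When issue_fn() reports
 * MMC_QUEUE_NEW_REQUEST, issuing continued in the original context, so the
 * thread simply fetches again without rotating the slots.
 */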
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * New MMC request arrived when MMC thread may be
		 * blocked on the previous request to be complete
		 * with no current request fetched
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}
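/*
 * Note: mmc_request_fn() runs in the context of whoever queued the bio and
 * never issues I/O itself.  It either pokes the issue path that is waiting
 * on the last request (is_waiting_last_req) or wakes mmcqd so
 * mmc_queue_thread() fetches the new request.  If the queue is being torn
 * down (!mq, see mmc_cleanup_queue()) it simply errors out anything still
 * pending.
 */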
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
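/*
 * Note on the granularity arithmetic in mmc_queue_setup_discard():
 * pref_erase is kept in 512-byte sectors, so "<< 9" converts it to the
 * byte granularity the block layer expects (pref_erase = 1024 gives a
 * 512 KiB discard granularity, for example).  A granularity of 0 means
 * "unknown", which is safer than advertising one larger than max_discard.
 */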
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;
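		/*
		 * At this point bouncesz is the default 64 KiB clamped to
		 * whatever the host can actually handle in one request: its
		 * maximum request size, maximum segment size, and maximum
		 * block count times the 512-byte block size.  The queue
		 * limits set below are expressed in 512-byte sectors, hence
		 * the bouncesz / 512 conversions.
		 */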
		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			} else {
				mqrq_prev->bounce_buf =
						kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_prev->bounce_buf) {
					pr_warn("%s: unable to allocate bounce prev buffer\n",
						mmc_card_name(card));
					kfree(mqrq_cur->bounce_buf);
					mqrq_cur->bounce_buf = NULL;
				}
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}
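/*
 * Note: the mmc_packed structures allocated above back the eMMC packed
 * command feature, which folds several requests (writes, in this
 * implementation) behind a single packed header so they can be issued as
 * one transfer; packed->list holds the block requests that make up the
 * current packed command.
 */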
void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}
/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
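/*
 * Note: taking thread_sem in mmc_queue_suspend() is what provides the
 * "wait for our thread" guarantee above: mmc_queue_thread() holds the
 * semaphore while it is processing requests and only releases it when
 * idle, so the down() cannot return in the middle of a transfer.
 * mmc_queue_resume() gives the semaphore back before restarting the queue.
 */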
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			sg_unmark_end(__sg++);
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		sg_unmark_end(__sg++);
	}
	sg_mark_end(sg + (sg_len - 1));

	return sg_len;
}
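/*
 * Note on the write case in mmc_queue_packed_map_sg(): the packed command
 * header (one 512-byte sector, or 4096 bytes for large-sector cards) is
 * mapped first, split into chunks no larger than the queue's maximum
 * segment size, and only then are the member requests' own segments
 * appended.  Every intermediate entry is unmarked and only the final one
 * is marked as the end of the scatterlist.
 */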
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}
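/*
 * Note: when a bounce buffer is in use, mmc_queue_map_sg() first maps the
 * request into bounce_sg, sums up the total length, and then collapses the
 * scatterlist actually handed to the host (mqrq->sg) into a single entry
 * covering the bounce buffer, which is why this path always returns an
 * sg_len of 1.
 */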
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}