/*
 *  linux/drivers/mmc/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request.  This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

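/*
 * Worker thread, one per card.  It pulls requests off the block queue
 * and feeds them to mq->issue_fn(), sleeping whenever the queue is
 * empty.  PF_MEMALLOC lets it keep making progress when memory is
 * tight, since writeback may depend on this thread.  thread_sem is
 * held while the thread is doing work and dropped around schedule(),
 * so mmc_queue_suspend() can take it to wait for the thread to go
 * idle.
 */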
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(request_queue_t *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
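		/*
		 * The queue is being torn down: mmc_cleanup_queue() has
		 * already cleared queuedata, so complete every remaining
		 * request as failed rather than leaving it hanging.
		 */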
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	unsigned int bouncesz;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

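	/*
	 * Hosts that can only handle a single hardware segment would
	 * otherwise be limited to one segment per request.  Give them a
	 * contiguous bounce buffer (at most MMC_QUEUE_BOUNCESZ bytes)
	 * instead, and present the whole buffer to the controller as a
	 * single segment.
	 */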
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto free_bounce_buf;
			}

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto free_sg;
			}
		}
	}
#endif

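	/*
	 * No bounce buffer: expose the host's real DMA and segment
	 * limits directly to the block layer.
	 */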
	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
 free_bounce_buf:
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
 cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}

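/**
 * mmc_cleanup_queue - shut down and free an MMC request queue
 * @mq: MMC queue to tear down
 *
 * Fail any requests still arriving, stop the worker thread, and
 * release the scatterlists, bounce buffer and block queue.
 */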
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

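/*
 * Copy data between two scatterlists, walking both lists and splitting
 * the copy into chunks wherever either side crosses a segment
 * boundary.  The source must not run longer than the destination.
 */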
static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = page_address(dst->page) + dst->offset;
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = page_address(src->page) + src->offset;
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}

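/*
 * Map the current request for DMA.  Without a bounce buffer this is a
 * straight blk_rq_map_sg().  With one, the request's real layout is
 * kept in bounce_sg while the host is handed a single-entry
 * scatterlist covering the bounce buffer.
 */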
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

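	/*
	 * Point the single sg entry at the bounce buffer and make its
	 * length the sum of all the real segments.
	 */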
	mq->sg[0].page = virt_to_page(mq->bounce_buf);
	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
	mq->sg[0].length = 0;

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}

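/*
 * For writes, the request data must be in the bounce buffer before
 * the host starts the transfer, so copy it in here.
 */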
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}

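/*
 * For reads, the transferred data lands in the bounce buffer, so copy
 * it back out to the request's real pages once the host is done.
 */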
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}