]>
Commit | Line | Data |
---|---|---|
aa387cc8 MC |
1 | /* |
2 | * BSG helper library | |
3 | * | |
4 | * Copyright (C) 2008 James Smart, Emulex Corporation | |
5 | * Copyright (C) 2011 Red Hat, Inc. All rights reserved. | |
6 | * Copyright (C) 2011 Mike Christie | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License as published by | |
10 | * the Free Software Foundation; either version 2 of the License, or | |
11 | * (at your option) any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software | |
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
21 | * | |
22 | */ | |
23 | #include <linux/slab.h> | |
24 | #include <linux/blkdev.h> | |
25 | #include <linux/delay.h> | |
26 | #include <linux/scatterlist.h> | |
27 | #include <linux/bsg-lib.h> | |
28 | #include <scsi/scsi_cmnd.h> | |
29 | ||
/**
 * bsg_destroy_job - routine to teardown/delete a bsg job
 * @job: bsg_job that is to be torn down
 *
 * Drops the device reference taken in bsg_create_job() and frees the
 * scatterlists allocated by bsg_map_buffer() before freeing the job
 * itself.  Must only be called once the request has been completed.
 */
static void bsg_destroy_job(struct bsg_job *job)
{
	put_device(job->dev);	/* release reference for the request */

	/* kfree(NULL) is a no-op, so unmapped payloads are harmless */
	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);
	kfree(job);
}
42 | ||
/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result (0 on success, negative errno on failure)
 * @reply_payload_rcv_len: length of payload recvd
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *req = job->req;
	struct request *rsp = req->next_rq;	/* bidi reply request, if any */
	int err;

	err = job->req->errors = result;
	if (err < 0)
		/* we're only returning the result field in the reply */
		job->req->sense_len = sizeof(u32);
	else
		job->req->sense_len = job->reply_len;
	/* we assume all request payload was transferred, residual == 0 */
	req->resid_len = 0;

	if (rsp) {
		WARN_ON(reply_payload_rcv_len > rsp->resid_len);

		/* set reply (bidi) residual */
		rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
	}
	/* defers the rest of completion to bsg_softirq_done() */
	blk_complete_request(req);
}
EXPORT_SYMBOL_GPL(bsg_job_done);
76 | ||
/**
 * bsg_softirq_done - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 *
 * Runs after bsg_job_done() calls blk_complete_request(): ends the
 * block request with its recorded error, then tears down the job.
 * The request must be ended before the job (and its sg lists) is freed.
 */
static void bsg_softirq_done(struct request *rq)
{
	struct bsg_job *job = rq->special;

	blk_end_request_all(rq, rq->errors);
	bsg_destroy_job(job);
}
88 | ||
/**
 * bsg_map_buffer - allocate and map a scatterlist for a request's data
 * @buf: bsg_buffer to be filled in
 * @req: request whose bio segments are to be mapped
 *
 * Allocates one scatterlist entry per physical segment and maps the
 * request onto it.  On success the caller owns buf->sg_list and must
 * kfree() it (done in bsg_destroy_job()).
 *
 * Return: 0 on success, -ENOMEM if the scatterlist allocation fails.
 */
static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	/* callers only map requests that carry a bio, so this must hold */
	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kzalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}
103 | ||
104 | /** | |
105 | * bsg_create_job - create the bsg_job structure for the bsg request | |
106 | * @dev: device that is being sent the bsg request | |
107 | * @req: BSG request that needs a job structure | |
108 | */ | |
109 | static int bsg_create_job(struct device *dev, struct request *req) | |
110 | { | |
111 | struct request *rsp = req->next_rq; | |
112 | struct request_queue *q = req->q; | |
113 | struct bsg_job *job; | |
114 | int ret; | |
115 | ||
116 | BUG_ON(req->special); | |
117 | ||
118 | job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL); | |
119 | if (!job) | |
120 | return -ENOMEM; | |
121 | ||
122 | req->special = job; | |
123 | job->req = req; | |
124 | if (q->bsg_job_size) | |
125 | job->dd_data = (void *)&job[1]; | |
126 | job->request = req->cmd; | |
127 | job->request_len = req->cmd_len; | |
128 | job->reply = req->sense; | |
129 | job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer | |
130 | * allocated */ | |
131 | if (req->bio) { | |
132 | ret = bsg_map_buffer(&job->request_payload, req); | |
133 | if (ret) | |
134 | goto failjob_rls_job; | |
135 | } | |
136 | if (rsp && rsp->bio) { | |
137 | ret = bsg_map_buffer(&job->reply_payload, rsp); | |
138 | if (ret) | |
139 | goto failjob_rls_rqst_payload; | |
140 | } | |
141 | job->dev = dev; | |
142 | /* take a reference for the request */ | |
143 | get_device(job->dev); | |
144 | return 0; | |
145 | ||
146 | failjob_rls_rqst_payload: | |
147 | kfree(job->request_payload.sg_list); | |
148 | failjob_rls_job: | |
149 | kfree(job); | |
150 | return -ENOMEM; | |
151 | } | |
152 | ||
/*
 * bsg_goose_queue - restart queue in case it was stopped
 * @q: request q to be restarted
 *
 * NULL queues are tolerated; restarting is kicked off asynchronously.
 */
void bsg_goose_queue(struct request_queue *q)
{
	if (q)
		blk_run_queue_async(q);
}
EXPORT_SYMBOL_GPL(bsg_goose_queue);
165 | ||
/**
 * bsg_request_fn - generic handler for bsg requests
 * @q: request queue to manage
 *
 * On error the create_bsg_job function should return a -Exyz error value
 * that will be set to the req->errors.
 *
 * Drivers/subsys should pass this to the queue init function.
 *
 * NOTE(review): entered with q->queue_lock held (standard request_fn
 * contract — confirm); the lock is dropped around job creation (which
 * allocates with GFP_KERNEL) and re-taken before looping/returning.
 */
void bsg_request_fn(struct request_queue *q)
{
	struct device *dev = q->queuedata;
	struct request *req;
	struct bsg_job *job;
	int ret;

	/* bail out if the parent device is already going away */
	if (!get_device(dev))
		return;

	while (1) {
		req = blk_fetch_request(q);
		if (!req)
			break;
		/* drop the queue lock: job setup may sleep */
		spin_unlock_irq(q->queue_lock);

		ret = bsg_create_job(dev, req);
		if (ret) {
			/* fail just this request and keep servicing */
			req->errors = ret;
			blk_end_request_all(req, ret);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		/* hand the job to the LLD; nonzero means stop fetching */
		job = req->special;
		ret = q->bsg_job_fn(job);
		spin_lock_irq(q->queue_lock);
		if (ret)
			break;
	}

	/* put_device() may sleep, so drop the lock around it */
	spin_unlock_irq(q->queue_lock);
	put_device(dev);
	spin_lock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(bsg_request_fn);
211 | ||
212 | /** | |
213 | * bsg_setup_queue - Create and add the bsg hooks so we can receive requests | |
214 | * @dev: device to attach bsg device to | |
215 | * @q: request queue setup by caller | |
216 | * @name: device to give bsg device | |
217 | * @job_fn: bsg job handler | |
218 | * @dd_job_size: size of LLD data needed for each job | |
219 | * | |
220 | * The caller should have setup the reuqest queue with bsg_request_fn | |
221 | * as the request_fn. | |
222 | */ | |
223 | int bsg_setup_queue(struct device *dev, struct request_queue *q, | |
224 | char *name, bsg_job_fn *job_fn, int dd_job_size) | |
225 | { | |
226 | int ret; | |
227 | ||
228 | q->queuedata = dev; | |
229 | q->bsg_job_size = dd_job_size; | |
230 | q->bsg_job_fn = job_fn; | |
231 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); | |
232 | blk_queue_softirq_done(q, bsg_softirq_done); | |
233 | blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); | |
234 | ||
235 | ret = bsg_register_queue(q, dev, name, NULL); | |
236 | if (ret) { | |
237 | printk(KERN_ERR "%s: bsg interface failed to " | |
238 | "initialize - register queue\n", dev->kobj.name); | |
239 | return ret; | |
240 | } | |
241 | ||
242 | return 0; | |
243 | } | |
244 | EXPORT_SYMBOL_GPL(bsg_setup_queue); | |
245 | ||
/**
 * bsg_remove_queue - Deletes the bsg dev from the q
 * @q: the request_queue that is to be torn down.
 *
 * Notes:
 *   Before unregistering the queue empty any requests that are blocked.
 *   The queue is stopped first so no new requests are accepted, then all
 *   queued and in-flight requests are failed with -ENXIO before the bsg
 *   device is unregistered.
 */
void bsg_remove_queue(struct request_queue *q)
{
	struct request *req; /* block request */
	int counts; /* totals for request_list count and starved */

	if (!q)
		return;

	/* Stop taking in new requests */
	spin_lock_irq(q->queue_lock);
	blk_stop_queue(q);

	/* drain all requests in the queue */
	while (1) {
		/* need the lock to fetch a request
		 * this may fetch the same request as the previous pass
		 */
		req = blk_fetch_request(q);
		/* save requests in use and starved */
		counts = q->rq.count[0] + q->rq.count[1] +
			 q->rq.starved[0] + q->rq.starved[1];
		spin_unlock_irq(q->queue_lock);
		/* any requests still outstanding? */
		if (counts == 0)
			break;

		/* This may be the same req as the previous iteration,
		 * always send the blk_end_request_all after a prefetch.
		 * It is not okay to not end the request because the
		 * prefetch started the request.
		 */
		if (req) {
			/* return -ENXIO to indicate that this queue is
			 * going away
			 */
			req->errors = -ENXIO;
			blk_end_request_all(req, -ENXIO);
		}

		/* poll: give in-flight bsg jobs a chance to complete */
		msleep(200); /* allow bsg to possibly finish */
		spin_lock_irq(q->queue_lock);
	}
	bsg_unregister_queue(q);
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);