/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>

#define uptr64(val) ((void __user *)(uintptr_t)(val))
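/*
 * Note (added commentary): the double cast above first widens the u64
 * from the sg_io_v4 header to uintptr_t and only then converts it to a
 * user pointer, which avoids integer-to-pointer size-mismatch warnings
 * on 32-bit builds.
 */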

static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}

static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
	if (IS_ERR(job->request))
		return PTR_ERR(job->request);
	return 0;
}

static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
	int ret = 0;

	/*
	 * The assignments below don't make much sense, but are kept for
	 * bug by bug backwards compatibility:
	 */
	hdr->device_status = job->result & 0xff;
	hdr->transport_status = host_byte(job->result);
	hdr->driver_status = driver_byte(job->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (job->result < 0) {
		/* we're only returning the result field in the reply */
		job->reply_len = sizeof(u32);
		ret = job->result;
	}

	if (job->reply_len && hdr->response) {
		int len = min(hdr->max_response_len, job->reply_len);

		if (copy_to_user(uptr64(hdr->response), job->reply, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	/* we assume all request payload was transferred, residual == 0 */
	hdr->dout_resid = 0;

	if (rq->next_rq) {
		unsigned int rsp_len = job->reply_payload.payload_len;

		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
			hdr->din_resid = 0;
		else
			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
	} else {
		hdr->din_resid = 0;
	}

	return ret;
}

static void bsg_transport_free_rq(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	kfree(job->request);
}

static const struct bsg_ops bsg_transport_ops = {
	.check_proto	= bsg_transport_check_proto,
	.fill_hdr	= bsg_transport_fill_hdr,
	.complete_rq	= bsg_transport_complete_rq,
	.free_rq	= bsg_transport_free_rq,
};

/**
 * bsg_teardown_job - routine to teardown a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 */
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_end_request_all(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);
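/*
 * Illustrative sketch, not part of the original file: an LLD that
 * completes a job asynchronously can pin it across the deferred work
 * with the get/put pair above (my_lld_queue_work() is hypothetical):
 *
 *	if (!bsg_job_get(job))
 *		return;			   (job is already being torn down)
 *	my_lld_queue_work(job);
 *	...
 *	bsg_job_put(job);	   (drop the extra reference when done)
 */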

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload received
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	blk_complete_request(blk_mq_rq_from_pdu(job));
}
EXPORT_SYMBOL_GPL(bsg_job_done);
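/*
 * Illustrative sketch, not part of the original file: a typical LLD
 * completion path copies any transport reply into job->reply and then
 * hands the job back to bsg-lib (my_cmd and its fields are
 * hypothetical):
 *
 *	memcpy(job->reply, my_cmd->rsp, my_cmd->rsp_len);
 *	job->reply_len = my_cmd->rsp_len;
 *	bsg_job_done(job, my_cmd->status, my_cmd->bytes_received);
 *
 * bsg_job_done() defers the teardown to bsg_softirq_done() via
 * blk_complete_request(), so it is safe to call from interrupt context.
 */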

/**
 * bsg_softirq_done - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_softirq_done(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kzalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}
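/*
 * Illustrative sketch, not part of the original file: an LLD can consume
 * the scatterlist built above with the standard scatterlist helpers, for
 * example copying reply data into the mapped payload before completing
 * (rsp_data/rsp_len are hypothetical; DMA-capable drivers would hand
 * sg_list/sg_cnt to dma_map_sg() instead):
 *
 *	sg_copy_from_buffer(job->reply_payload.sg_list,
 *			    job->reply_payload.sg_cnt,
 *			    rsp_data, rsp_len);
 *	bsg_job_done(job, 0, rsp_len);
 */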

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct request *rsp = req->next_rq;
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (rsp && rsp->bio) {
		ret = bsg_map_buffer(&job->reply_payload, rsp);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}

/**
 * bsg_request_fn - generic handler for bsg requests
 * @q: request queue to manage
 *
 * On error, bsg_prepare_job() stores a negative errno value in
 * job->result so that it is reported back to userspace.
 *
 * Drivers/subsys should pass this to the queue init function.
 */
static void bsg_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct device *dev = q->queuedata;
	struct request *req;
	int ret;

	if (!get_device(dev))
		return;

	while (1) {
		req = blk_fetch_request(q);
		if (!req)
			break;
		spin_unlock_irq(q->queue_lock);

		if (!bsg_prepare_job(dev, req)) {
			blk_end_request_all(req, BLK_STS_OK);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
		spin_lock_irq(q->queue_lock);
		if (ret)
			break;
	}

	spin_unlock_irq(q->queue_lock);
	put_device(dev);
	spin_lock_irq(q->queue_lock);
}
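/*
 * Illustrative sketch, not part of the original file: the bsg_job_fn
 * dispatched above is supplied by the LLD through bsg_setup_queue(). A
 * minimal handler might look like this (my_lld and my_lld_send() are
 * hypothetical); note that a nonzero return makes bsg_request_fn() stop
 * fetching further requests for this run:
 *
 *	static int my_bsg_job_fn(struct bsg_job *job)
 *	{
 *		struct my_lld *lld = dev_get_drvdata(job->dev);
 *
 *		return my_lld_send(lld, job);
 *	}
 */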

/* called right after the request is allocated for the request_queue */
static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
	if (!job->reply)
		return -ENOMEM;
	return 0;
}

/* called right before the request is given to the request_queue user */
static void bsg_initialize_rq(struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	void *reply = job->reply;

	memset(job, 0, sizeof(*job));
	job->reply = reply;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;
	job->dd_data = job + 1;
}
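/*
 * Note (added commentary): job->reply is allocated once per request in
 * bsg_init_rq() and deliberately preserved across the memset() above so
 * it can be reused each time the request is handed out; dd_data points
 * just past struct bsg_job, where the LLD's dd_job_size bytes of per-job
 * data live.
 */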

static void bsg_exit_rq(struct request_queue *q, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: name to give the bsg device
 * @job_fn: bsg job handler
 * @dd_job_size: size of LLD data needed for each job
 * @release: @dev release function
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, int dd_job_size,
		void (*release)(struct device *))
{
	struct request_queue *q;
	int ret;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);
	q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	q->init_rq_fn = bsg_init_rq;
	q->exit_rq_fn = bsg_exit_rq;
	q->initialize_rq_fn = bsg_initialize_rq;
	q->request_fn = bsg_request_fn;

	ret = blk_init_allocated_queue(q);
	if (ret)
		goto out_cleanup_queue;

	q->queuedata = dev;
	q->bsg_job_fn = job_fn;
	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
	blk_queue_softirq_done(q, bsg_softirq_done);
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops, release);
	if (ret) {
		printk(KERN_ERR "%s: bsg interface failed to initialize - register queue\n",
		       dev->kobj.name);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_cleanup_queue(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
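/*
 * Illustrative sketch, not part of the original file: a transport class
 * typically creates its bsg queue at probe time (my_bsg_job_fn, struct
 * my_job_data and my_port are hypothetical):
 *
 *	q = bsg_setup_queue(dev, dev_name(dev), my_bsg_job_fn,
 *			    sizeof(struct my_job_data), NULL);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	my_port->bsg_q = q;
 *
 * and tears it down with bsg_unregister_queue() followed by
 * blk_cleanup_queue() when the device goes away.
 */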