block/bsg-lib.c (mirror_ubuntu-hirsute-kernel, at commit "block: kill legacy parts of timeout handling")
/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>

#define uptr64(val) ((void __user *)(uintptr_t)(val))

static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}

static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);

	return PTR_ERR_OR_ZERO(job->request);
}

static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
	int ret = 0;

	/*
	 * The assignments below don't make much sense, but are kept for
	 * bug by bug backwards compatibility:
	 */
	hdr->device_status = job->result & 0xff;
	hdr->transport_status = host_byte(job->result);
	hdr->driver_status = driver_byte(job->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (job->result < 0) {
		/* we're only returning the result field in the reply */
		job->reply_len = sizeof(u32);
		ret = job->result;
	}

	if (job->reply_len && hdr->response) {
		int len = min(hdr->max_response_len, job->reply_len);

		if (copy_to_user(uptr64(hdr->response), job->reply, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	/* we assume all request payload was transferred, residual == 0 */
	hdr->dout_resid = 0;

	if (rq->next_rq) {
		unsigned int rsp_len = job->reply_payload.payload_len;

		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
			hdr->din_resid = 0;
		else
			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
	} else {
		hdr->din_resid = 0;
	}

	return ret;
}

static void bsg_transport_free_rq(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	kfree(job->request);
}

static const struct bsg_ops bsg_transport_ops = {
	.check_proto = bsg_transport_check_proto,
	.fill_hdr = bsg_transport_fill_hdr,
	.complete_rq = bsg_transport_complete_rq,
	.free_rq = bsg_transport_free_rq,
};

/**
 * bsg_teardown_job - routine to teardown a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 */
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_mq_end_request(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload recvd
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	blk_mq_complete_request(blk_mq_rq_from_pdu(job));
}
EXPORT_SYMBOL_GPL(bsg_job_done);
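
/*
 * Illustrative sketch (not part of bsg-lib): how an LLD's completion path
 * might hand a finished job back.  example_lld_complete() and its
 * arguments are hypothetical; the bsg_job_done() call is what this
 * library expects.
 */
#if 0
static void example_lld_complete(struct bsg_job *job, int lld_result,
				 unsigned int bytes_received)
{
	/*
	 * Report the result and the length of reply payload actually
	 * received; bsg-lib completes the blk-mq request and later fills
	 * in the sg_io_v4 header from job->result and
	 * job->reply_payload_rcv_len.
	 */
	bsg_job_done(job, lld_result, bytes_received);
}
#endif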

/**
 * bsg_complete - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_complete(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kzalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct request *rsp = req->next_rq;
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (rsp && rsp->bio) {
		ret = bsg_map_buffer(&job->reply_payload, rsp);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}

/**
 * bsg_queue_rq - generic handler for bsg requests
 * @hctx: hardware queue
 * @bd: queue data
 *
 * On error the create_bsg_job function should return a -Exyz error value
 * that will be set to ->result.
 *
 * Drivers/subsys should pass this to the queue init function.
 */
static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct device *dev = q->queuedata;
	struct request *req = bd->rq;
	int ret;

	blk_mq_start_request(req);

	if (!get_device(dev))
		return BLK_STS_IOERR;

	if (!bsg_prepare_job(dev, req))
		return BLK_STS_IOERR;

	ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
	if (ret)
		return BLK_STS_IOERR;

	put_device(dev);
	return BLK_STS_OK;
}
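
/*
 * Illustrative sketch (not part of bsg-lib): the general shape of a
 * driver's bsg_job_fn as invoked by bsg_queue_rq() above.
 * example_lld_bsg_request() and example_lld_send() are hypothetical.
 * Returning 0 means the job was queued to the hardware; a negative
 * -Exyz value makes bsg_queue_rq() fail the request with BLK_STS_IOERR.
 */
#if 0
static int example_lld_bsg_request(struct bsg_job *job)
{
	/*
	 * job->request/job->request_len hold the transport request, the
	 * data buffers were mapped into job->request_payload and
	 * job->reply_payload by bsg_prepare_job(), and per-job driver
	 * data lives at job->dd_data.
	 */
	int ret = example_lld_send(job);

	if (ret)
		return ret;

	/* the job is completed later via bsg_job_done() */
	return 0;
}
#endif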

/* called right after the request is allocated for the request_queue */
static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx, unsigned int numa_node)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!job->reply)
		return -ENOMEM;
	return 0;
}

/* called right before the request is given to the request_queue user */
static void bsg_initialize_rq(struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	void *reply = job->reply;

	memset(job, 0, sizeof(*job));
	job->reply = reply;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;
	job->dd_data = job + 1;
}

static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
			unsigned int hctx_idx)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}

void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct blk_mq_tag_set *set = q->tag_set;

		bsg_unregister_queue(q);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(set);
		kfree(set);
	}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);

static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
{
	enum blk_eh_timer_return ret = BLK_EH_DONE;
	struct request_queue *q = rq->q;

	if (q->bsg_job_timeout_fn)
		ret = q->bsg_job_timeout_fn(rq);

	return ret;
}

static const struct blk_mq_ops bsg_mq_ops = {
	.queue_rq = bsg_queue_rq,
	.init_request = bsg_init_rq,
	.exit_request = bsg_exit_rq,
	.initialize_rq_fn = bsg_initialize_rq,
	.complete = bsg_complete,
	.timeout = bsg_timeout,
};

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: name to give the bsg device
 * @job_fn: bsg job handler
 * @timeout: timeout handler for the queue
 * @dd_job_size: size of LLD data needed for each job
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, rq_timed_out_fn *timeout, int dd_job_size)
{
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	set = kzalloc(sizeof(*set), GFP_KERNEL);
	if (!set)
		return ERR_PTR(-ENOMEM);

	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	q->queuedata = dev;
	q->bsg_job_fn = job_fn;
	q->bsg_job_timeout_fn = timeout;
	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
	if (ret) {
		printk(KERN_ERR "%s: bsg interface failed to "
		       "initialize - register queue\n", dev->kobj.name);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_cleanup_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(set);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
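
/*
 * Illustrative sketch (not part of bsg-lib): how a transport driver might
 * create its bsg queue at probe time and remove it again on teardown.
 * struct example_lld, example_lld_probe(), example_lld_remove() and
 * example_lld_bsg_request() are hypothetical driver pieces.
 */
#if 0
struct example_lld {
	struct request_queue *bsg_q;
};

static int example_lld_probe(struct device *dev, struct example_lld *lld)
{
	struct request_queue *q;

	/* no driver timeout handler and no extra per-job data here */
	q = bsg_setup_queue(dev, dev_name(dev), example_lld_bsg_request,
			    NULL, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	lld->bsg_q = q;
	return 0;
}

static void example_lld_remove(struct example_lld *lld)
{
	/* unregisters the bsg device, then frees the queue and tag set */
	bsg_remove_queue(lld->bsg_q);
}
#endif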