/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *   Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2
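
/*
 * Fixed virtqueue layout (set up in virtscsi_init below): vq 0 carries
 * task management and asynchronous-notification requests, vq 1 carries
 * hotplug and parameter-change events, and request queues start at
 * VIRTIO_SCSI_VQ_BASE, so a request vq's slot in req_vqs[] is
 * vq->index - VIRTIO_SCSI_VQ_BASE.
 */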
33 | ||
34 | /* Command queue element */ | |
35 | struct virtio_scsi_cmd { | |
36 | struct scsi_cmnd *sc; | |
37 | struct completion *comp; | |
38 | union { | |
39 | struct virtio_scsi_cmd_req cmd; | |
40 | struct virtio_scsi_ctrl_tmf_req tmf; | |
41 | struct virtio_scsi_ctrl_an_req an; | |
42 | } req; | |
43 | union { | |
44 | struct virtio_scsi_cmd_resp cmd; | |
45 | struct virtio_scsi_ctrl_tmf_resp tmf; | |
46 | struct virtio_scsi_ctrl_an_resp an; | |
47 | struct virtio_scsi_event evt; | |
48 | } resp; | |
49 | } ____cacheline_aligned_in_smp; | |
50 | ||
51 | struct virtio_scsi_event_node { | |
52 | struct virtio_scsi *vscsi; | |
53 | struct virtio_scsi_event event; | |
54 | struct work_struct work; | |
55 | }; | |
56 | ||
57 | struct virtio_scsi_vq { | |
58 | /* Protects vq */ | |
59 | spinlock_t vq_lock; | |
60 | ||
61 | struct virtqueue *vq; | |
62 | }; | |
63 | ||
64 | /* | |
65 | * Per-target queue state. | |
66 | * | |
67 | * This struct holds the data needed by the queue steering policy. When a | |
68 | * target is sent multiple requests, we need to drive them to the same queue so | |
69 | * that FIFO processing order is kept. However, if a target was idle, we can | |
70 | * choose a queue arbitrarily. In this case the queue is chosen according to | |
71 | * the current VCPU, so the driver expects the number of request queues to be | |
72 | * equal to the number of VCPUs. This makes it easy and fast to select the | |
73 | * queue, and also lets the driver optimize the IRQ affinity for the virtqueues | |
74 | * (each virtqueue's affinity is set to the CPU that "owns" the queue). | |
75 | * | |
76 | * An interesting effect of this policy is that only writes to req_vq need to | |
77 | * take the tgt_lock. Read can be done outside the lock because: | |
78 | * | |
79 | * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1. | |
80 | * In that case, no other CPU is reading req_vq: even if they were in | |
81 | * virtscsi_queuecommand_multi, they would be spinning on tgt_lock. | |
82 | * | |
83 | * - reads of req_vq only occur when the target is not idle (reqs != 0). | |
84 | * A CPU that enters virtscsi_queuecommand_multi will not modify req_vq. | |
85 | * | |
86 | * Similarly, decrements of reqs are never concurrent with writes of req_vq. | |
87 | * Thus they can happen outside the tgt_lock, provided of course we make reqs | |
88 | * an atomic_t. | |
89 | */ | |
90 | struct virtio_scsi_target_state { | |
91 | /* This spinlock never held at the same time as vq_lock. */ | |
92 | spinlock_t tgt_lock; | |
93 | ||
94 | /* Count of outstanding requests. */ | |
95 | atomic_t reqs; | |
96 | ||
97 | /* Currently active virtqueue for requests sent to this target. */ | |
98 | struct virtio_scsi_vq *req_vq; | |
99 | }; | |
100 | ||
101 | /* Driver instance state */ | |
102 | struct virtio_scsi { | |
103 | struct virtio_device *vdev; | |
104 | ||
105 | /* Get some buffers ready for event vq */ | |
106 | struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN]; | |
107 | ||
108 | u32 num_queues; | |
109 | ||
110 | /* If the affinity hint is set for virtqueues */ | |
111 | bool affinity_hint_set; | |
112 | ||
113 | /* CPU hotplug notifier */ | |
114 | struct notifier_block nb; | |
115 | ||
116 | struct virtio_scsi_vq ctrl_vq; | |
117 | struct virtio_scsi_vq event_vq; | |
118 | struct virtio_scsi_vq req_vqs[]; | |
119 | }; | |
120 | ||
121 | static struct kmem_cache *virtscsi_cmd_cache; | |
122 | static mempool_t *virtscsi_cmd_pool; | |
123 | ||
124 | static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev) | |
125 | { | |
126 | return vdev->priv; | |
127 | } | |
128 | ||
129 | static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) | |
130 | { | |
131 | if (!resid) | |
132 | return; | |
133 | ||
134 | if (!scsi_bidi_cmnd(sc)) { | |
135 | scsi_set_resid(sc, resid); | |
136 | return; | |
137 | } | |
138 | ||
139 | scsi_in(sc)->resid = min(resid, scsi_in(sc)->length); | |
140 | scsi_out(sc)->resid = resid - scsi_in(sc)->resid; | |
141 | } | |
142 | ||
143 | /** | |
144 | * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done | |
145 | * | |
146 | * Called with vq_lock held. | |
147 | */ | |
148 | static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) | |
149 | { | |
150 | struct virtio_scsi_cmd *cmd = buf; | |
151 | struct scsi_cmnd *sc = cmd->sc; | |
152 | struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; | |
153 | struct virtio_scsi_target_state *tgt = | |
154 | scsi_target(sc->device)->hostdata; | |
155 | ||
156 | dev_dbg(&sc->device->sdev_gendev, | |
157 | "cmd %p response %u status %#02x sense_len %u\n", | |
158 | sc, resp->response, resp->status, resp->sense_len); | |
159 | ||
160 | sc->result = resp->status; | |
161 | virtscsi_compute_resid(sc, resp->resid); | |
162 | switch (resp->response) { | |
163 | case VIRTIO_SCSI_S_OK: | |
164 | set_host_byte(sc, DID_OK); | |
165 | break; | |
166 | case VIRTIO_SCSI_S_OVERRUN: | |
167 | set_host_byte(sc, DID_ERROR); | |
168 | break; | |
169 | case VIRTIO_SCSI_S_ABORTED: | |
170 | set_host_byte(sc, DID_ABORT); | |
171 | break; | |
172 | case VIRTIO_SCSI_S_BAD_TARGET: | |
173 | set_host_byte(sc, DID_BAD_TARGET); | |
174 | break; | |
175 | case VIRTIO_SCSI_S_RESET: | |
176 | set_host_byte(sc, DID_RESET); | |
177 | break; | |
178 | case VIRTIO_SCSI_S_BUSY: | |
179 | set_host_byte(sc, DID_BUS_BUSY); | |
180 | break; | |
181 | case VIRTIO_SCSI_S_TRANSPORT_FAILURE: | |
182 | set_host_byte(sc, DID_TRANSPORT_DISRUPTED); | |
183 | break; | |
184 | case VIRTIO_SCSI_S_TARGET_FAILURE: | |
185 | set_host_byte(sc, DID_TARGET_FAILURE); | |
186 | break; | |
187 | case VIRTIO_SCSI_S_NEXUS_FAILURE: | |
188 | set_host_byte(sc, DID_NEXUS_FAILURE); | |
189 | break; | |
190 | default: | |
191 | scmd_printk(KERN_WARNING, sc, "Unknown response %d", | |
192 | resp->response); | |
193 | /* fall through */ | |
194 | case VIRTIO_SCSI_S_FAILURE: | |
195 | set_host_byte(sc, DID_ERROR); | |
196 | break; | |
197 | } | |
198 | ||
199 | WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE); | |
200 | if (sc->sense_buffer) { | |
201 | memcpy(sc->sense_buffer, resp->sense, | |
202 | min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE)); | |
203 | if (resp->sense_len) | |
204 | set_driver_byte(sc, DRIVER_SENSE); | |
205 | } | |
206 | ||
207 | mempool_free(cmd, virtscsi_cmd_pool); | |
208 | sc->scsi_done(sc); | |
209 | ||
210 | atomic_dec(&tgt->reqs); | |
211 | } | |
212 | ||
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	/*
	 * Read req_vq before decrementing the reqs field in
	 * virtscsi_complete_cmd.
	 *
	 * With barriers:
	 *
	 *	CPU #0			virtscsi_queuecommand_multi (CPU #1)
	 *	------------------------------------------------------------
	 *	lock vq_lock
	 *	read req_vq
	 *	read reqs (reqs = 1)
	 *	write reqs (reqs = 0)
	 *				increment reqs (reqs = 1)
	 *				write req_vq
	 *
	 * Possible reordering without barriers:
	 *
	 *	CPU #0			virtscsi_queuecommand_multi (CPU #1)
	 *	------------------------------------------------------------
	 *	lock vq_lock
	 *	read reqs (reqs = 1)
	 *	write reqs (reqs = 0)
	 *				increment reqs (reqs = 1)
	 *				write req_vq
	 *	read (wrong) req_vq
	 *
	 * We do not need a full smp_rmb, because req_vq is required to get
	 * to tgt->reqs: tgt is scsi_target(sc->device)->hostdata, where sc
	 * is stored in the virtqueue as the user token.
	 */
	smp_read_barrier_depends();

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete_all(cmd->comp);
	else
		mempool_free(cmd, virtscsi_cmd_pool);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

	return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}
331 | ||
332 | static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, | |
333 | struct virtio_scsi_event *event) | |
334 | { | |
335 | struct scsi_device *sdev; | |
336 | struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); | |
337 | unsigned int target = event->lun[1]; | |
338 | unsigned int lun = (event->lun[2] << 8) | event->lun[3]; | |
339 | ||
340 | switch (event->reason) { | |
341 | case VIRTIO_SCSI_EVT_RESET_RESCAN: | |
342 | scsi_add_device(shost, 0, target, lun); | |
343 | break; | |
344 | case VIRTIO_SCSI_EVT_RESET_REMOVED: | |
345 | sdev = scsi_device_lookup(shost, 0, target, lun); | |
346 | if (sdev) { | |
347 | scsi_remove_device(sdev); | |
348 | scsi_device_put(sdev); | |
349 | } else { | |
350 | pr_err("SCSI device %d 0 %d %d not found\n", | |
351 | shost->host_no, target, lun); | |
352 | } | |
353 | break; | |
354 | default: | |
355 | pr_info("Unsupport virtio scsi event reason %x\n", event->reason); | |
356 | } | |
357 | } | |
358 | ||
359 | static void virtscsi_handle_param_change(struct virtio_scsi *vscsi, | |
360 | struct virtio_scsi_event *event) | |
361 | { | |
362 | struct scsi_device *sdev; | |
363 | struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); | |
364 | unsigned int target = event->lun[1]; | |
365 | unsigned int lun = (event->lun[2] << 8) | event->lun[3]; | |
366 | u8 asc = event->reason & 255; | |
367 | u8 ascq = event->reason >> 8; | |
368 | ||
369 | sdev = scsi_device_lookup(shost, 0, target, lun); | |
370 | if (!sdev) { | |
371 | pr_err("SCSI device %d 0 %d %d not found\n", | |
372 | shost->host_no, target, lun); | |
373 | return; | |
374 | } | |
375 | ||
376 | /* Handle "Parameters changed", "Mode parameters changed", and | |
377 | "Capacity data has changed". */ | |
378 | if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09)) | |
379 | scsi_rescan_device(&sdev->sdev_gendev); | |
380 | ||
381 | scsi_device_put(sdev); | |
382 | } | |
383 | ||
384 | static void virtscsi_handle_event(struct work_struct *work) | |
385 | { | |
386 | struct virtio_scsi_event_node *event_node = | |
387 | container_of(work, struct virtio_scsi_event_node, work); | |
388 | struct virtio_scsi *vscsi = event_node->vscsi; | |
389 | struct virtio_scsi_event *event = &event_node->event; | |
390 | ||
391 | if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) { | |
392 | event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED; | |
393 | scsi_scan_host(virtio_scsi_host(vscsi->vdev)); | |
394 | } | |
395 | ||
396 | switch (event->event) { | |
397 | case VIRTIO_SCSI_T_NO_EVENT: | |
398 | break; | |
399 | case VIRTIO_SCSI_T_TRANSPORT_RESET: | |
400 | virtscsi_handle_transport_reset(vscsi, event); | |
401 | break; | |
402 | case VIRTIO_SCSI_T_PARAM_CHANGE: | |
403 | virtscsi_handle_param_change(vscsi, event); | |
404 | break; | |
405 | default: | |
406 | pr_err("Unsupport virtio scsi event %x\n", event->event); | |
407 | } | |
408 | virtscsi_kick_event(vscsi, event_node); | |
409 | } | |
410 | ||
411 | static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) | |
412 | { | |
413 | struct virtio_scsi_event_node *event_node = buf; | |
414 | ||
415 | INIT_WORK(&event_node->work, virtscsi_handle_event); | |
416 | schedule_work(&event_node->work); | |
417 | } | |
418 | ||
419 | static void virtscsi_event_done(struct virtqueue *vq) | |
420 | { | |
421 | struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); | |
422 | struct virtio_scsi *vscsi = shost_priv(sh); | |
423 | ||
424 | virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event); | |
425 | }; | |
426 | ||
/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
 * @vq: the struct virtqueue we're talking about
 * @cmd: command structure
 * @req_size: size of the request buffer
 * @resp_size: size of the response buffer
 * @gfp: flags to use for memory allocations
 */
static int virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size, gfp_t gfp)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[4], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &scsi_out(sc)->table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &scsi_in(sc)->table;
	}

	/* Request header. */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer. */
	if (out)
		sgs[out_num++] = out->sgl;

	/* Response header. */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer. */
	if (in)
		sgs[out_num + in_num++] = in->sgl;

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
}
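
/*
 * The scatterlist array built above follows the virtio convention:
 * device-readable buffers (request header, then data-out) come first,
 * followed by device-writable buffers (response header, then data-in).
 * virtqueue_add_sgs() takes a separate count for each kind, which is why
 * out_num is accumulated before in_num.
 */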
471 | ||
472 | static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, | |
473 | struct virtio_scsi_cmd *cmd, | |
474 | size_t req_size, size_t resp_size, gfp_t gfp) | |
475 | { | |
476 | unsigned long flags; | |
477 | int err; | |
478 | bool needs_kick = false; | |
479 | ||
480 | spin_lock_irqsave(&vq->vq_lock, flags); | |
481 | err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp); | |
482 | if (!err) | |
483 | needs_kick = virtqueue_kick_prepare(vq->vq); | |
484 | ||
485 | spin_unlock_irqrestore(&vq->vq_lock, flags); | |
486 | ||
487 | if (needs_kick) | |
488 | virtqueue_notify(vq->vq); | |
489 | return err; | |
490 | } | |
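
/*
 * Note the two-step kick above: virtqueue_kick_prepare() is cheap and runs
 * under vq_lock, while virtqueue_notify(), which may trap out to the host,
 * is called only after the lock has been dropped, keeping the critical
 * section short.
 */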
491 | ||
492 | static int virtscsi_queuecommand(struct virtio_scsi *vscsi, | |
493 | struct virtio_scsi_vq *req_vq, | |
494 | struct scsi_cmnd *sc) | |
495 | { | |
496 | struct virtio_scsi_cmd *cmd; | |
497 | int ret; | |
498 | ||
499 | struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); | |
500 | BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); | |
501 | ||
502 | /* TODO: check feature bit and fail if unsupported? */ | |
503 | BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL); | |
504 | ||
505 | dev_dbg(&sc->device->sdev_gendev, | |
506 | "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); | |
507 | ||
508 | ret = SCSI_MLQUEUE_HOST_BUSY; | |
509 | cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC); | |
510 | if (!cmd) | |
511 | goto out; | |
512 | ||
513 | memset(cmd, 0, sizeof(*cmd)); | |
514 | cmd->sc = sc; | |
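	/*
	 * Single-level LUN addressing as defined by virtio-scsi: lun[0] is
	 * fixed at 1, lun[1] is the target ID, and lun[2]/lun[3] carry the
	 * LUN in SAM flat-space format (hence the 0x40 flag in the high
	 * byte).  The scsi_cmnd pointer doubles as the tag, so the response
	 * can later be matched back to the command.
	 */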
	cmd->req.cmd = (struct virtio_scsi_cmd_req){
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,
		.task_attr = VIRTIO_SCSI_S_SIMPLE,
		.prio = 0,
		.crn = 0,
	};

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);

	if (virtscsi_kick_cmd(req_vq, cmd,
			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
			      GFP_ATOMIC) == 0)
		ret = 0;
	else
		mempool_free(cmd, virtscsi_cmd_pool);

out:
	return ret;
}
539 | ||
540 | static int virtscsi_queuecommand_single(struct Scsi_Host *sh, | |
541 | struct scsi_cmnd *sc) | |
542 | { | |
543 | struct virtio_scsi *vscsi = shost_priv(sh); | |
544 | struct virtio_scsi_target_state *tgt = | |
545 | scsi_target(sc->device)->hostdata; | |
546 | ||
547 | atomic_inc(&tgt->reqs); | |
548 | return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc); | |
549 | } | |
550 | ||
551 | static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, | |
552 | struct virtio_scsi_target_state *tgt) | |
553 | { | |
554 | struct virtio_scsi_vq *vq; | |
555 | unsigned long flags; | |
556 | u32 queue_num; | |
557 | ||
558 | spin_lock_irqsave(&tgt->tgt_lock, flags); | |
559 | ||
560 | /* | |
561 | * The memory barrier after atomic_inc_return matches | |
562 | * the smp_read_barrier_depends() in virtscsi_req_done. | |
563 | */ | |
564 | if (atomic_inc_return(&tgt->reqs) > 1) | |
565 | vq = ACCESS_ONCE(tgt->req_vq); | |
566 | else { | |
567 | queue_num = smp_processor_id(); | |
568 | while (unlikely(queue_num >= vscsi->num_queues)) | |
569 | queue_num -= vscsi->num_queues; | |
570 | ||
571 | tgt->req_vq = vq = &vscsi->req_vqs[queue_num]; | |
572 | } | |
573 | ||
574 | spin_unlock_irqrestore(&tgt->tgt_lock, flags); | |
575 | return vq; | |
576 | } | |
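
/*
 * Example of the steering policy above: if a previously idle target is
 * sent a command from CPU 3 on a device with four request queues, the
 * target latches req_vqs[3].  As long as requests remain outstanding,
 * commands from any CPU keep using req_vqs[3], preserving per-target FIFO
 * order; once reqs drops back to zero, the next command may pick a new
 * queue.
 */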
577 | ||
578 | static int virtscsi_queuecommand_multi(struct Scsi_Host *sh, | |
579 | struct scsi_cmnd *sc) | |
580 | { | |
581 | struct virtio_scsi *vscsi = shost_priv(sh); | |
582 | struct virtio_scsi_target_state *tgt = | |
583 | scsi_target(sc->device)->hostdata; | |
584 | struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt); | |
585 | ||
586 | return virtscsi_queuecommand(vscsi, req_vq, sc); | |
587 | } | |
588 | ||
589 | static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) | |
590 | { | |
591 | DECLARE_COMPLETION_ONSTACK(comp); | |
592 | int ret = FAILED; | |
593 | ||
594 | cmd->comp = ∁ | |
595 | if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd, | |
596 | sizeof cmd->req.tmf, sizeof cmd->resp.tmf, | |
597 | GFP_NOIO) < 0) | |
598 | goto out; | |
599 | ||
600 | wait_for_completion(&comp); | |
601 | if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK || | |
602 | cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) | |
603 | ret = SUCCESS; | |
604 | ||
605 | out: | |
606 | mempool_free(cmd, virtscsi_cmd_pool); | |
607 | return ret; | |
608 | } | |
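
/*
 * Task management functions travel on the control vq.  When the device
 * completes one, virtscsi_complete_free() finds cmd->comp non-NULL and
 * signals the on-stack completion instead of freeing the command, which
 * wakes the wait_for_completion() in virtscsi_tmf() above.
 */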
609 | ||
610 | static int virtscsi_device_reset(struct scsi_cmnd *sc) | |
611 | { | |
612 | struct virtio_scsi *vscsi = shost_priv(sc->device->host); | |
613 | struct virtio_scsi_cmd *cmd; | |
614 | ||
615 | sdev_printk(KERN_INFO, sc->device, "device reset\n"); | |
616 | cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); | |
617 | if (!cmd) | |
618 | return FAILED; | |
619 | ||
620 | memset(cmd, 0, sizeof(*cmd)); | |
621 | cmd->sc = sc; | |
622 | cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ | |
623 | .type = VIRTIO_SCSI_T_TMF, | |
624 | .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET, | |
625 | .lun[0] = 1, | |
626 | .lun[1] = sc->device->id, | |
627 | .lun[2] = (sc->device->lun >> 8) | 0x40, | |
628 | .lun[3] = sc->device->lun & 0xff, | |
629 | }; | |
630 | return virtscsi_tmf(vscsi, cmd); | |
631 | } | |
632 | ||
633 | static int virtscsi_abort(struct scsi_cmnd *sc) | |
634 | { | |
635 | struct virtio_scsi *vscsi = shost_priv(sc->device->host); | |
636 | struct virtio_scsi_cmd *cmd; | |
637 | ||
638 | scmd_printk(KERN_INFO, sc, "abort\n"); | |
639 | cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); | |
640 | if (!cmd) | |
641 | return FAILED; | |
642 | ||
643 | memset(cmd, 0, sizeof(*cmd)); | |
644 | cmd->sc = sc; | |
645 | cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ | |
646 | .type = VIRTIO_SCSI_T_TMF, | |
647 | .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, | |
648 | .lun[0] = 1, | |
649 | .lun[1] = sc->device->id, | |
650 | .lun[2] = (sc->device->lun >> 8) | 0x40, | |
651 | .lun[3] = sc->device->lun & 0xff, | |
652 | .tag = (unsigned long)sc, | |
653 | }; | |
654 | return virtscsi_tmf(vscsi, cmd); | |
655 | } | |
656 | ||
657 | static int virtscsi_target_alloc(struct scsi_target *starget) | |
658 | { | |
659 | struct virtio_scsi_target_state *tgt = | |
660 | kmalloc(sizeof(*tgt), GFP_KERNEL); | |
661 | if (!tgt) | |
662 | return -ENOMEM; | |
663 | ||
664 | spin_lock_init(&tgt->tgt_lock); | |
665 | atomic_set(&tgt->reqs, 0); | |
666 | tgt->req_vq = NULL; | |
667 | ||
668 | starget->hostdata = tgt; | |
669 | return 0; | |
670 | } | |
671 | ||
672 | static void virtscsi_target_destroy(struct scsi_target *starget) | |
673 | { | |
674 | struct virtio_scsi_target_state *tgt = starget->hostdata; | |
675 | kfree(tgt); | |
676 | } | |
677 | ||
678 | static struct scsi_host_template virtscsi_host_template_single = { | |
679 | .module = THIS_MODULE, | |
680 | .name = "Virtio SCSI HBA", | |
681 | .proc_name = "virtio_scsi", | |
682 | .this_id = -1, | |
683 | .queuecommand = virtscsi_queuecommand_single, | |
684 | .eh_abort_handler = virtscsi_abort, | |
685 | .eh_device_reset_handler = virtscsi_device_reset, | |
686 | ||
687 | .can_queue = 1024, | |
688 | .dma_boundary = UINT_MAX, | |
689 | .use_clustering = ENABLE_CLUSTERING, | |
690 | .target_alloc = virtscsi_target_alloc, | |
691 | .target_destroy = virtscsi_target_destroy, | |
692 | }; | |
693 | ||
694 | static struct scsi_host_template virtscsi_host_template_multi = { | |
695 | .module = THIS_MODULE, | |
696 | .name = "Virtio SCSI HBA", | |
697 | .proc_name = "virtio_scsi", | |
698 | .this_id = -1, | |
699 | .queuecommand = virtscsi_queuecommand_multi, | |
700 | .eh_abort_handler = virtscsi_abort, | |
701 | .eh_device_reset_handler = virtscsi_device_reset, | |
702 | ||
703 | .can_queue = 1024, | |
704 | .dma_boundary = UINT_MAX, | |
705 | .use_clustering = ENABLE_CLUSTERING, | |
706 | .target_alloc = virtscsi_target_alloc, | |
707 | .target_destroy = virtscsi_target_destroy, | |
708 | }; | |
709 | ||
710 | #define virtscsi_config_get(vdev, fld) \ | |
711 | ({ \ | |
712 | typeof(((struct virtio_scsi_config *)0)->fld) __val; \ | |
713 | virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \ | |
714 | __val; \ | |
715 | }) | |
716 | ||
717 | #define virtscsi_config_set(vdev, fld, val) \ | |
718 | do { \ | |
719 | typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \ | |
720 | virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ | |
721 | } while(0) | |
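
/*
 * Typical usage of the accessors above, as in virtscsi_init() and
 * virtscsi_probe() below:
 *
 *	u32 max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
 *	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
 *
 * The typeof() trick makes __val match the width of the config field, so
 * callers never pass an explicit size.
 */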
722 | ||
723 | static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) | |
724 | { | |
725 | int i; | |
726 | int cpu; | |
727 | ||
728 | /* In multiqueue mode, when the number of cpu is equal | |
729 | * to the number of request queues, we let the qeueues | |
730 | * to be private to one cpu by setting the affinity hint | |
731 | * to eliminate the contention. | |
732 | */ | |
733 | if ((vscsi->num_queues == 1 || | |
734 | vscsi->num_queues != num_online_cpus()) && affinity) { | |
735 | if (vscsi->affinity_hint_set) | |
736 | affinity = false; | |
737 | else | |
738 | return; | |
739 | } | |
740 | ||
741 | if (affinity) { | |
742 | i = 0; | |
743 | for_each_online_cpu(cpu) { | |
744 | virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu); | |
745 | i++; | |
746 | } | |
747 | ||
748 | vscsi->affinity_hint_set = true; | |
749 | } else { | |
750 | for (i = 0; i < vscsi->num_queues; i++) | |
751 | virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); | |
752 | ||
753 | vscsi->affinity_hint_set = false; | |
754 | } | |
755 | } | |
756 | ||
757 | static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) | |
758 | { | |
759 | get_online_cpus(); | |
760 | __virtscsi_set_affinity(vscsi, affinity); | |
761 | put_online_cpus(); | |
762 | } | |
763 | ||
764 | static int virtscsi_cpu_callback(struct notifier_block *nfb, | |
765 | unsigned long action, void *hcpu) | |
766 | { | |
767 | struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb); | |
768 | switch(action) { | |
769 | case CPU_ONLINE: | |
770 | case CPU_ONLINE_FROZEN: | |
771 | case CPU_DEAD: | |
772 | case CPU_DEAD_FROZEN: | |
773 | __virtscsi_set_affinity(vscsi, true); | |
774 | break; | |
775 | default: | |
776 | break; | |
777 | } | |
778 | return NOTIFY_OK; | |
779 | } | |
780 | ||
781 | static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, | |
782 | struct virtqueue *vq) | |
783 | { | |
784 | spin_lock_init(&virtscsi_vq->vq_lock); | |
785 | virtscsi_vq->vq = vq; | |
786 | } | |
787 | ||
788 | static void virtscsi_scan(struct virtio_device *vdev) | |
789 | { | |
790 | struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv; | |
791 | ||
792 | scsi_scan_host(shost); | |
793 | } | |
794 | ||
795 | static void virtscsi_remove_vqs(struct virtio_device *vdev) | |
796 | { | |
797 | struct Scsi_Host *sh = virtio_scsi_host(vdev); | |
798 | struct virtio_scsi *vscsi = shost_priv(sh); | |
799 | ||
800 | virtscsi_set_affinity(vscsi, false); | |
801 | ||
802 | /* Stop all the virtqueues. */ | |
803 | vdev->config->reset(vdev); | |
804 | ||
805 | vdev->config->del_vqs(vdev); | |
806 | } | |
807 | ||
808 | static int virtscsi_init(struct virtio_device *vdev, | |
809 | struct virtio_scsi *vscsi) | |
810 | { | |
811 | int err; | |
812 | u32 i; | |
813 | u32 num_vqs; | |
814 | vq_callback_t **callbacks; | |
815 | const char **names; | |
816 | struct virtqueue **vqs; | |
817 | ||
818 | num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; | |
819 | vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL); | |
820 | callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL); | |
821 | names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL); | |
822 | ||
823 | if (!callbacks || !vqs || !names) { | |
824 | err = -ENOMEM; | |
825 | goto out; | |
826 | } | |
827 | ||
828 | callbacks[0] = virtscsi_ctrl_done; | |
829 | callbacks[1] = virtscsi_event_done; | |
830 | names[0] = "control"; | |
831 | names[1] = "event"; | |
832 | for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) { | |
833 | callbacks[i] = virtscsi_req_done; | |
834 | names[i] = "request"; | |
835 | } | |
836 | ||
837 | /* Discover virtqueues and write information to configuration. */ | |
838 | err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); | |
839 | if (err) | |
840 | goto out; | |
841 | ||
842 | virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]); | |
843 | virtscsi_init_vq(&vscsi->event_vq, vqs[1]); | |
844 | for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) | |
845 | virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE], | |
846 | vqs[i]); | |
847 | ||
848 | virtscsi_set_affinity(vscsi, true); | |
849 | ||
850 | virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); | |
851 | virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); | |
852 | ||
853 | if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) | |
854 | virtscsi_kick_event_all(vscsi); | |
855 | ||
856 | err = 0; | |
857 | ||
858 | out: | |
859 | kfree(names); | |
860 | kfree(callbacks); | |
861 | kfree(vqs); | |
862 | if (err) | |
863 | virtscsi_remove_vqs(vdev); | |
864 | return err; | |
865 | } | |
866 | ||
867 | static int virtscsi_probe(struct virtio_device *vdev) | |
868 | { | |
869 | struct Scsi_Host *shost; | |
870 | struct virtio_scsi *vscsi; | |
871 | int err; | |
872 | u32 sg_elems, num_targets; | |
873 | u32 cmd_per_lun; | |
874 | u32 num_queues; | |
875 | struct scsi_host_template *hostt; | |
876 | ||
877 | /* We need to know how many queues before we allocate. */ | |
878 | num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; | |
879 | ||
880 | num_targets = virtscsi_config_get(vdev, max_target) + 1; | |
881 | ||
882 | if (num_queues == 1) | |
883 | hostt = &virtscsi_host_template_single; | |
884 | else | |
885 | hostt = &virtscsi_host_template_multi; | |
886 | ||
887 | shost = scsi_host_alloc(hostt, | |
888 | sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues); | |
889 | if (!shost) | |
890 | return -ENOMEM; | |
891 | ||
892 | sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1; | |
893 | shost->sg_tablesize = sg_elems; | |
894 | vscsi = shost_priv(shost); | |
895 | vscsi->vdev = vdev; | |
896 | vscsi->num_queues = num_queues; | |
897 | vdev->priv = shost; | |
898 | ||
899 | err = virtscsi_init(vdev, vscsi); | |
900 | if (err) | |
901 | goto virtscsi_init_failed; | |
902 | ||
903 | vscsi->nb.notifier_call = &virtscsi_cpu_callback; | |
904 | err = register_hotcpu_notifier(&vscsi->nb); | |
905 | if (err) { | |
906 | pr_err("registering cpu notifier failed\n"); | |
907 | goto scsi_add_host_failed; | |
908 | } | |
909 | ||
910 | cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; | |
911 | shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); | |
912 | shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; | |
913 | ||
914 | /* LUNs > 256 are reported with format 1, so they go in the range | |
915 | * 16640-32767. | |
916 | */ | |
917 | shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000; | |
918 | shost->max_id = num_targets; | |
919 | shost->max_channel = 0; | |
920 | shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; | |
921 | err = scsi_add_host(shost, &vdev->dev); | |
922 | if (err) | |
923 | goto scsi_add_host_failed; | |
924 | /* | |
925 | * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan() | |
926 | * after VIRTIO_CONFIG_S_DRIVER_OK has been set.. | |
927 | */ | |
928 | return 0; | |
929 | ||
930 | scsi_add_host_failed: | |
931 | vdev->config->del_vqs(vdev); | |
932 | virtscsi_init_failed: | |
933 | scsi_host_put(shost); | |
934 | return err; | |
935 | } | |
936 | ||
937 | static void virtscsi_remove(struct virtio_device *vdev) | |
938 | { | |
939 | struct Scsi_Host *shost = virtio_scsi_host(vdev); | |
940 | struct virtio_scsi *vscsi = shost_priv(shost); | |
941 | ||
942 | if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) | |
943 | virtscsi_cancel_event_work(vscsi); | |
944 | ||
945 | scsi_remove_host(shost); | |
946 | ||
947 | unregister_hotcpu_notifier(&vscsi->nb); | |
948 | ||
949 | virtscsi_remove_vqs(vdev); | |
950 | scsi_host_put(shost); | |
951 | } | |
952 | ||
953 | #ifdef CONFIG_PM_SLEEP | |
954 | static int virtscsi_freeze(struct virtio_device *vdev) | |
955 | { | |
956 | virtscsi_remove_vqs(vdev); | |
957 | return 0; | |
958 | } | |
959 | ||
960 | static int virtscsi_restore(struct virtio_device *vdev) | |
961 | { | |
962 | struct Scsi_Host *sh = virtio_scsi_host(vdev); | |
963 | struct virtio_scsi *vscsi = shost_priv(sh); | |
964 | ||
965 | return virtscsi_init(vdev, vscsi); | |
966 | } | |
967 | #endif | |
968 | ||
969 | static struct virtio_device_id id_table[] = { | |
970 | { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID }, | |
971 | { 0 }, | |
972 | }; | |
973 | ||
974 | static unsigned int features[] = { | |
975 | VIRTIO_SCSI_F_HOTPLUG, | |
976 | VIRTIO_SCSI_F_CHANGE, | |
977 | }; | |
978 | ||
979 | static struct virtio_driver virtio_scsi_driver = { | |
980 | .feature_table = features, | |
981 | .feature_table_size = ARRAY_SIZE(features), | |
982 | .driver.name = KBUILD_MODNAME, | |
983 | .driver.owner = THIS_MODULE, | |
984 | .id_table = id_table, | |
985 | .probe = virtscsi_probe, | |
986 | .scan = virtscsi_scan, | |
987 | #ifdef CONFIG_PM_SLEEP | |
988 | .freeze = virtscsi_freeze, | |
989 | .restore = virtscsi_restore, | |
990 | #endif | |
991 | .remove = virtscsi_remove, | |
992 | }; | |
993 | ||
994 | static int __init init(void) | |
995 | { | |
996 | int ret = -ENOMEM; | |
997 | ||
998 | virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0); | |
999 | if (!virtscsi_cmd_cache) { | |
1000 | pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n"); | |
1001 | goto error; | |
1002 | } | |
1003 | ||
1004 | ||
1005 | virtscsi_cmd_pool = | |
1006 | mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ, | |
1007 | virtscsi_cmd_cache); | |
1008 | if (!virtscsi_cmd_pool) { | |
1009 | pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); | |
1010 | goto error; | |
1011 | } | |
1012 | ret = register_virtio_driver(&virtio_scsi_driver); | |
1013 | if (ret < 0) | |
1014 | goto error; | |
1015 | ||
1016 | return 0; | |
1017 | ||
1018 | error: | |
1019 | if (virtscsi_cmd_pool) { | |
1020 | mempool_destroy(virtscsi_cmd_pool); | |
1021 | virtscsi_cmd_pool = NULL; | |
1022 | } | |
1023 | if (virtscsi_cmd_cache) { | |
1024 | kmem_cache_destroy(virtscsi_cmd_cache); | |
1025 | virtscsi_cmd_cache = NULL; | |
1026 | } | |
1027 | return ret; | |
1028 | } | |
1029 | ||
1030 | static void __exit fini(void) | |
1031 | { | |
1032 | unregister_virtio_driver(&virtio_scsi_driver); | |
1033 | mempool_destroy(virtscsi_cmd_pool); | |
1034 | kmem_cache_destroy(virtscsi_cmd_cache); | |
1035 | } | |
1036 | module_init(init); | |
1037 | module_exit(fini); | |
1038 | ||
1039 | MODULE_DEVICE_TABLE(virtio, id_table); | |
1040 | MODULE_DESCRIPTION("Virtio SCSI HBA driver"); | |
1041 | MODULE_LICENSE("GPL"); |