1 | /* ------------------------------------------------------------ |
2 | * ibmvscsi.c | |
3 | * (C) Copyright IBM Corporation 1994, 2004 | |
4 | * Authors: Colin DeVilbiss (devilbis@us.ibm.com) | |
5 | * Santiago Leon (santil@us.ibm.com) | |
6 | * Dave Boutcher (sleddog@us.ibm.com) | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License as published by | |
10 | * the Free Software Foundation; either version 2 of the License, or | |
11 | * (at your option) any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software | |
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | |
21 | * USA | |
22 | * | |
23 | * ------------------------------------------------------------ | |
24 | * Emulation of a SCSI host adapter for Virtual I/O devices | |
25 | * | |
26 | * This driver supports the SCSI adapter implemented by the IBM | |
27 | * Power5 firmware. That SCSI adapter is not a physical adapter, | |
28 | * but allows Linux SCSI peripheral drivers to directly | |
29 | * access devices in another logical partition on the physical system. | |
30 | * | |
31 | * The virtual adapter(s) are present in the open firmware device | |
32 | * tree just like real adapters. | |
33 | * | |
34 | * One of the capabilities provided on these systems is the ability | |
35 | * to DMA between partitions. The architecture states that for VSCSI, | |
36 | * the server side is allowed to DMA to and from the client. The client | |
37 | * is never trusted to DMA to or from the server directly. | |
38 | * | |
39 | * Messages are sent between partitions on a "Command/Response Queue" | |
40 | * (CRQ), which is just a buffer of 16 byte entries in the receiver's | |
41 | * memory. Senders cannot access the buffer directly, but send messages by | |
42 | * making a hypervisor call and passing in the 16 bytes. The hypervisor | |
43 | * puts the message in the next 16 byte space in round-robin fashion, | |
44 | * turns on the high order bit of the message (the valid bit), and | |
45 | * generates an interrupt to the receiver (if interrupts are turned on.) | |
46 | * The receiver just turns off the valid bit when it has copied out | |
47 | * the message. | |
48 | * | |
49 | * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit | |
50 | * (IU) (as defined in the T10 standard available at www.t10.org), gets | |
51 | * a DMA address for the message, and sends it to the server as the | |
52 | * payload of a CRQ message. The server DMAs the SRP IU and processes it, | |
53 | * including doing any additional data transfers. When it is done, it | |
54 | * DMAs the SRP response back to the same address as the request came from, | |
55 | * and sends a CRQ message back to inform the client that the request has | |
56 | * completed. | |
57 | * | |
58 | * Note that some of the underlying infrastructure is different between | |
59 | * machines conforming to the "RS/6000 Platform Architecture" (RPA) and | |
60 | * the older iSeries hypervisor models. To support both, some low level | |
61 | * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c. | |
62 | * The Makefile should pick one, not two, not zero, of these. | |
63 | * | |
64 | * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor | |
65 | * interfaces. It would be really nice to abstract this above an RDMA | |
66 | * layer. | |
67 | */ | |
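/*
 * Illustrative sketch only (not part of the driver): roughly how a receiver
 * might drain the CRQ described above.  The struct layout and the
 * example_handle_message() helper are hypothetical; the real 16 byte entry
 * is struct viosrp_crq and the real receive path lives in rpa_vscsi.c and
 * iseries_vscsi.c.
 */
#if 0
struct example_crq_entry {
	u8 valid;	/* high order bit (0x80) set by the hypervisor */
	u8 data[15];	/* remaining 15 bytes of the 16 byte message */
};

static void example_drain_crq(struct example_crq_entry *ring, int size,
			      int *cursor)
{
	/* consume entries until we find one the sender has not filled in */
	while (ring[*cursor].valid & 0x80) {
		example_handle_message(&ring[*cursor]);	/* hypothetical */
		ring[*cursor].valid = 0x00;	/* give the slot back */
		*cursor = (*cursor + 1) % size;
	}
}
#endif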
68 | ||
69 | #include <linux/module.h> | |
70 | #include <linux/moduleparam.h> | |
71 | #include <linux/dma-mapping.h> | |
72 | #include <linux/delay.h> | |
73 | #include <asm/vio.h> | |
74 | #include <scsi/scsi.h> | |
75 | #include <scsi/scsi_cmnd.h> | |
76 | #include <scsi/scsi_host.h> | |
77 | #include <scsi/scsi_device.h> | |
78 | #include "ibmvscsi.h" | |
79 | ||
80 | /* The values below are somewhat arbitrary default values, but | |
81 | * OS/400 will use 3 busses (disks, CDs, tapes, I think.) | |
82 | * Note that there are 3 bits of channel value, 6 bits of id, and | |
83 | * 5 bits of LUN. | |
84 | */ | |
85 | static int max_id = 64; | |
86 | static int max_channel = 3; | |
87 | static int init_timeout = 5; | |
88 | static int max_requests = 50; | |
89 | ||
90 | #define IBMVSCSI_VERSION "1.5.5" | |
91 | ||
92 | MODULE_DESCRIPTION("IBM Virtual SCSI"); | |
93 | MODULE_AUTHOR("Dave Boutcher"); | |
94 | MODULE_LICENSE("GPL"); | |
95 | MODULE_VERSION(IBMVSCSI_VERSION); | |
96 | ||
97 | module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR); | |
98 | MODULE_PARM_DESC(max_id, "Largest ID value for each channel"); | |
99 | module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR); | |
100 | MODULE_PARM_DESC(max_channel, "Largest channel value"); | |
101 | module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR); | |
102 | MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); | |
103 | module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR); | |
104 | MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); | |
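/*
 * Usage example (hypothetical values): the defaults above can be overridden
 * at module load time, e.g. "modprobe ibmvscsi max_requests=100
 * init_timeout=10".  The S_IWUSR mode also exposes writable files under
 * /sys/module/ibmvscsi/parameters/, though most of these values are only
 * consulted when an adapter is probed.
 */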
105 | ||
106 | /* ------------------------------------------------------------ | |
107 | * Routines for the event pool and event structs | |
108 | */ | |
109 | /** | |
110 | * initialize_event_pool: - Allocates and initializes the event pool for a host | |
111 | * @pool: event_pool to be initialized | |
112 | * @size: Number of events in pool | |
113 | * @hostdata: ibmvscsi_host_data who owns the event pool | |
114 | * | |
115 | * Returns zero on success. | |
116 | */ | |
117 | static int initialize_event_pool(struct event_pool *pool, | |
118 | int size, struct ibmvscsi_host_data *hostdata) | |
119 | { | |
120 | int i; | |
121 | ||
122 | pool->size = size; | |
123 | pool->next = 0; | |
124 | pool->events = kmalloc(pool->size * sizeof(*pool->events), GFP_KERNEL); | |
125 | if (!pool->events) | |
126 | return -ENOMEM; | |
127 | memset(pool->events, 0x00, pool->size * sizeof(*pool->events)); | |
128 | ||
129 | pool->iu_storage = | |
130 | dma_alloc_coherent(hostdata->dev, | |
131 | pool->size * sizeof(*pool->iu_storage), | |
132 | &pool->iu_token, 0); | |
133 | if (!pool->iu_storage) { | |
134 | kfree(pool->events); | |
135 | return -ENOMEM; | |
136 | } | |
137 | ||
138 | for (i = 0; i < pool->size; ++i) { | |
139 | struct srp_event_struct *evt = &pool->events[i]; | |
140 | memset(&evt->crq, 0x00, sizeof(evt->crq)); | |
141 | atomic_set(&evt->free, 1); | |
142 | evt->crq.valid = 0x80; | |
143 | evt->crq.IU_length = sizeof(*evt->xfer_iu); | |
144 | evt->crq.IU_data_ptr = pool->iu_token + | |
145 | sizeof(*evt->xfer_iu) * i; | |
146 | evt->xfer_iu = pool->iu_storage + i; | |
147 | evt->hostdata = hostdata; | |
148 | } | |
149 | ||
150 | return 0; | |
151 | } | |
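/*
 * Sketch of the invariant established above (for illustration): for each
 * event i the CPU pointer and the DMA token name the same slot of the
 * coherent buffer, so the client fills in evt->xfer_iu and the server DMAs
 * the IU through evt->crq.IU_data_ptr:
 *
 *	evt->xfer_iu         == pool->iu_storage + i
 *	evt->crq.IU_data_ptr == pool->iu_token + i * sizeof(*evt->xfer_iu)
 */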
152 | ||
153 | /** | |
154 | * release_event_pool: - Frees memory of an event pool of a host | |
155 | * @pool: event_pool to be released | |
156 | * @hostdata: ibmvscsi_host_data who owns the event pool | |
157 | * | |
158 | * Warns if any events are still marked as in use when the pool is freed. | |
159 | */ | |
160 | static void release_event_pool(struct event_pool *pool, | |
161 | struct ibmvscsi_host_data *hostdata) | |
162 | { | |
163 | int i, in_use = 0; | |
164 | for (i = 0; i < pool->size; ++i) | |
165 | if (atomic_read(&pool->events[i].free) != 1) | |
166 | ++in_use; | |
167 | if (in_use) | |
168 | printk(KERN_WARNING | |
169 | "ibmvscsi: releasing event pool with %d " | |
170 | "events still in use?\n", in_use); | |
171 | kfree(pool->events); | |
172 | dma_free_coherent(hostdata->dev, | |
173 | pool->size * sizeof(*pool->iu_storage), | |
174 | pool->iu_storage, pool->iu_token); | |
175 | } | |
176 | ||
177 | /** | |
178 | * valid_event_struct: - Determines if event is valid. | |
179 | * @pool: event_pool that contains the event | |
180 | * @evt: srp_event_struct to be checked for validity | |
181 | * | |
182 | * Returns zero if event is invalid, one otherwise. | |
183 | */ | |
184 | static int valid_event_struct(struct event_pool *pool, | |
185 | struct srp_event_struct *evt) | |
186 | { | |
187 | int index = evt - pool->events; | |
188 | if (index < 0 || index >= pool->size) /* outside of bounds */ | |
189 | return 0; | |
190 | if (evt != pool->events + index) /* unaligned */ | |
191 | return 0; | |
192 | return 1; | |
193 | } | |
194 | ||
195 | /** | |
196 | * free_event_struct: - Changes status of event to "free" | |
197 | * @pool: event_pool that contains the event | |
198 | * @evt: srp_event_struct to be modified | |
199 | * | |
200 | */ | |
201 | static void free_event_struct(struct event_pool *pool, | |
202 | struct srp_event_struct *evt) | |
203 | { | |
204 | if (!valid_event_struct(pool, evt)) { | |
205 | printk(KERN_ERR | |
206 | "ibmvscsi: Freeing invalid event_struct %p " | |
207 | "(not in pool %p)\n", evt, pool->events); | |
208 | return; | |
209 | } | |
210 | if (atomic_inc_return(&evt->free) != 1) { | |
211 | printk(KERN_ERR | |
212 | "ibmvscsi: Freeing event_struct %p " | |
213 | "which is not in use!\n", evt); | |
214 | return; | |
215 | } | |
216 | } | |
217 | ||
218 | /** | |
219 | * get_evt_struct: - Gets the next free event in pool | |
220 | * @pool: event_pool that contains the events to be searched | |
221 | * | |
222 | * Returns the next event in "free" state, and NULL if none are free. | |
223 | * Note that no synchronization is done here, we assume the host_lock | |
224 | * will synchronize things. | |
225 | */ | |
226 | static struct srp_event_struct *get_event_struct(struct event_pool *pool) | |
227 | { | |
228 | int i; | |
229 | int poolsize = pool->size; | |
230 | int offset = pool->next; | |
231 | ||
232 | for (i = 0; i < poolsize; i++) { | |
233 | offset = (offset + 1) % poolsize; | |
234 | if (!atomic_dec_if_positive(&pool->events[offset].free)) { | |
235 | pool->next = offset; | |
236 | return &pool->events[offset]; | |
237 | } | |
238 | } | |
239 | ||
240 | printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n"); | |
241 | return NULL; | |
242 | } | |
243 | ||
244 | /** | |
245 | * init_event_struct: Initialize fields in an event struct that are always | |
246 | * required. | |
247 | * @evt_struct: The event | |
248 | * @done: Routine to call when the event is responded to | |
249 | * @format: SRP or MAD format | |
250 | * @timeout: timeout value set in the CRQ | |
251 | */ | |
252 | static void init_event_struct(struct srp_event_struct *evt_struct, | |
253 | void (*done) (struct srp_event_struct *), | |
254 | u8 format, | |
255 | int timeout) | |
256 | { | |
257 | evt_struct->cmnd = NULL; | |
258 | evt_struct->cmnd_done = NULL; | |
259 | evt_struct->sync_srp = NULL; | |
260 | evt_struct->crq.format = format; | |
261 | evt_struct->crq.timeout = timeout; | |
262 | evt_struct->done = done; | |
263 | } | |
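/*
 * Illustrative sketch (not part of the driver) of the calling pattern the
 * routines above are built for; the same get/init/send sequence is used by
 * the real senders later in this file.  example_done() is a hypothetical
 * callback, and the caller is assumed to hold host_lock.
 */
#if 0
static int example_send_request(struct ibmvscsi_host_data *hostdata)
{
	struct srp_event_struct *evt_struct;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
		return SCSI_MLQUEUE_HOST_BUSY;

	init_event_struct(evt_struct, example_done,
			  VIOSRP_MAD_FORMAT, init_timeout * HZ);

	/* ... fill in evt_struct->iu for the request being sent ... */

	/* on failure this frees the event and runs the error path itself */
	return ibmvscsi_send_srp_event(evt_struct, hostdata);
}
#endif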
264 | ||
265 | /* ------------------------------------------------------------ | |
266 | * Routines for receiving SCSI responses from the hosting partition | |
267 | */ | |
268 | ||
269 | /** | |
270 | * set_srp_direction: Set the fields in the srp related to data | |
271 | * direction and number of buffers based on the direction in | |
272 | * the scsi_cmnd and the number of buffers | |
273 | */ | |
274 | static void set_srp_direction(struct scsi_cmnd *cmd, | |
275 | struct srp_cmd *srp_cmd, | |
276 | int numbuf) | |
277 | { | |
278 | if (numbuf == 0) | |
279 | return; | |
280 | ||
281 | if (numbuf == 1) { | |
282 | if (cmd->sc_data_direction == DMA_TO_DEVICE) | |
283 | srp_cmd->data_out_format = SRP_DIRECT_BUFFER; | |
284 | else | |
285 | srp_cmd->data_in_format = SRP_DIRECT_BUFFER; | |
286 | } else { | |
287 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { | |
288 | srp_cmd->data_out_format = SRP_INDIRECT_BUFFER; | |
289 | srp_cmd->data_out_count = numbuf; | |
290 | } else { | |
291 | srp_cmd->data_in_format = SRP_INDIRECT_BUFFER; | |
292 | srp_cmd->data_in_count = numbuf; | |
293 | } | |
294 | } | |
295 | } | |
296 | ||
297 | /** | |
298 | * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format | |
299 | * @cmd: srp_cmd whose additional_data member will be unmapped | |
300 | * @dev: device for which the memory is mapped | |
301 | * | |
302 | */ | |
303 | static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev) | |
304 | { | |
305 | int i; | |
306 | ||
307 | if ((cmd->data_out_format == SRP_NO_BUFFER) && | |
308 | (cmd->data_in_format == SRP_NO_BUFFER)) | |
309 | return; | |
310 | else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) || | |
311 | (cmd->data_in_format == SRP_DIRECT_BUFFER)) { | |
312 | struct memory_descriptor *data = | |
313 | (struct memory_descriptor *)cmd->additional_data; | |
314 | dma_unmap_single(dev, data->virtual_address, data->length, | |
315 | DMA_BIDIRECTIONAL); | |
316 | } else { | |
317 | struct indirect_descriptor *indirect = | |
318 | (struct indirect_descriptor *)cmd->additional_data; | |
319 | int num_mapped = indirect->head.length / | |
320 | sizeof(indirect->list[0]); | |
321 | for (i = 0; i < num_mapped; ++i) { | |
322 | struct memory_descriptor *data = &indirect->list[i]; | |
323 | dma_unmap_single(dev, | |
324 | data->virtual_address, | |
325 | data->length, DMA_BIDIRECTIONAL); | |
326 | } | |
327 | } | |
328 | } | |
329 | ||
330 | /** | |
331 | * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields | |
332 | * @cmd: Scsi_Cmnd with the scatterlist | |
333 | * @srp_cmd: srp_cmd that contains the memory descriptor | |
334 | * @dev: device for which to map dma memory | |
335 | * | |
336 | * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd. | |
337 | * Returns 1 on success. | |
338 | */ | |
339 | static int map_sg_data(struct scsi_cmnd *cmd, | |
340 | struct srp_cmd *srp_cmd, struct device *dev) | |
341 | { | |
342 | ||
343 | int i, sg_mapped; | |
344 | u64 total_length = 0; | |
345 | struct scatterlist *sg = cmd->request_buffer; | |
346 | struct memory_descriptor *data = | |
347 | (struct memory_descriptor *)srp_cmd->additional_data; | |
348 | struct indirect_descriptor *indirect = | |
349 | (struct indirect_descriptor *)data; | |
350 | ||
351 | sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL); | |
352 | ||
353 | if (sg_mapped == 0) | |
354 | return 0; | |
355 | ||
356 | set_srp_direction(cmd, srp_cmd, sg_mapped); | |
357 | ||
358 | /* special case; we can use a single direct descriptor */ | |
359 | if (sg_mapped == 1) { | |
360 | data->virtual_address = sg_dma_address(&sg[0]); | |
361 | data->length = sg_dma_len(&sg[0]); | |
362 | data->memory_handle = 0; | |
363 | return 1; | |
364 | } | |
365 | ||
366 | if (sg_mapped > MAX_INDIRECT_BUFS) { | |
367 | printk(KERN_ERR | |
368 | "ibmvscsi: More than %d mapped sg entries, got %d\n", | |
369 | MAX_INDIRECT_BUFS, sg_mapped); | |
370 | return 0; | |
371 | } | |
372 | ||
373 | indirect->head.virtual_address = 0; | |
374 | indirect->head.length = sg_mapped * sizeof(indirect->list[0]); | |
375 | indirect->head.memory_handle = 0; | |
376 | for (i = 0; i < sg_mapped; ++i) { | |
377 | struct memory_descriptor *descr = &indirect->list[i]; | |
378 | struct scatterlist *sg_entry = &sg[i]; | |
379 | descr->virtual_address = sg_dma_address(sg_entry); | |
380 | descr->length = sg_dma_len(sg_entry); | |
381 | descr->memory_handle = 0; | |
382 | total_length += sg_dma_len(sg_entry); | |
383 | } | |
384 | indirect->total_length = total_length; | |
385 | ||
386 | return 1; | |
387 | } | |
388 | ||
389 | /** | |
390 | * map_single_data: - Maps memory and initializes memory descriptor fields | |
391 | * @cmd: struct scsi_cmnd with the memory to be mapped | |
392 | * @srp_cmd: srp_cmd that contains the memory descriptor | |
393 | * @dev: device for which to map dma memory | |
394 | * | |
395 | * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd. | |
396 | * Returns 1 on success. | |
397 | */ | |
398 | static int map_single_data(struct scsi_cmnd *cmd, | |
399 | struct srp_cmd *srp_cmd, struct device *dev) | |
400 | { | |
401 | struct memory_descriptor *data = | |
402 | (struct memory_descriptor *)srp_cmd->additional_data; | |
403 | ||
404 | data->virtual_address = | |
405 | dma_map_single(dev, cmd->request_buffer, | |
406 | cmd->request_bufflen, | |
407 | DMA_BIDIRECTIONAL); | |
408 | if (dma_mapping_error(data->virtual_address)) { | |
409 | printk(KERN_ERR | |
410 | "ibmvscsi: Unable to map request_buffer for command!\n"); | |
411 | return 0; | |
412 | } | |
413 | data->length = cmd->request_bufflen; | |
414 | data->memory_handle = 0; | |
415 | ||
416 | set_srp_direction(cmd, srp_cmd, 1); | |
417 | ||
418 | return 1; | |
419 | } | |
420 | ||
421 | /** | |
422 | * map_data_for_srp_cmd: - Calls functions to map data for srp cmds | |
423 | * @cmd: struct scsi_cmnd with the memory to be mapped | |
424 | * @srp_cmd: srp_cmd that contains the memory descriptor | |
425 | * @dev: dma device for which to map dma memory | |
426 | * | |
427 | * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds | |
428 | * Returns 1 on success. | |
429 | */ | |
430 | static int map_data_for_srp_cmd(struct scsi_cmnd *cmd, | |
431 | struct srp_cmd *srp_cmd, struct device *dev) | |
432 | { | |
433 | switch (cmd->sc_data_direction) { | |
434 | case DMA_FROM_DEVICE: | |
435 | case DMA_TO_DEVICE: | |
436 | break; | |
437 | case DMA_NONE: | |
438 | return 1; | |
439 | case DMA_BIDIRECTIONAL: | |
440 | printk(KERN_ERR | |
441 | "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n"); | |
442 | return 0; | |
443 | default: | |
444 | printk(KERN_ERR | |
445 | "ibmvscsi: Unknown data direction 0x%02x; can't map!\n", | |
446 | cmd->sc_data_direction); | |
447 | return 0; | |
448 | } | |
449 | ||
450 | if (!cmd->request_buffer) | |
451 | return 1; | |
452 | if (cmd->use_sg) | |
453 | return map_sg_data(cmd, srp_cmd, dev); | |
454 | return map_single_data(cmd, srp_cmd, dev); | |
455 | } | |
456 | ||
457 | /* ------------------------------------------------------------ | |
458 | * Routines for sending and receiving SRPs | |
459 | */ | |
460 | /** | |
461 | * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq() | |
462 | * @evt_struct: evt_struct to be sent | |
463 | * @hostdata: ibmvscsi_host_data of host | |
464 | * | |
465 | * Returns the value returned from ibmvscsi_send_crq(). (Zero for success) | |
466 | * Note that this routine assumes that host_lock is held for synchronization | |
467 | */ | |
468 | static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, | |
469 | struct ibmvscsi_host_data *hostdata) | |
470 | { | |
471 | struct scsi_cmnd *cmnd; | |
472 | u64 *crq_as_u64 = (u64 *) &evt_struct->crq; | |
473 | int rc; | |
474 | ||
475 | /* If we have exhausted our request limit, just fail this request. | |
476 | * Note that there are rare cases involving driver generated requests | |
477 | * (such as task management requests) that the mid layer may think we | |
478 | * can handle more requests (can_queue) when we actually can't | |
479 | */ | |
480 | if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) && | |
481 | (atomic_dec_if_positive(&hostdata->request_limit) < 0)) { | |
482 | /* See if the adapter is disabled */ | |
483 | if (atomic_read(&hostdata->request_limit) < 0) | |
484 | goto send_error; | |
485 | ||
486 | printk(KERN_WARNING | |
487 | "ibmvscsi: Warning, request_limit exceeded\n"); | |
488 | unmap_cmd_data(&evt_struct->iu.srp.cmd, | |
489 | hostdata->dev); | |
490 | free_event_struct(&hostdata->pool, evt_struct); | |
491 | return SCSI_MLQUEUE_HOST_BUSY; | |
492 | } | |
493 | ||
494 | /* Copy the IU into the transfer area */ | |
495 | *evt_struct->xfer_iu = evt_struct->iu; | |
496 | evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct; | |
497 | ||
498 | /* Add this to the sent list. We need to do this | |
499 | * before we actually send | |
500 | * in case it comes back REALLY fast | |
501 | */ | |
502 | list_add_tail(&evt_struct->list, &hostdata->sent); | |
503 | ||
504 | if ((rc = | |
505 | ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) { | |
506 | list_del(&evt_struct->list); | |
507 | ||
508 | printk(KERN_ERR "ibmvscsi: failed to send event struct rc %d\n", | |
509 | rc); | |
510 | goto send_error; | |
511 | } | |
512 | ||
513 | return 0; | |
514 | ||
515 | send_error: | |
516 | unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev); | |
517 | ||
518 | if ((cmnd = evt_struct->cmnd) != NULL) { | |
519 | cmnd->result = DID_ERROR << 16; | |
520 | evt_struct->cmnd_done(cmnd); | |
521 | } else if (evt_struct->done) | |
522 | evt_struct->done(evt_struct); | |
523 | ||
524 | free_event_struct(&hostdata->pool, evt_struct); | |
525 | return 0; | |
526 | } | |
527 | ||
528 | /** | |
529 | * handle_cmd_rsp: - Handle responses from commands | |
530 | * @evt_struct: srp_event_struct to be handled | |
531 | * | |
532 | * Used as a callback when sending scsi cmds. | |
533 | * Gets called by ibmvscsi_handle_crq() | |
534 | */ | |
535 | static void handle_cmd_rsp(struct srp_event_struct *evt_struct) | |
536 | { | |
537 | struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp; | |
538 | struct scsi_cmnd *cmnd = evt_struct->cmnd; | |
539 | ||
540 | if (unlikely(rsp->type != SRP_RSP_TYPE)) { | |
541 | if (printk_ratelimit()) | |
542 | printk(KERN_WARNING | |
543 | "ibmvscsi: bad SRP RSP type %d\n", | |
544 | rsp->type); | |
545 | } | |
546 | ||
547 | if (cmnd) { | |
548 | cmnd->result = rsp->status; | |
549 | if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION) | |
550 | memcpy(cmnd->sense_buffer, | |
551 | rsp->sense_and_response_data, | |
552 | rsp->sense_data_list_length); | |
553 | unmap_cmd_data(&evt_struct->iu.srp.cmd, | |
554 | evt_struct->hostdata->dev); | |
555 | ||
556 | if (rsp->doover) | |
557 | cmnd->resid = rsp->data_out_residual_count; | |
558 | else if (rsp->diover) | |
559 | cmnd->resid = rsp->data_in_residual_count; | |
560 | } | |
561 | ||
562 | if (evt_struct->cmnd_done) | |
563 | evt_struct->cmnd_done(cmnd); | |
564 | } | |
565 | ||
566 | /** | |
567 | * lun_from_dev: - Returns the lun of the scsi device | |
568 | * @dev: struct scsi_device | |
569 | * | |
570 | */ | |
571 | static inline u16 lun_from_dev(struct scsi_device *dev) | |
572 | { | |
573 | return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun; | |
574 | } | |
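/*
 * Worked example (illustrative numbers): for channel 1, id 2, lun 3 the
 * routine above yields
 *
 *	(0x2 << 14) | (2 << 8) | (1 << 5) | 3 = 0x8000 | 0x200 | 0x20 | 0x3
 *	                                      = 0x8223
 *
 * and the senders below place that 16-bit value in the top two bytes of the
 * 8-byte SRP LUN field via ((u64) lun) << 48.
 */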
575 | ||
576 | /** | |
577 | * ibmvscsi_queuecommand: - The queuecommand function of the scsi template | |
578 | * @cmd: struct scsi_cmnd to be executed | |
579 | * @done: Callback function to be called when cmd is completed | |
580 | */ | |
581 | static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, | |
582 | void (*done) (struct scsi_cmnd *)) | |
583 | { | |
584 | struct srp_cmd *srp_cmd; | |
585 | struct srp_event_struct *evt_struct; | |
586 | struct ibmvscsi_host_data *hostdata = | |
587 | (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata; | |
588 | u16 lun = lun_from_dev(cmnd->device); | |
589 | ||
590 | evt_struct = get_event_struct(&hostdata->pool); | |
591 | if (!evt_struct) | |
592 | return SCSI_MLQUEUE_HOST_BUSY; | |
593 | ||
594 | init_event_struct(evt_struct, | |
595 | handle_cmd_rsp, | |
596 | VIOSRP_SRP_FORMAT, | |
597 | cmnd->timeout); | |
598 | ||
599 | evt_struct->cmnd = cmnd; | |
600 | evt_struct->cmnd_done = done; | |
601 | ||
602 | /* Set up the actual SRP IU */ | |
603 | srp_cmd = &evt_struct->iu.srp.cmd; | |
604 | memset(srp_cmd, 0x00, sizeof(*srp_cmd)); | |
605 | srp_cmd->type = SRP_CMD_TYPE; | |
606 | memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd)); | |
607 | srp_cmd->lun = ((u64) lun) << 48; | |
608 | ||
609 | if (!map_data_for_srp_cmd(cmnd, srp_cmd, hostdata->dev)) { | |
610 | printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n"); | |
611 | free_event_struct(&hostdata->pool, evt_struct); | |
612 | return SCSI_MLQUEUE_HOST_BUSY; | |
613 | } | |
614 | ||
615 | /* Fix up dma address of the buffer itself */ | |
616 | if ((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) || | |
617 | (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) { | |
618 | struct indirect_descriptor *indirect = | |
619 | (struct indirect_descriptor *)srp_cmd->additional_data; | |
620 | indirect->head.virtual_address = evt_struct->crq.IU_data_ptr + | |
621 | offsetof(struct srp_cmd, additional_data) + | |
622 | offsetof(struct indirect_descriptor, list); | |
623 | } | |
624 | ||
625 | return ibmvscsi_send_srp_event(evt_struct, hostdata); | |
626 | } | |
627 | ||
628 | /* ------------------------------------------------------------ | |
629 | * Routines for driver initialization | |
630 | */ | |
631 | /** | |
632 | * adapter_info_rsp: - Handle response to MAD adapter info request | |
633 | * @evt_struct: srp_event_struct with the response | |
634 | * | |
635 | * Used as a "done" callback when sending adapter_info. Gets called | |
636 | * by ibmvscsi_handle_crq() | |
637 | */ | |
638 | static void adapter_info_rsp(struct srp_event_struct *evt_struct) | |
639 | { | |
640 | struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; | |
641 | dma_unmap_single(hostdata->dev, | |
642 | evt_struct->iu.mad.adapter_info.buffer, | |
643 | evt_struct->iu.mad.adapter_info.common.length, | |
644 | DMA_BIDIRECTIONAL); | |
645 | ||
646 | if (evt_struct->xfer_iu->mad.adapter_info.common.status) { | |
647 | printk("ibmvscsi: error %d getting adapter info\n", | |
648 | evt_struct->xfer_iu->mad.adapter_info.common.status); | |
649 | } else { | |
650 | printk("ibmvscsi: host srp version: %s, " | |
651 | "host partition %s (%d), OS %d, max io %u\n", | |
652 | hostdata->madapter_info.srp_version, | |
653 | hostdata->madapter_info.partition_name, | |
654 | hostdata->madapter_info.partition_number, | |
655 | hostdata->madapter_info.os_type, | |
656 | hostdata->madapter_info.port_max_txu[0]); | |
657 | ||
658 | if (hostdata->madapter_info.port_max_txu[0]) | |
659 | hostdata->host->max_sectors = | |
660 | hostdata->madapter_info.port_max_txu[0] >> 9; | |
661 | } | |
662 | } | |
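/*
 * Note on the shift above (illustrative arithmetic): port_max_txu[0] is a
 * byte count and max_sectors is in 512-byte sectors, hence ">> 9"; a
 * reported maximum transfer of 1048576 bytes (1MB) would give
 * max_sectors = 2048.
 */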
663 | ||
664 | /** | |
665 | * send_mad_adapter_info: - Sends the mad adapter info request | |
666 | * and stores the result so it can be retrieved with | |
667 | * sysfs. We COULD consider causing a failure if the | |
668 | * returned SRP version doesn't match ours. | |
669 | * @hostdata: ibmvscsi_host_data of host | |
670 | * | |
671 | * | |
672 | * The request is asynchronous; the response is handled by adapter_info_rsp(). | |
673 | static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) | |
674 | { | |
675 | struct viosrp_adapter_info *req; | |
676 | struct srp_event_struct *evt_struct; | |
677 | ||
678 | memset(&hostdata->madapter_info, 0x00, sizeof(hostdata->madapter_info)); | |
679 | ||
680 | evt_struct = get_event_struct(&hostdata->pool); | |
681 | if (!evt_struct) { | |
682 | printk(KERN_ERR "ibmvscsi: couldn't allocate an event " | |
683 | "for ADAPTER_INFO_REQ!\n"); | |
684 | return; | |
685 | } | |
686 | ||
687 | init_event_struct(evt_struct, | |
688 | adapter_info_rsp, | |
689 | VIOSRP_MAD_FORMAT, | |
690 | init_timeout * HZ); | |
691 | ||
692 | req = &evt_struct->iu.mad.adapter_info; | |
693 | memset(req, 0x00, sizeof(*req)); | |
694 | ||
695 | req->common.type = VIOSRP_ADAPTER_INFO_TYPE; | |
696 | req->common.length = sizeof(hostdata->madapter_info); | |
697 | req->buffer = dma_map_single(hostdata->dev, | |
698 | &hostdata->madapter_info, | |
699 | sizeof(hostdata->madapter_info), | |
700 | DMA_BIDIRECTIONAL); | |
701 | ||
702 | if (dma_mapping_error(req->buffer)) { | |
703 | printk(KERN_ERR | |
704 | "ibmvscsi: Unable to map request_buffer " | |
705 | "for adapter_info!\n"); | |
706 | free_event_struct(&hostdata->pool, evt_struct); | |
707 | return; | |
708 | } | |
709 | ||
710 | if (ibmvscsi_send_srp_event(evt_struct, hostdata)) | |
711 | printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n"); | |
712 | }; | |
713 | ||
714 | /** | |
715 | * login_rsp: - Handle response to SRP login request | |
716 | * @evt_struct: srp_event_struct with the response | |
717 | * | |
718 | * Used as a "done" callback when sending srp_login. Gets called | |
719 | * by ibmvscsi_handle_crq() | |
720 | */ | |
721 | static void login_rsp(struct srp_event_struct *evt_struct) | |
722 | { | |
723 | struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; | |
724 | switch (evt_struct->xfer_iu->srp.generic.type) { | |
725 | case SRP_LOGIN_RSP_TYPE: /* it worked! */ | |
726 | break; | |
727 | case SRP_LOGIN_REJ_TYPE: /* refused! */ | |
728 | printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REQ rejected\n"); | |
729 | /* Login failed. */ | |
730 | atomic_set(&hostdata->request_limit, -1); | |
731 | return; | |
732 | default: | |
733 | printk(KERN_ERR | |
734 | "ibmvscsi: Invalid login response typecode 0x%02x!\n", | |
735 | evt_struct->xfer_iu->srp.generic.type); | |
736 | /* Login failed. */ | |
737 | atomic_set(&hostdata->request_limit, -1); | |
738 | return; | |
739 | } | |
740 | ||
741 | printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n"); | |
742 | ||
743 | if (evt_struct->xfer_iu->srp.login_rsp.request_limit_delta > | |
744 | (max_requests - 2)) | |
745 | evt_struct->xfer_iu->srp.login_rsp.request_limit_delta = | |
746 | max_requests - 2; | |
747 | ||
748 | /* Now we know what the real request-limit is */ | |
749 | atomic_set(&hostdata->request_limit, | |
750 | evt_struct->xfer_iu->srp.login_rsp.request_limit_delta); | |
751 | ||
752 | hostdata->host->can_queue = | |
753 | evt_struct->xfer_iu->srp.login_rsp.request_limit_delta - 2; | |
754 | ||
755 | if (hostdata->host->can_queue < 1) { | |
756 | printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n"); | |
757 | return; | |
758 | } | |
759 | ||
760 | send_mad_adapter_info(hostdata); | |
761 | return; | |
762 | } | |
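/*
 * Note on the "- 2" above (an interpretation, not stated by the
 * architecture): can_queue is kept two below the negotiated request limit
 * so that driver-generated requests, such as the task management and MAD
 * requests sent elsewhere in this file, still have credits even when the
 * midlayer has filled its queue.
 */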
763 | ||
764 | /** | |
765 | * send_srp_login: - Sends the srp login | |
766 | * @hostdata: ibmvscsi_host_data of host | |
767 | * | |
768 | * Returns zero if successful. | |
769 | */ | |
770 | static int send_srp_login(struct ibmvscsi_host_data *hostdata) | |
771 | { | |
772 | int rc; | |
773 | unsigned long flags; | |
774 | struct srp_login_req *login; | |
775 | struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); | |
776 | if (!evt_struct) { | |
777 | printk(KERN_ERR | |
778 | "ibmvscsi: couldn't allocate an event for login req!\n"); | |
779 | return FAILED; | |
780 | } | |
781 | ||
782 | init_event_struct(evt_struct, | |
783 | login_rsp, | |
784 | VIOSRP_SRP_FORMAT, | |
785 | init_timeout * HZ); | |
786 | ||
787 | login = &evt_struct->iu.srp.login_req; | |
788 | login->type = SRP_LOGIN_REQ_TYPE; | |
789 | login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu); | |
790 | login->required_buffer_formats = 0x0006; | |
791 | ||
792 | /* Start out with a request limit of 1, since this is negotiated in | |
793 | * the login request we are just sending | |
794 | */ | |
795 | atomic_set(&hostdata->request_limit, 1); | |
796 | ||
797 | spin_lock_irqsave(hostdata->host->host_lock, flags); | |
798 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata); | |
799 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | |
800 | return rc; | |
801 | }; | |
802 | ||
803 | /** | |
804 | * sync_completion: Signal that a synchronous command has completed | |
805 | * Note that after returning from this call, the evt_struct is freed. | |
806 | * the caller waiting on this completion shouldn't touch the evt_struct | |
807 | * again. | |
808 | */ | |
809 | static void sync_completion(struct srp_event_struct *evt_struct) | |
810 | { | |
811 | /* copy the response back */ | |
812 | if (evt_struct->sync_srp) | |
813 | *evt_struct->sync_srp = *evt_struct->xfer_iu; | |
814 | ||
815 | complete(&evt_struct->comp); | |
816 | } | |
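/*
 * Illustrative sketch (not part of the driver) of the synchronous pattern
 * built on sync_completion(), as used by the error handlers below: point
 * sync_srp at a local response buffer, send the event, then block until the
 * "done" callback fires.
 */
#if 0
	union viosrp_iu srp_rsp;

	evt->sync_srp = &srp_rsp;
	init_completion(&evt->comp);
	if (ibmvscsi_send_srp_event(evt, hostdata) != 0)
		return FAILED;
	wait_for_completion(&evt->comp);
	/* srp_rsp now holds a copy of the response; evt has been freed */
#endif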
817 | ||
818 | /** | |
819 | * ibmvscsi_abort: Abort a command...from scsi host template | |
820 | * send this over to the server and wait synchronously for the response | |
821 | */ | |
822 | static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) | |
823 | { | |
824 | struct ibmvscsi_host_data *hostdata = | |
825 | (struct ibmvscsi_host_data *)cmd->device->host->hostdata; | |
826 | struct srp_tsk_mgmt *tsk_mgmt; | |
827 | struct srp_event_struct *evt; | |
828 | struct srp_event_struct *tmp_evt, *found_evt; | |
829 | union viosrp_iu srp_rsp; | |
830 | int rsp_rc; | |
831 | u16 lun = lun_from_dev(cmd->device); | |
832 | ||
833 | /* First, find this command in our sent list so we can figure | |
834 | * out the correct tag | |
835 | */ | |
836 | found_evt = NULL; | |
837 | list_for_each_entry(tmp_evt, &hostdata->sent, list) { | |
838 | if (tmp_evt->cmnd == cmd) { | |
839 | found_evt = tmp_evt; | |
840 | break; | |
841 | } | |
842 | } | |
843 | ||
844 | if (!found_evt) | |
845 | return FAILED; | |
846 | ||
847 | evt = get_event_struct(&hostdata->pool); | |
848 | if (evt == NULL) { | |
849 | printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n"); | |
850 | return FAILED; | |
851 | } | |
852 | ||
853 | init_event_struct(evt, | |
854 | sync_completion, | |
855 | VIOSRP_SRP_FORMAT, | |
856 | init_timeout * HZ); | |
857 | ||
858 | tsk_mgmt = &evt->iu.srp.tsk_mgmt; | |
859 | ||
860 | /* Set up an abort SRP command */ | |
861 | memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); | |
862 | tsk_mgmt->type = SRP_TSK_MGMT_TYPE; | |
863 | tsk_mgmt->lun = ((u64) lun) << 48; | |
864 | tsk_mgmt->task_mgmt_flags = 0x01; /* ABORT TASK */ | |
865 | tsk_mgmt->managed_task_tag = (u64) found_evt; | |
866 | ||
867 | printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n", | |
868 | tsk_mgmt->lun, tsk_mgmt->managed_task_tag); | |
869 | ||
870 | evt->sync_srp = &srp_rsp; | |
871 | init_completion(&evt->comp); | |
872 | if (ibmvscsi_send_srp_event(evt, hostdata) != 0) { | |
873 | printk(KERN_ERR "ibmvscsi: failed to send abort() event\n"); | |
874 | return FAILED; | |
875 | } | |
876 | ||
877 | wait_for_completion(&evt->comp); | |
878 | ||
879 | /* make sure we got a good response */ | |
880 | if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) { | |
881 | if (printk_ratelimit()) | |
882 | printk(KERN_WARNING | |
883 | "ibmvscsi: abort bad SRP RSP type %d\n", | |
884 | srp_rsp.srp.generic.type); | |
885 | return FAILED; | |
886 | } | |
887 | ||
888 | if (srp_rsp.srp.rsp.rspvalid) | |
889 | rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data); | |
890 | else | |
891 | rsp_rc = srp_rsp.srp.rsp.status; | |
892 | ||
893 | if (rsp_rc) { | |
894 | if (printk_ratelimit()) | |
895 | printk(KERN_WARNING | |
896 | "ibmvscsi: abort code %d for task tag 0x%lx\n", | |
897 | rsp_rc, | |
898 | tsk_mgmt->managed_task_tag); | |
899 | return FAILED; | |
900 | } | |
901 | ||
902 | /* Because we dropped the spinlock above, it's possible | |
903 | * the event is no longer in our list. Make sure it didn't | |
904 | * complete while we were aborting | |
905 | */ | |
906 | found_evt = NULL; | |
907 | list_for_each_entry(tmp_evt, &hostdata->sent, list) { | |
908 | if (tmp_evt->cmnd == cmd) { | |
909 | found_evt = tmp_evt; | |
910 | break; | |
911 | } | |
912 | } | |
913 | ||
914 | if (found_evt == NULL) { | |
915 | printk(KERN_INFO | |
916 | "ibmvscsi: aborted task tag 0x%lx completed\n", | |
917 | tsk_mgmt->managed_task_tag); | |
918 | return SUCCESS; | |
919 | } | |
920 | ||
921 | printk(KERN_INFO | |
922 | "ibmvscsi: successfully aborted task tag 0x%lx\n", | |
923 | tsk_mgmt->managed_task_tag); | |
924 | ||
925 | cmd->result = (DID_ABORT << 16); | |
926 | list_del(&found_evt->list); | |
927 | unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev); | |
928 | free_event_struct(&found_evt->hostdata->pool, found_evt); | |
929 | atomic_inc(&hostdata->request_limit); | |
930 | return SUCCESS; | |
931 | } | |
932 | ||
933 | /** | |
934 | * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host | |
935 | * template send this over to the server and wait synchronously for the | |
936 | * response | |
937 | */ | |
938 | static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) | |
939 | { | |
940 | struct ibmvscsi_host_data *hostdata = | |
941 | (struct ibmvscsi_host_data *)cmd->device->host->hostdata; | |
942 | ||
943 | struct srp_tsk_mgmt *tsk_mgmt; | |
944 | struct srp_event_struct *evt; | |
945 | struct srp_event_struct *tmp_evt, *pos; | |
946 | union viosrp_iu srp_rsp; | |
947 | int rsp_rc; | |
948 | u16 lun = lun_from_dev(cmd->device); | |
949 | ||
950 | evt = get_event_struct(&hostdata->pool); | |
951 | if (evt == NULL) { | |
952 | printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n"); | |
953 | return FAILED; | |
954 | } | |
955 | ||
956 | init_event_struct(evt, | |
957 | sync_completion, | |
958 | VIOSRP_SRP_FORMAT, | |
959 | init_timeout * HZ); | |
960 | ||
961 | tsk_mgmt = &evt->iu.srp.tsk_mgmt; | |
962 | ||
963 | /* Set up a lun reset SRP command */ | |
964 | memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); | |
965 | tsk_mgmt->type = SRP_TSK_MGMT_TYPE; | |
966 | tsk_mgmt->lun = ((u64) lun) << 48; | |
967 | tsk_mgmt->task_mgmt_flags = 0x08; /* LUN RESET */ | |
968 | ||
969 | printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n", | |
970 | tsk_mgmt->lun); | |
971 | ||
972 | evt->sync_srp = &srp_rsp; | |
973 | init_completion(&evt->comp); | |
974 | if (ibmvscsi_send_srp_event(evt, hostdata) != 0) { | |
975 | printk(KERN_ERR "ibmvscsi: failed to send reset event\n"); | |
976 | return FAILED; | |
977 | } | |
978 | ||
979 | spin_unlock_irq(hostdata->host->host_lock); | |
980 | wait_for_completion(&evt->comp); | |
981 | spin_lock_irq(hostdata->host->host_lock); | |
982 | ||
983 | /* make sure we got a good response */ | |
984 | if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) { | |
985 | if (printk_ratelimit()) | |
986 | printk(KERN_WARNING | |
987 | "ibmvscsi: reset bad SRP RSP type %d\n", | |
988 | srp_rsp.srp.generic.type); | |
989 | return FAILED; | |
990 | } | |
991 | ||
992 | if (srp_rsp.srp.rsp.rspvalid) | |
993 | rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data); | |
994 | else | |
995 | rsp_rc = srp_rsp.srp.rsp.status; | |
996 | ||
997 | if (rsp_rc) { | |
998 | if (printk_ratelimit()) | |
999 | printk(KERN_WARNING | |
1000 | "ibmvscsi: reset code %d for task tag 0x%lx\n", | |
1001 | rsp_rc, | |
1002 | tsk_mgmt->managed_task_tag); | |
1003 | return FAILED; | |
1004 | } | |
1005 | ||
1006 | /* We need to find all commands for this LUN that have not yet been | |
1007 | * responded to, and fail them with DID_RESET | |
1008 | */ | |
1009 | list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) { | |
1010 | if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) { | |
1011 | if (tmp_evt->cmnd) | |
1012 | tmp_evt->cmnd->result = (DID_RESET << 16); | |
1013 | list_del(&tmp_evt->list); | |
1014 | unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt->hostdata->dev); | |
1015 | free_event_struct(&tmp_evt->hostdata->pool, | |
1016 | tmp_evt); | |
1017 | atomic_inc(&hostdata->request_limit); | |
1018 | if (tmp_evt->cmnd_done) | |
1019 | tmp_evt->cmnd_done(tmp_evt->cmnd); | |
1020 | else if (tmp_evt->done) | |
1021 | tmp_evt->done(tmp_evt); | |
1022 | } | |
1023 | } | |
1024 | return SUCCESS; | |
1025 | } | |
1026 | ||
1027 | /** | |
1028 | * purge_requests: Our virtual adapter just shut down. purge any sent requests | |
1029 | * @hostdata: the adapter | |
1030 | */ | |
1031 | static void purge_requests(struct ibmvscsi_host_data *hostdata) | |
1032 | { | |
1033 | struct srp_event_struct *tmp_evt, *pos; | |
1034 | unsigned long flags; | |
1035 | ||
1036 | spin_lock_irqsave(hostdata->host->host_lock, flags); | |
1037 | list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) { | |
1038 | list_del(&tmp_evt->list); | |
1039 | if (tmp_evt->cmnd) { | |
1040 | tmp_evt->cmnd->result = (DID_ERROR << 16); | |
1041 | unmap_cmd_data(&tmp_evt->iu.srp.cmd, | |
1042 | tmp_evt->hostdata->dev); | |
1043 | if (tmp_evt->cmnd_done) | |
1044 | tmp_evt->cmnd_done(tmp_evt->cmnd); | |
1045 | } else { | |
1046 | if (tmp_evt->done) { | |
1047 | tmp_evt->done(tmp_evt); | |
1048 | } | |
1049 | } | |
1050 | free_event_struct(&tmp_evt->hostdata->pool, tmp_evt); | |
1051 | } | |
1052 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | |
1053 | } | |
1054 | ||
1055 | /** | |
1056 | * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ | |
1057 | * @crq: Command/Response queue | |
1058 | * @hostdata: ibmvscsi_host_data of host | |
1059 | * | |
1060 | */ | |
1061 | void ibmvscsi_handle_crq(struct viosrp_crq *crq, | |
1062 | struct ibmvscsi_host_data *hostdata) | |
1063 | { | |
1064 | unsigned long flags; | |
1065 | struct srp_event_struct *evt_struct = | |
1066 | (struct srp_event_struct *)crq->IU_data_ptr; | |
1067 | switch (crq->valid) { | |
1068 | case 0xC0: /* initialization */ | |
1069 | switch (crq->format) { | |
1070 | case 0x01: /* Initialization message */ | |
1071 | printk(KERN_INFO "ibmvscsi: partner initialized\n"); | |
1072 | /* Send back a response */ | |
1073 | if (ibmvscsi_send_crq(hostdata, | |
1074 | 0xC002000000000000LL, 0) == 0) { | |
1075 | /* Now login */ | |
1076 | send_srp_login(hostdata); | |
1077 | } else { | |
1078 | printk(KERN_ERR | |
1079 | "ibmvscsi: Unable to send init rsp\n"); | |
1080 | } | |
1081 | ||
1082 | break; | |
1083 | case 0x02: /* Initialization response */ | |
1084 | printk(KERN_INFO | |
1085 | "ibmvscsi: partner initialization complete\n"); | |
1086 | ||
1087 | /* Now login */ | |
1088 | send_srp_login(hostdata); | |
1089 | break; | |
1090 | default: | |
1091 | printk(KERN_ERR "ibmvscsi: unknown crq message type\n"); | |
1092 | } | |
1093 | return; | |
1094 | case 0xFF: /* Hypervisor telling us the connection is closed */ | |
1095 | printk(KERN_INFO "ibmvscsi: Virtual adapter failed!\n"); | |
1096 | ||
1097 | atomic_set(&hostdata->request_limit, -1); | |
1098 | purge_requests(hostdata); | |
1099 | ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata); | |
1100 | return; | |
1101 | case 0x80: /* real payload */ | |
1102 | break; | |
1103 | default: | |
1104 | printk(KERN_ERR | |
1105 | "ibmvscsi: got an invalid message type 0x%02x\n", | |
1106 | crq->valid); | |
1107 | return; | |
1108 | } | |
1109 | ||
1110 | /* The only kind of payload CRQs we should get are responses to | |
1111 | * things we send. Make sure this response is to something we | |
1112 | * actually sent | |
1113 | */ | |
1114 | if (!valid_event_struct(&hostdata->pool, evt_struct)) { | |
1115 | printk(KERN_ERR | |
1116 | "ibmvscsi: returned correlation_token 0x%p is invalid!\n", | |
1117 | (void *)crq->IU_data_ptr); | |
1118 | return; | |
1119 | } | |
1120 | ||
1121 | if (atomic_read(&evt_struct->free)) { | |
1122 | printk(KERN_ERR | |
1123 | "ibmvscsi: received duplicate correlation_token 0x%p!\n", | |
1124 | (void *)crq->IU_data_ptr); | |
1125 | return; | |
1126 | } | |
1127 | ||
1128 | if (crq->format == VIOSRP_SRP_FORMAT) | |
1129 | atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta, | |
1130 | &hostdata->request_limit); | |
1131 | ||
1132 | if (evt_struct->done) | |
1133 | evt_struct->done(evt_struct); | |
1134 | else | |
1135 | printk(KERN_ERR | |
1136 | "ibmvscsi: returned done() is NULL; not running it!\n"); | |
1137 | ||
1138 | /* | |
1139 | * Lock the host_lock before messing with these structures, since we | |
1140 | * are running in a task context | |
1141 | */ | |
1142 | spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags); | |
1143 | list_del(&evt_struct->list); | |
1144 | free_event_struct(&evt_struct->hostdata->pool, evt_struct); | |
1145 | spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags); | |
1146 | } | |
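/*
 * Correlation note: the tag that ibmvscsi_send_srp_event() stores in the
 * outgoing IU is simply the kernel address of the srp_event_struct, so the
 * cast of crq->IU_data_ptr at the top of this routine recovers the original
 * event, and valid_event_struct() guards against a corrupt or stale tag.
 */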
1147 | ||
1148 | /** | |
1149 | * ibmvscsi_do_host_config: Send the command to the server to get host | |
1150 | * configuration data. The data is opaque to us. | |
1151 | */ | |
1152 | static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, | |
1153 | unsigned char *buffer, int length) | |
1154 | { | |
1155 | struct viosrp_host_config *host_config; | |
1156 | struct srp_event_struct *evt_struct; | |
1157 | int rc; | |
1158 | ||
1159 | evt_struct = get_event_struct(&hostdata->pool); | |
1160 | if (!evt_struct) { | |
1161 | printk(KERN_ERR | |
1162 | "ibmvscsi: could't allocate event for HOST_CONFIG!\n"); | |
1163 | return -1; | |
1164 | } | |
1165 | ||
1166 | init_event_struct(evt_struct, | |
1167 | sync_completion, | |
1168 | VIOSRP_MAD_FORMAT, | |
1169 | init_timeout * HZ); | |
1170 | ||
1171 | host_config = &evt_struct->iu.mad.host_config; | |
1172 | ||
1173 | /* Set up a host config MAD request */ | |
1174 | memset(host_config, 0x00, sizeof(*host_config)); | |
1175 | host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; | |
1176 | host_config->common.length = length; | |
1177 | host_config->buffer = dma_map_single(hostdata->dev, buffer, length, | |
1178 | DMA_BIDIRECTIONAL); | |
1179 | ||
1180 | if (dma_mapping_error(host_config->buffer)) { | |
1181 | printk(KERN_ERR | |
1182 | "ibmvscsi: dma_mapping error " "getting host config\n"); | |
1183 | free_event_struct(&hostdata->pool, evt_struct); | |
1184 | return -1; | |
1185 | } | |
1186 | ||
1187 | init_completion(&evt_struct->comp); | |
1188 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata); | |
1189 | if (rc == 0) { | |
1190 | wait_for_completion(&evt_struct->comp); | |
1191 | dma_unmap_single(hostdata->dev, host_config->buffer, | |
1192 | length, DMA_BIDIRECTIONAL); | |
1193 | } | |
1194 | ||
1195 | return rc; | |
1196 | } | |
1197 | ||
1198 | /* ------------------------------------------------------------ | |
1199 | * sysfs attributes | |
1200 | */ | |
1201 | static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf) | |
1202 | { | |
1203 | struct Scsi_Host *shost = class_to_shost(class_dev); | |
1204 | struct ibmvscsi_host_data *hostdata = | |
1205 | (struct ibmvscsi_host_data *)shost->hostdata; | |
1206 | int len; | |
1207 | ||
1208 | len = snprintf(buf, PAGE_SIZE, "%s\n", | |
1209 | hostdata->madapter_info.srp_version); | |
1210 | return len; | |
1211 | } | |
1212 | ||
1213 | static struct class_device_attribute ibmvscsi_host_srp_version = { | |
1214 | .attr = { | |
1215 | .name = "srp_version", | |
1216 | .mode = S_IRUGO, | |
1217 | }, | |
1218 | .show = show_host_srp_version, | |
1219 | }; | |
1220 | ||
1221 | static ssize_t show_host_partition_name(struct class_device *class_dev, | |
1222 | char *buf) | |
1223 | { | |
1224 | struct Scsi_Host *shost = class_to_shost(class_dev); | |
1225 | struct ibmvscsi_host_data *hostdata = | |
1226 | (struct ibmvscsi_host_data *)shost->hostdata; | |
1227 | int len; | |
1228 | ||
1229 | len = snprintf(buf, PAGE_SIZE, "%s\n", | |
1230 | hostdata->madapter_info.partition_name); | |
1231 | return len; | |
1232 | } | |
1233 | ||
1234 | static struct class_device_attribute ibmvscsi_host_partition_name = { | |
1235 | .attr = { | |
1236 | .name = "partition_name", | |
1237 | .mode = S_IRUGO, | |
1238 | }, | |
1239 | .show = show_host_partition_name, | |
1240 | }; | |
1241 | ||
1242 | static ssize_t show_host_partition_number(struct class_device *class_dev, | |
1243 | char *buf) | |
1244 | { | |
1245 | struct Scsi_Host *shost = class_to_shost(class_dev); | |
1246 | struct ibmvscsi_host_data *hostdata = | |
1247 | (struct ibmvscsi_host_data *)shost->hostdata; | |
1248 | int len; | |
1249 | ||
1250 | len = snprintf(buf, PAGE_SIZE, "%d\n", | |
1251 | hostdata->madapter_info.partition_number); | |
1252 | return len; | |
1253 | } | |
1254 | ||
1255 | static struct class_device_attribute ibmvscsi_host_partition_number = { | |
1256 | .attr = { | |
1257 | .name = "partition_number", | |
1258 | .mode = S_IRUGO, | |
1259 | }, | |
1260 | .show = show_host_partition_number, | |
1261 | }; | |
1262 | ||
1263 | static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf) | |
1264 | { | |
1265 | struct Scsi_Host *shost = class_to_shost(class_dev); | |
1266 | struct ibmvscsi_host_data *hostdata = | |
1267 | (struct ibmvscsi_host_data *)shost->hostdata; | |
1268 | int len; | |
1269 | ||
1270 | len = snprintf(buf, PAGE_SIZE, "%d\n", | |
1271 | hostdata->madapter_info.mad_version); | |
1272 | return len; | |
1273 | } | |
1274 | ||
1275 | static struct class_device_attribute ibmvscsi_host_mad_version = { | |
1276 | .attr = { | |
1277 | .name = "mad_version", | |
1278 | .mode = S_IRUGO, | |
1279 | }, | |
1280 | .show = show_host_mad_version, | |
1281 | }; | |
1282 | ||
1283 | static ssize_t show_host_os_type(struct class_device *class_dev, char *buf) | |
1284 | { | |
1285 | struct Scsi_Host *shost = class_to_shost(class_dev); | |
1286 | struct ibmvscsi_host_data *hostdata = | |
1287 | (struct ibmvscsi_host_data *)shost->hostdata; | |
1288 | int len; | |
1289 | ||
1290 | len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type); | |
1291 | return len; | |
1292 | } | |
1293 | ||
1294 | static struct class_device_attribute ibmvscsi_host_os_type = { | |
1295 | .attr = { | |
1296 | .name = "os_type", | |
1297 | .mode = S_IRUGO, | |
1298 | }, | |
1299 | .show = show_host_os_type, | |
1300 | }; | |
1301 | ||
1302 | static ssize_t show_host_config(struct class_device *class_dev, char *buf) | |
1303 | { | |
1304 | struct Scsi_Host *shost = class_to_shost(class_dev); | |
1305 | struct ibmvscsi_host_data *hostdata = | |
1306 | (struct ibmvscsi_host_data *)shost->hostdata; | |
1307 | ||
1308 | /* returns null-terminated host config data */ | |
1309 | if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0) | |
1310 | return strlen(buf); | |
1311 | else | |
1312 | return 0; | |
1313 | } | |
1314 | ||
1315 | static struct class_device_attribute ibmvscsi_host_config = { | |
1316 | .attr = { | |
1317 | .name = "config", | |
1318 | .mode = S_IRUGO, | |
1319 | }, | |
1320 | .show = show_host_config, | |
1321 | }; | |
1322 | ||
1323 | static struct class_device_attribute *ibmvscsi_attrs[] = { | |
1324 | &ibmvscsi_host_srp_version, | |
1325 | &ibmvscsi_host_partition_name, | |
1326 | &ibmvscsi_host_partition_number, | |
1327 | &ibmvscsi_host_mad_version, | |
1328 | &ibmvscsi_host_os_type, | |
1329 | &ibmvscsi_host_config, | |
1330 | NULL | |
1331 | }; | |
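/*
 * Example (hypothetical host number): once the host is registered these
 * attributes show up under sysfs, e.g.
 *
 *	cat /sys/class/scsi_host/host0/srp_version
 *	cat /sys/class/scsi_host/host0/config
 */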
1332 | ||
1333 | /* ------------------------------------------------------------ | |
1334 | * SCSI driver registration | |
1335 | */ | |
1336 | static struct scsi_host_template driver_template = { | |
1337 | .module = THIS_MODULE, | |
1338 | .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION, | |
1339 | .proc_name = "ibmvscsi", | |
1340 | .queuecommand = ibmvscsi_queuecommand, | |
1341 | .eh_abort_handler = ibmvscsi_eh_abort_handler, | |
1342 | .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, | |
1343 | .cmd_per_lun = 16, | |
1344 | .can_queue = 1, /* Updated after SRP_LOGIN */ | |
1345 | .this_id = -1, | |
1346 | .sg_tablesize = MAX_INDIRECT_BUFS, | |
1347 | .use_clustering = ENABLE_CLUSTERING, | |
1348 | .shost_attrs = ibmvscsi_attrs, | |
1349 | }; | |
1350 | ||
1351 | /** | |
1352 | * Called by bus code for each adapter | |
1353 | */ | |
1354 | static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |
1355 | { | |
1356 | struct ibmvscsi_host_data *hostdata; | |
1357 | struct Scsi_Host *host; | |
1358 | struct device *dev = &vdev->dev; | |
1359 | unsigned long wait_switch = 0; | |
1360 | ||
1361 | vdev->dev.driver_data = NULL; | |
1362 | ||
1363 | host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); | |
1364 | if (!host) { | |
1365 | printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n"); | |
1366 | goto scsi_host_alloc_failed; | |
1367 | } | |
1368 | ||
1369 | hostdata = (struct ibmvscsi_host_data *)host->hostdata; | |
1370 | memset(hostdata, 0x00, sizeof(*hostdata)); | |
1371 | INIT_LIST_HEAD(&hostdata->sent); | |
1372 | hostdata->host = host; | |
1373 | hostdata->dev = dev; | |
1374 | atomic_set(&hostdata->request_limit, -1); | |
1375 | hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */ | |
1376 | ||
1377 | if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, | |
1378 | max_requests) != 0) { | |
1379 | printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n"); | |
1380 | goto init_crq_failed; | |
1381 | } | |
1382 | if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) { | |
1383 | printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n"); | |
1384 | goto init_pool_failed; | |
1385 | } | |
1386 | ||
1387 | host->max_lun = 8; | |
1388 | host->max_id = max_id; | |
1389 | host->max_channel = max_channel; | |
1390 | ||
1391 | if (scsi_add_host(hostdata->host, hostdata->dev)) | |
1392 | goto add_host_failed; | |
1393 | ||
1394 | /* Try to send an initialization message. Note that this is allowed | |
1395 | * to fail if the other end is not active. In that case we don't | |
1396 | * want to scan. | |
1397 | */ | |
1398 | if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) { | |
1399 | /* | |
1400 | * Wait around max init_timeout secs for the adapter to finish | |
1401 | * initializing. When we are done initializing, we will have a | |
1402 | * valid request_limit. We don't want Linux scanning before | |
1403 | * we are ready. | |
1404 | */ | |
1405 | for (wait_switch = jiffies + (init_timeout * HZ); | |
1406 | time_before(jiffies, wait_switch) && | |
1407 | atomic_read(&hostdata->request_limit) < 2;) { | |
1408 | ||
1409 | msleep(10); | |
1410 | } | |
1411 | ||
1412 | /* if we now have a valid request_limit, initiate a scan */ | |
1413 | if (atomic_read(&hostdata->request_limit) > 0) | |
1414 | scsi_scan_host(host); | |
1415 | } | |
1416 | ||
1417 | vdev->dev.driver_data = hostdata; | |
1418 | return 0; | |
1419 | ||
1420 | add_host_failed: | |
1421 | release_event_pool(&hostdata->pool, hostdata); | |
1422 | init_pool_failed: | |
1423 | ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests); | |
1424 | init_crq_failed: | |
1425 | scsi_host_put(host); | |
1426 | scsi_host_alloc_failed: | |
1427 | return -1; | |
1428 | } | |
1429 | ||
1430 | static int ibmvscsi_remove(struct vio_dev *vdev) | |
1431 | { | |
1432 | struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; | |
1433 | release_event_pool(&hostdata->pool, hostdata); | |
1434 | ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, | |
1435 | max_requests); | |
1436 | ||
1437 | scsi_remove_host(hostdata->host); | |
1438 | scsi_host_put(hostdata->host); | |
1439 | ||
1440 | return 0; | |
1441 | } | |
1442 | ||
1443 | /** | |
1444 | * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we | |
1445 | * support. | |
1446 | */ | |
1447 | static struct vio_device_id ibmvscsi_device_table[] __devinitdata = { | |
1448 | {"vscsi", "IBM,v-scsi"}, | |
1449 | {0,} | |
1450 | }; | |
1451 | ||
1452 | MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); | |
1453 | static struct vio_driver ibmvscsi_driver = { | |
1454 | .name = "ibmvscsi", | |
1455 | .id_table = ibmvscsi_device_table, | |
1456 | .probe = ibmvscsi_probe, | |
1457 | .remove = ibmvscsi_remove | |
1458 | }; | |
1459 | ||
1460 | int __init ibmvscsi_module_init(void) | |
1461 | { | |
1462 | return vio_register_driver(&ibmvscsi_driver); | |
1463 | } | |
1464 | ||
1465 | void __exit ibmvscsi_module_exit(void) | |
1466 | { | |
1467 | vio_unregister_driver(&ibmvscsi_driver); | |
1468 | } | |
1469 | ||
1470 | module_init(ibmvscsi_module_init); | |
1471 | module_exit(ibmvscsi_module_exit); |