1 /*
2 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3
4 Written By: Adam Radford <linuxraid@amcc.com>
5 Modifications By: Tom Couch <linuxraid@amcc.com>
6
7 Copyright (C) 2004-2006 Applied Micro Circuits Corporation.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 NO WARRANTY
19 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 solely responsible for determining the appropriateness of using and
24 distributing the Program and assumes all risks associated with its
25 exercise of rights under this Agreement, including but not limited to
26 the risks and costs of program errors, damage to or loss of data,
27 programs or equipment, and unavailability or interruption of operations.
28
29 DISCLAIMER OF LIABILITY
30 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37
38 You should have received a copy of the GNU General Public License
39 along with this program; if not, write to the Free Software
40 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
41
42 Bugs/Comments/Suggestions should be mailed to:
43 linuxraid@amcc.com
44
45     For more information, go to:
46 http://www.amcc.com
47
48 Note: This version of the driver does not contain a bundled firmware
49 image.
50
51 History
52 -------
53 2.26.02.000 - Driver cleanup for kernel submission.
54 2.26.02.001 - Replace schedule_timeout() calls with msleep().
55 2.26.02.002 - Add support for PAE mode.
56 Add lun support.
57 Fix twa_remove() to free irq handler/unregister_chrdev()
58 before shutting down card.
59 Change to new 'change_queue_depth' api.
60 Fix 'handled=1' ISR usage, remove bogus IRQ check.
61 Remove un-needed eh_abort handler.
62 Add support for embedded firmware error strings.
63 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
64 2.26.02.004 - Add support for 9550SX controllers.
65 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
66 2.26.02.006 - Fix 9550SX pchip reset timeout.
67 Add big endian support.
68 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
69 2.26.02.008 - Free irq handler in __twa_shutdown().
70 Serialize reset code.
71 Add support for 9650SE controllers.
72 */
73
74 #include <linux/module.h>
75 #include <linux/reboot.h>
76 #include <linux/spinlock.h>
77 #include <linux/interrupt.h>
78 #include <linux/moduleparam.h>
79 #include <linux/errno.h>
80 #include <linux/types.h>
81 #include <linux/delay.h>
82 #include <linux/pci.h>
83 #include <linux/time.h>
84 #include <linux/mutex.h>
85 #include <asm/io.h>
86 #include <asm/irq.h>
87 #include <asm/uaccess.h>
88 #include <scsi/scsi.h>
89 #include <scsi/scsi_host.h>
90 #include <scsi/scsi_tcq.h>
91 #include <scsi/scsi_cmnd.h>
92 #include "3w-9xxx.h"
93
94 /* Globals */
95 #define TW_DRIVER_VERSION "2.26.02.008"
96 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
97 static unsigned int twa_device_extension_count;
98 static int twa_major = -1;
99 extern struct timezone sys_tz;
100
101 /* Module parameters */
102 MODULE_AUTHOR ("AMCC");
103 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
104 MODULE_LICENSE("GPL");
105 MODULE_VERSION(TW_DRIVER_VERSION);
106
107 /* Function prototypes */
108 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
109 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
110 static char *twa_aen_severity_lookup(unsigned char severity_code);
111 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
112 static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
113 static int twa_chrdev_open(struct inode *inode, struct file *file);
114 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
115 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
116 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
117 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
118 u32 set_features, unsigned short current_fw_srl,
119 unsigned short current_fw_arch_id,
120 unsigned short current_fw_branch,
121 unsigned short current_fw_build,
122 unsigned short *fw_on_ctlr_srl,
123 unsigned short *fw_on_ctlr_arch_id,
124 unsigned short *fw_on_ctlr_branch,
125 unsigned short *fw_on_ctlr_build,
126 u32 *init_connect_result);
127 static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
128 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
129 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
130 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
131 static int twa_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
132 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
133 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
134 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
135 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
136 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
137
138 /* Functions */
139
140 /* Show some statistics about the card */
141 static ssize_t twa_show_stats(struct class_device *class_dev, char *buf)
142 {
143 struct Scsi_Host *host = class_to_shost(class_dev);
144 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
145 unsigned long flags = 0;
146 ssize_t len;
147
148 spin_lock_irqsave(tw_dev->host->host_lock, flags);
149 len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
150 "Current commands posted: %4d\n"
151 "Max commands posted: %4d\n"
152 "Current pending commands: %4d\n"
153 "Max pending commands: %4d\n"
154 "Last sgl length: %4d\n"
155 "Max sgl length: %4d\n"
156 "Last sector count: %4d\n"
157 "Max sector count: %4d\n"
158 "SCSI Host Resets: %4d\n"
159 "AEN's: %4d\n",
160 TW_DRIVER_VERSION,
161 tw_dev->posted_request_count,
162 tw_dev->max_posted_request_count,
163 tw_dev->pending_request_count,
164 tw_dev->max_pending_request_count,
165 tw_dev->sgl_entries,
166 tw_dev->max_sgl_entries,
167 tw_dev->sector_count,
168 tw_dev->max_sector_count,
169 tw_dev->num_resets,
170 tw_dev->aen_count);
171 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
172 return len;
173 } /* End twa_show_stats() */
174
175 /* This function will set a device's queue depth */
176 static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth)
177 {
178 if (queue_depth > TW_Q_LENGTH-2)
179 queue_depth = TW_Q_LENGTH-2;
180 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
181 return queue_depth;
182 } /* End twa_change_queue_depth() */
183
184 /* Create sysfs 'stats' entry */
185 static struct class_device_attribute twa_host_stats_attr = {
186 .attr = {
187 .name = "stats",
188 .mode = S_IRUGO,
189 },
190 .show = twa_show_stats
191 };
192
193 /* Host attributes initializer */
194 static struct class_device_attribute *twa_host_attrs[] = {
195 &twa_host_stats_attr,
196 NULL,
197 };
198
199 /* File operations struct for character device */
200 static const struct file_operations twa_fops = {
201 .owner = THIS_MODULE,
202 .ioctl = twa_chrdev_ioctl,
203 .open = twa_chrdev_open,
204 .release = NULL
205 };
206
207 /* This function will complete an aen request from the isr */
208 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
209 {
210 TW_Command_Full *full_command_packet;
211 TW_Command *command_packet;
212 TW_Command_Apache_Header *header;
213 unsigned short aen;
214 int retval = 1;
215
216 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
217 tw_dev->posted_request_count--;
218 aen = le16_to_cpu(header->status_block.error);
219 full_command_packet = tw_dev->command_packet_virt[request_id];
220 command_packet = &full_command_packet->command.oldcommand;
221
222 /* First check for internal completion of set param for time sync */
223 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
224 /* Keep reading the queue in case there are more aen's */
225 if (twa_aen_read_queue(tw_dev, request_id))
226 goto out2;
227 else {
228 retval = 0;
229 goto out;
230 }
231 }
232
233 switch (aen) {
234 case TW_AEN_QUEUE_EMPTY:
235 /* Quit reading the queue if this is the last one */
236 break;
237 case TW_AEN_SYNC_TIME_WITH_HOST:
238 twa_aen_sync_time(tw_dev, request_id);
239 retval = 0;
240 goto out;
241 default:
242 twa_aen_queue_event(tw_dev, header);
243
244 /* If there are more aen's, keep reading the queue */
245 if (twa_aen_read_queue(tw_dev, request_id))
246 goto out2;
247 else {
248 retval = 0;
249 goto out;
250 }
251 }
252 retval = 0;
253 out2:
254 tw_dev->state[request_id] = TW_S_COMPLETED;
255 twa_free_request_id(tw_dev, request_id);
256 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
257 out:
258 return retval;
259 } /* End twa_aen_complete() */
260
261 /* This function will drain the aen queue */
262 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
263 {
264 int request_id = 0;
265 char cdb[TW_MAX_CDB_LEN];
266 TW_SG_Entry sglist[1];
267 int finished = 0, count = 0;
268 TW_Command_Full *full_command_packet;
269 TW_Command_Apache_Header *header;
270 unsigned short aen;
271 int first_reset = 0, queue = 0, retval = 1;
272
273 if (no_check_reset)
274 first_reset = 0;
275 else
276 first_reset = 1;
277
278 full_command_packet = tw_dev->command_packet_virt[request_id];
279 memset(full_command_packet, 0, sizeof(TW_Command_Full));
280
281 /* Initialize cdb */
282 memset(&cdb, 0, TW_MAX_CDB_LEN);
283 cdb[0] = REQUEST_SENSE; /* opcode */
284 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
285
286 /* Initialize sglist */
287 memset(&sglist, 0, sizeof(TW_SG_Entry));
288 sglist[0].length = TW_SECTOR_SIZE;
289 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
290
291 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
292 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
293 goto out;
294 }
295
296 /* Mark internal command */
297 tw_dev->srb[request_id] = NULL;
298
299 do {
300 /* Send command to the board */
301 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
302 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
303 goto out;
304 }
305
306 /* Now poll for completion */
307 if (twa_poll_response(tw_dev, request_id, 30)) {
308 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
309 tw_dev->posted_request_count--;
310 goto out;
311 }
312
313 tw_dev->posted_request_count--;
314 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
315 aen = le16_to_cpu(header->status_block.error);
316 queue = 0;
317 count++;
318
319 switch (aen) {
320 case TW_AEN_QUEUE_EMPTY:
321 if (first_reset != 1)
322 goto out;
323 else
324 finished = 1;
325 break;
326 case TW_AEN_SOFT_RESET:
327 if (first_reset == 0)
328 first_reset = 1;
329 else
330 queue = 1;
331 break;
332 case TW_AEN_SYNC_TIME_WITH_HOST:
333 break;
334 default:
335 queue = 1;
336 }
337
338 /* Now queue an event info */
339 if (queue)
340 twa_aen_queue_event(tw_dev, header);
341 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
342
343 if (count == TW_MAX_AEN_DRAIN)
344 goto out;
345
346 retval = 0;
347 out:
348 tw_dev->state[request_id] = TW_S_INITIAL;
349 return retval;
350 } /* End twa_aen_drain_queue() */
351
352 /* This function will queue an event */
353 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
354 {
355 u32 local_time;
356 struct timeval time;
357 TW_Event *event;
358 unsigned short aen;
359 char host[16];
360 char *error_str;
361
362 tw_dev->aen_count++;
363
364 /* Fill out event info */
365 event = tw_dev->event_queue[tw_dev->error_index];
366
367 /* Check for clobber */
368 host[0] = '\0';
369 if (tw_dev->host) {
370 sprintf(host, " scsi%d:", tw_dev->host->host_no);
371 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
372 tw_dev->aen_clobber = 1;
373 }
374
375 aen = le16_to_cpu(header->status_block.error);
376 memset(event, 0, sizeof(TW_Event));
377
378 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
379 do_gettimeofday(&time);
380 local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
381 event->time_stamp_sec = local_time;
382 event->aen_code = aen;
383 event->retrieved = TW_AEN_NOT_RETRIEVED;
384 event->sequence_id = tw_dev->error_sequence_id;
385 tw_dev->error_sequence_id++;
386
387 /* Check for embedded error string */
388 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
389
390 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
391 event->parameter_len = strlen(header->err_specific_desc);
392 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
393 if (event->severity != TW_AEN_SEVERITY_DEBUG)
394 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
395 host,
396 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
397 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
398 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
399 header->err_specific_desc);
400 else
401 tw_dev->aen_count--;
402
403 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
404 tw_dev->event_queue_wrapped = 1;
405 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
406 } /* End twa_aen_queue_event() */
407
408 /* This function will read the aen queue from the isr */
409 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
410 {
411 char cdb[TW_MAX_CDB_LEN];
412 TW_SG_Entry sglist[1];
413 TW_Command_Full *full_command_packet;
414 int retval = 1;
415
416 full_command_packet = tw_dev->command_packet_virt[request_id];
417 memset(full_command_packet, 0, sizeof(TW_Command_Full));
418
419 /* Initialize cdb */
420 memset(&cdb, 0, TW_MAX_CDB_LEN);
421 cdb[0] = REQUEST_SENSE; /* opcode */
422 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
423
424 /* Initialize sglist */
425 memset(&sglist, 0, sizeof(TW_SG_Entry));
426 sglist[0].length = TW_SECTOR_SIZE;
427 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
428
429 /* Mark internal command */
430 tw_dev->srb[request_id] = NULL;
431
432 /* Now post the command packet */
433 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
434 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
435 goto out;
436 }
437 retval = 0;
438 out:
439 return retval;
440 } /* End twa_aen_read_queue() */
441
442 /* This function will look up an AEN severity string */
443 static char *twa_aen_severity_lookup(unsigned char severity_code)
444 {
445 char *retval = NULL;
446
447 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
448 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
449 goto out;
450
451 retval = twa_aen_severity_table[severity_code];
452 out:
453 return retval;
454 } /* End twa_aen_severity_lookup() */
455
456 /* This function will sync firmware time with the host time */
457 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
458 {
459 u32 schedulertime;
460 struct timeval utc;
461 TW_Command_Full *full_command_packet;
462 TW_Command *command_packet;
463 TW_Param_Apache *param;
464 u32 local_time;
465
466 /* Fill out the command packet */
467 full_command_packet = tw_dev->command_packet_virt[request_id];
468 memset(full_command_packet, 0, sizeof(TW_Command_Full));
469 command_packet = &full_command_packet->command.oldcommand;
470 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
471 command_packet->request_id = request_id;
472 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
473 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
474 command_packet->size = TW_COMMAND_SIZE;
475 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
476
477 /* Setup the param */
478 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
479 memset(param, 0, TW_SECTOR_SIZE);
480 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
481 param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
482 param->parameter_size_bytes = cpu_to_le16(4);
483
484 /* Convert system time in UTC to local time seconds since last
485 Sunday 12:00AM */
486 do_gettimeofday(&utc);
487 local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
488 schedulertime = local_time - (3 * 86400);
489 schedulertime = cpu_to_le32(schedulertime % 604800);
490
491 memcpy(param->data, &schedulertime, sizeof(u32));
492
493 /* Mark internal command */
494 tw_dev->srb[request_id] = NULL;
495
496 /* Now post the command */
497 twa_post_command_packet(tw_dev, request_id, 1);
498 } /* End twa_aen_sync_time() */
499
500 /* This function will allocate memory and check if it is correctly aligned */
501 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
502 {
503 int i;
504 dma_addr_t dma_handle;
505 unsigned long *cpu_addr;
506 int retval = 1;
507
508 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
509 if (!cpu_addr) {
510 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
511 goto out;
512 }
513
514 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
515 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
516 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
517 goto out;
518 }
519
520 memset(cpu_addr, 0, size*TW_Q_LENGTH);
521
522 for (i = 0; i < TW_Q_LENGTH; i++) {
523 switch(which) {
524 case 0:
525 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
526 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
527 break;
528 case 1:
529 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
530 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
531 break;
532 }
533 }
534 retval = 0;
535 out:
536 return retval;
537 } /* End twa_allocate_memory() */
538
539 /* This function will check the status register for unexpected bits */
540 static int twa_check_bits(u32 status_reg_value)
541 {
542 int retval = 1;
543
544 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
545 goto out;
546 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
547 goto out;
548
549 retval = 0;
550 out:
551 return retval;
552 } /* End twa_check_bits() */
553
554 /* This function will check the srl and decide if we are compatible */
555 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
556 {
557 int retval = 1;
558 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
559 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
560 u32 init_connect_result = 0;
561
562 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
563 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
564 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
565 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
566 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
567 &fw_on_ctlr_build, &init_connect_result)) {
568 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
569 goto out;
570 }
571
572 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
573 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
574 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
575
576 /* Try base mode compatibility */
577 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
578 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
579 TW_EXTENDED_INIT_CONNECT,
580 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
581 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
582 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
583 &fw_on_ctlr_branch, &fw_on_ctlr_build,
584 &init_connect_result)) {
585 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
586 goto out;
587 }
588 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
589 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
590 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
591 } else {
592 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
593 }
594 goto out;
595 }
596 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
597 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
598 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
599 }
600
601 /* Load rest of compatibility struct */
602 strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
603 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
604 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
605 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
606 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
607 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
608 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
609 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
610 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
611 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
612
613 retval = 0;
614 out:
615 return retval;
616 } /* End twa_check_srl() */
617
618 /* This function handles ioctl for the character device */
619 static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
620 {
621 long timeout;
622 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
623 dma_addr_t dma_handle;
624 int request_id = 0;
625 unsigned int sequence_id = 0;
626 unsigned char event_index, start_index;
627 TW_Ioctl_Driver_Command driver_command;
628 TW_Ioctl_Buf_Apache *tw_ioctl;
629 TW_Lock *tw_lock;
630 TW_Command_Full *full_command_packet;
631 TW_Compatibility_Info *tw_compat_info;
632 TW_Event *event;
633 struct timeval current_time;
634 u32 current_time_ms;
635 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
636 int retval = TW_IOCTL_ERROR_OS_EFAULT;
637 void __user *argp = (void __user *)arg;
638
639 /* Only let one of these through at a time */
640 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
641 retval = TW_IOCTL_ERROR_OS_EINTR;
642 goto out;
643 }
644
645 /* First copy down the driver command */
646 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
647 goto out2;
648
649 /* Check data buffer size */
650 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
651 retval = TW_IOCTL_ERROR_OS_EINVAL;
652 goto out2;
653 }
654
655 	/* Hardware can only do transfers in multiples of 512 bytes */
656 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
657
658 /* Now allocate ioctl buf memory */
659 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
660 if (!cpu_addr) {
661 retval = TW_IOCTL_ERROR_OS_ENOMEM;
662 goto out2;
663 }
664
665 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
666
667 /* Now copy down the entire ioctl */
668 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
669 goto out3;
670
671 /* See which ioctl we are doing */
672 switch (cmd) {
673 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
674 spin_lock_irqsave(tw_dev->host->host_lock, flags);
675 twa_get_request_id(tw_dev, &request_id);
676
677 /* Flag internal command */
678 tw_dev->srb[request_id] = NULL;
679
680 /* Flag chrdev ioctl */
681 tw_dev->chrdev_request_id = request_id;
682
683 full_command_packet = &tw_ioctl->firmware_command;
684
685 /* Load request id and sglist for both command types */
686 twa_load_sgl(full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
687
688 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
689
690 /* Now post the command packet to the controller */
691 twa_post_command_packet(tw_dev, request_id, 1);
692 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
693
694 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
695
696 /* Now wait for command to complete */
697 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
698
699 /* We timed out, and didn't get an interrupt */
700 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
701 /* Now we need to reset the board */
702 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
703 tw_dev->host->host_no, TW_DRIVER, 0xc,
704 cmd);
705 retval = TW_IOCTL_ERROR_OS_EIO;
706 twa_reset_device_extension(tw_dev, 1);
707 goto out3;
708 }
709
710 /* Now copy in the command packet response */
711 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
712
713 /* Now complete the io */
714 spin_lock_irqsave(tw_dev->host->host_lock, flags);
715 tw_dev->posted_request_count--;
716 tw_dev->state[request_id] = TW_S_COMPLETED;
717 twa_free_request_id(tw_dev, request_id);
718 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
719 break;
720 case TW_IOCTL_GET_COMPATIBILITY_INFO:
721 tw_ioctl->driver_command.status = 0;
722 		/* Copy compatibility struct into ioctl data buffer */
723 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
724 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
725 break;
726 case TW_IOCTL_GET_LAST_EVENT:
727 if (tw_dev->event_queue_wrapped) {
728 if (tw_dev->aen_clobber) {
729 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
730 tw_dev->aen_clobber = 0;
731 } else
732 tw_ioctl->driver_command.status = 0;
733 } else {
734 if (!tw_dev->error_index) {
735 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
736 break;
737 }
738 tw_ioctl->driver_command.status = 0;
739 }
740 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
741 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
742 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
743 break;
744 case TW_IOCTL_GET_FIRST_EVENT:
745 if (tw_dev->event_queue_wrapped) {
746 if (tw_dev->aen_clobber) {
747 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
748 tw_dev->aen_clobber = 0;
749 } else
750 tw_ioctl->driver_command.status = 0;
751 event_index = tw_dev->error_index;
752 } else {
753 if (!tw_dev->error_index) {
754 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
755 break;
756 }
757 tw_ioctl->driver_command.status = 0;
758 event_index = 0;
759 }
760 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
761 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
762 break;
763 case TW_IOCTL_GET_NEXT_EVENT:
764 event = (TW_Event *)tw_ioctl->data_buffer;
765 sequence_id = event->sequence_id;
766 tw_ioctl->driver_command.status = 0;
767
768 if (tw_dev->event_queue_wrapped) {
769 if (tw_dev->aen_clobber) {
770 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
771 tw_dev->aen_clobber = 0;
772 }
773 start_index = tw_dev->error_index;
774 } else {
775 if (!tw_dev->error_index) {
776 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
777 break;
778 }
779 start_index = 0;
780 }
781 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
782
783 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
784 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
785 tw_dev->aen_clobber = 1;
786 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
787 break;
788 }
789 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
790 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
791 break;
792 case TW_IOCTL_GET_PREVIOUS_EVENT:
793 event = (TW_Event *)tw_ioctl->data_buffer;
794 sequence_id = event->sequence_id;
795 tw_ioctl->driver_command.status = 0;
796
797 if (tw_dev->event_queue_wrapped) {
798 if (tw_dev->aen_clobber) {
799 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
800 tw_dev->aen_clobber = 0;
801 }
802 start_index = tw_dev->error_index;
803 } else {
804 if (!tw_dev->error_index) {
805 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
806 break;
807 }
808 start_index = 0;
809 }
810 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
811
812 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
813 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
814 tw_dev->aen_clobber = 1;
815 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
816 break;
817 }
818 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
819 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
820 break;
821 case TW_IOCTL_GET_LOCK:
822 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
823 do_gettimeofday(&current_time);
824 current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
825
826 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
827 tw_dev->ioctl_sem_lock = 1;
828 tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
829 tw_ioctl->driver_command.status = 0;
830 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
831 } else {
832 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
833 tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
834 }
835 break;
836 case TW_IOCTL_RELEASE_LOCK:
837 if (tw_dev->ioctl_sem_lock == 1) {
838 tw_dev->ioctl_sem_lock = 0;
839 tw_ioctl->driver_command.status = 0;
840 } else {
841 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
842 }
843 break;
844 default:
845 retval = TW_IOCTL_ERROR_OS_ENOTTY;
846 goto out3;
847 }
848
849 /* Now copy the entire response to userspace */
850 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
851 retval = 0;
852 out3:
853 /* Now free ioctl buf memory */
854 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
855 out2:
856 mutex_unlock(&tw_dev->ioctl_lock);
857 out:
858 return retval;
859 } /* End twa_chrdev_ioctl() */
860
861 /* This function handles open for the character device */
862 static int twa_chrdev_open(struct inode *inode, struct file *file)
863 {
864 unsigned int minor_number;
865 int retval = TW_IOCTL_ERROR_OS_ENODEV;
866
867 minor_number = iminor(inode);
868 if (minor_number >= twa_device_extension_count)
869 goto out;
870 retval = 0;
871 out:
872 return retval;
873 } /* End twa_chrdev_open() */
874
875 /* This function will print readable messages from status register errors */
876 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
877 {
878 int retval = 1;
879
880 /* Check for various error conditions and handle them appropriately */
881 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
882 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
883 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
884 }
885
886 if (status_reg_value & TW_STATUS_PCI_ABORT) {
887 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
888 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
889 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
890 }
891
892 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
893 if ((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) || (!test_bit(TW_IN_RESET, &tw_dev->flags)))
894 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
895 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
896 }
897
898 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
899 if (tw_dev->reset_print == 0) {
900 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
901 tw_dev->reset_print = 1;
902 }
903 goto out;
904 }
905 retval = 0;
906 out:
907 return retval;
908 } /* End twa_decode_bits() */
909
910 /* This function will empty the response queue */
911 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
912 {
913 u32 status_reg_value, response_que_value;
914 int count = 0, retval = 1;
915
916 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
917
918 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
919 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
920 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
921 count++;
922 }
923 if (count == TW_MAX_RESPONSE_DRAIN)
924 goto out;
925
926 retval = 0;
927 out:
928 return retval;
929 } /* End twa_empty_response_queue() */
930
931 /* This function will clear the pchip/response queue on 9550SX */
932 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
933 {
934 u32 response_que_value = 0;
935 unsigned long before;
936 int retval = 1;
937
938 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) ||
939 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE)) {
940 before = jiffies;
941 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
942 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
943 msleep(1);
944 if (time_after(jiffies, before + HZ * 30))
945 goto out;
946 }
947 /* P-chip settle time */
948 msleep(500);
949 retval = 0;
950 } else
951 retval = 0;
952 out:
953 return retval;
954 } /* End twa_empty_response_queue_large() */
955
956 /* This function passes sense keys from firmware to scsi layer */
957 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
958 {
959 TW_Command_Full *full_command_packet;
960 unsigned short error;
961 int retval = 1;
962 char *error_str;
963
964 full_command_packet = tw_dev->command_packet_virt[request_id];
965
966 /* Check for embedded error string */
967 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
968
969 /* Don't print error for Logical unit not supported during rollcall */
970 error = le16_to_cpu(full_command_packet->header.status_block.error);
971 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
972 if (print_host)
973 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
974 tw_dev->host->host_no,
975 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
976 full_command_packet->header.status_block.error,
977 error_str[0] == '\0' ?
978 twa_string_lookup(twa_error_table,
979 full_command_packet->header.status_block.error) : error_str,
980 full_command_packet->header.err_specific_desc);
981 else
982 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
983 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
984 full_command_packet->header.status_block.error,
985 error_str[0] == '\0' ?
986 twa_string_lookup(twa_error_table,
987 full_command_packet->header.status_block.error) : error_str,
988 full_command_packet->header.err_specific_desc);
989 }
990
991 if (copy_sense) {
992 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
993 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
994 retval = TW_ISR_DONT_RESULT;
995 goto out;
996 }
997 retval = 0;
998 out:
999 return retval;
1000 } /* End twa_fill_sense() */
1001
1002 /* This function will free up device extension resources */
1003 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1004 {
1005 if (tw_dev->command_packet_virt[0])
1006 pci_free_consistent(tw_dev->tw_pci_dev,
1007 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1008 tw_dev->command_packet_virt[0],
1009 tw_dev->command_packet_phys[0]);
1010
1011 if (tw_dev->generic_buffer_virt[0])
1012 pci_free_consistent(tw_dev->tw_pci_dev,
1013 TW_SECTOR_SIZE*TW_Q_LENGTH,
1014 tw_dev->generic_buffer_virt[0],
1015 tw_dev->generic_buffer_phys[0]);
1016
1017 kfree(tw_dev->event_queue[0]);
1018 } /* End twa_free_device_extension() */
1019
1020 /* This function will free a request id */
1021 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1022 {
1023 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1024 tw_dev->state[request_id] = TW_S_FINISHED;
1025 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1026 } /* End twa_free_request_id() */
1027
1028 /* This function will get parameter table entries from the firmware */
1029 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1030 {
1031 TW_Command_Full *full_command_packet;
1032 TW_Command *command_packet;
1033 TW_Param_Apache *param;
1034 unsigned long param_value;
1035 void *retval = NULL;
1036
1037 /* Setup the command packet */
1038 full_command_packet = tw_dev->command_packet_virt[request_id];
1039 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1040 command_packet = &full_command_packet->command.oldcommand;
1041
1042 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1043 command_packet->size = TW_COMMAND_SIZE;
1044 command_packet->request_id = request_id;
1045 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1046
1047 /* Now setup the param */
1048 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1049 memset(param, 0, TW_SECTOR_SIZE);
1050 param->table_id = cpu_to_le16(table_id | 0x8000);
1051 param->parameter_id = cpu_to_le16(parameter_id);
1052 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1053 param_value = tw_dev->generic_buffer_phys[request_id];
1054
1055 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(param_value);
1056 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1057
1058 /* Post the command packet to the board */
1059 twa_post_command_packet(tw_dev, request_id, 1);
1060
1061 /* Poll for completion */
1062 if (twa_poll_response(tw_dev, request_id, 30))
1063 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1064 else
1065 retval = (void *)&(param->data[0]);
1066
1067 tw_dev->posted_request_count--;
1068 tw_dev->state[request_id] = TW_S_INITIAL;
1069
1070 return retval;
1071 } /* End twa_get_param() */
1072
1073 /* This function will assign an available request id */
1074 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1075 {
1076 *request_id = tw_dev->free_queue[tw_dev->free_head];
1077 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1078 tw_dev->state[*request_id] = TW_S_STARTED;
1079 } /* End twa_get_request_id() */
1080
1081 /* This function will send an initconnection command to the controller */
1082 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1083 u32 set_features, unsigned short current_fw_srl,
1084 unsigned short current_fw_arch_id,
1085 unsigned short current_fw_branch,
1086 unsigned short current_fw_build,
1087 unsigned short *fw_on_ctlr_srl,
1088 unsigned short *fw_on_ctlr_arch_id,
1089 unsigned short *fw_on_ctlr_branch,
1090 unsigned short *fw_on_ctlr_build,
1091 u32 *init_connect_result)
1092 {
1093 TW_Command_Full *full_command_packet;
1094 TW_Initconnect *tw_initconnect;
1095 int request_id = 0, retval = 1;
1096
1097 /* Initialize InitConnection command packet */
1098 full_command_packet = tw_dev->command_packet_virt[request_id];
1099 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1100 full_command_packet->header.header_desc.size_header = 128;
1101
1102 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1103 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1104 tw_initconnect->request_id = request_id;
1105 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1106 tw_initconnect->features = set_features;
1107
1108 /* Turn on 64-bit sgl support if we need to */
1109 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1110
1111 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1112
1113 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1114 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1115 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1116 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1117 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1118 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1119 } else
1120 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1121
1122 /* Send command packet to the board */
1123 twa_post_command_packet(tw_dev, request_id, 1);
1124
1125 /* Poll for completion */
1126 if (twa_poll_response(tw_dev, request_id, 30)) {
1127 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1128 } else {
1129 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1130 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1131 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1132 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1133 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1134 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1135 }
1136 retval = 0;
1137 }
1138
1139 tw_dev->posted_request_count--;
1140 tw_dev->state[request_id] = TW_S_INITIAL;
1141
1142 return retval;
1143 } /* End twa_initconnection() */
1144
1145 /* This function will initialize the fields of a device extension */
1146 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1147 {
1148 int i, retval = 1;
1149
1150 /* Initialize command packet buffers */
1151 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1152 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1153 goto out;
1154 }
1155
1156 /* Initialize generic buffer */
1157 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1158 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1159 goto out;
1160 }
1161
1162 /* Allocate event info space */
1163 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1164 if (!tw_dev->event_queue[0]) {
1165 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1166 goto out;
1167 }
1168
1169
1170 for (i = 0; i < TW_Q_LENGTH; i++) {
1171 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1172 tw_dev->free_queue[i] = i;
1173 tw_dev->state[i] = TW_S_INITIAL;
1174 }
1175
1176 tw_dev->pending_head = TW_Q_START;
1177 tw_dev->pending_tail = TW_Q_START;
1178 tw_dev->free_head = TW_Q_START;
1179 tw_dev->free_tail = TW_Q_START;
1180 tw_dev->error_sequence_id = 1;
1181 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1182
1183 mutex_init(&tw_dev->ioctl_lock);
1184 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1185
1186 retval = 0;
1187 out:
1188 return retval;
1189 } /* End twa_initialize_device_extension() */
1190
1191 /* This function is the interrupt service routine */
1192 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1193 {
1194 int request_id, error = 0;
1195 u32 status_reg_value;
1196 TW_Response_Queue response_que;
1197 TW_Command_Full *full_command_packet;
1198 TW_Command *command_packet;
1199 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1200 int handled = 0;
1201
1202 /* Get the per adapter lock */
1203 spin_lock(tw_dev->host->host_lock);
1204
1205 /* Read the registers */
1206 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1207
1208 /* Check if this is our interrupt, otherwise bail */
1209 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1210 goto twa_interrupt_bail;
1211
1212 handled = 1;
1213
1214 /* If we are resetting, bail */
1215 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1216 goto twa_interrupt_bail;
1217
1218 /* Check controller for errors */
1219 if (twa_check_bits(status_reg_value)) {
1220 if (twa_decode_bits(tw_dev, status_reg_value)) {
1221 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1222 goto twa_interrupt_bail;
1223 }
1224 }
1225
1226 /* Handle host interrupt */
1227 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1228 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1229
1230 /* Handle attention interrupt */
1231 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1232 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1233 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1234 twa_get_request_id(tw_dev, &request_id);
1235
1236 error = twa_aen_read_queue(tw_dev, request_id);
1237 if (error) {
1238 tw_dev->state[request_id] = TW_S_COMPLETED;
1239 twa_free_request_id(tw_dev, request_id);
1240 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1241 }
1242 }
1243 }
1244
1245 /* Handle command interrupt */
1246 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1247 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1248 /* Drain as many pending commands as we can */
1249 while (tw_dev->pending_request_count > 0) {
1250 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1251 if (tw_dev->state[request_id] != TW_S_PENDING) {
1252 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1253 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1254 goto twa_interrupt_bail;
1255 }
1256 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1257 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1258 tw_dev->pending_request_count--;
1259 } else {
1260 /* If we get here, we will continue re-posting on the next command interrupt */
1261 break;
1262 }
1263 }
1264 }
1265
1266 /* Handle response interrupt */
1267 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1268
1269 /* Drain the response queue from the board */
1270 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1271 /* Complete the response */
1272 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1273 request_id = TW_RESID_OUT(response_que.response_id);
1274 full_command_packet = tw_dev->command_packet_virt[request_id];
1275 error = 0;
1276 command_packet = &full_command_packet->command.oldcommand;
1277 /* Check for command packet errors */
1278 if (full_command_packet->command.newcommand.status != 0) {
1279 if (tw_dev->srb[request_id] != 0) {
1280 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1281 } else {
1282 /* Skip ioctl error prints */
1283 if (request_id != tw_dev->chrdev_request_id) {
1284 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1285 }
1286 }
1287 }
1288
1289 /* Check for correct state */
1290 if (tw_dev->state[request_id] != TW_S_POSTED) {
1291 if (tw_dev->srb[request_id] != 0) {
1292 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1293 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1294 goto twa_interrupt_bail;
1295 }
1296 }
1297
1298 /* Check for internal command completion */
1299 if (tw_dev->srb[request_id] == 0) {
1300 if (request_id != tw_dev->chrdev_request_id) {
1301 if (twa_aen_complete(tw_dev, request_id))
1302 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1303 } else {
1304 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1305 wake_up(&tw_dev->ioctl_wqueue);
1306 }
1307 } else {
1308 struct scsi_cmnd *cmd;
1309
1310 cmd = tw_dev->srb[request_id];
1311
1312 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1313 			/* If no error, the command was a success */
1314 if (error == 0) {
1315 cmd->result = (DID_OK << 16);
1316 }
1317
1318 /* If error, command failed */
1319 if (error == 1) {
1320 /* Ask for a host reset */
1321 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1322 }
1323
1324 /* Report residual bytes for single sgl */
1325 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1326 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1327 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1328 }
1329
1330 /* Now complete the io */
1331 tw_dev->state[request_id] = TW_S_COMPLETED;
1332 twa_free_request_id(tw_dev, request_id);
1333 tw_dev->posted_request_count--;
1334 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1335 twa_unmap_scsi_data(tw_dev, request_id);
1336 }
1337
1338 /* Check for valid status after each drain */
1339 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1340 if (twa_check_bits(status_reg_value)) {
1341 if (twa_decode_bits(tw_dev, status_reg_value)) {
1342 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1343 goto twa_interrupt_bail;
1344 }
1345 }
1346 }
1347 }
1348
1349 twa_interrupt_bail:
1350 spin_unlock(tw_dev->host->host_lock);
1351 return IRQ_RETVAL(handled);
1352 } /* End twa_interrupt() */
1353
1354 /* This function will load the request id and various sgls for ioctls */
1355 static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1356 {
1357 TW_Command *oldcommand;
1358 TW_Command_Apache *newcommand;
1359 TW_SG_Entry *sgl;
1360
1361 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1362 newcommand = &full_command_packet->command.newcommand;
1363 newcommand->request_id__lunl =
1364 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1365 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1366 newcommand->sg_list[0].length = cpu_to_le32(length);
1367 newcommand->sgl_entries__lunh =
1368 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1));
1369 } else {
1370 oldcommand = &full_command_packet->command.oldcommand;
1371 oldcommand->request_id = request_id;
1372
1373 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1374 /* Load the sg list */
1375 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1376 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1377 sgl->length = cpu_to_le32(length);
1378
1379 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1380 oldcommand->size += 1;
1381 }
1382 }
1383 } /* End twa_load_sgl() */
1384
1385 /* This function will perform a pci-dma mapping for a scatter gather list */
1386 static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1387 {
1388 int use_sg;
1389 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1390
1391 use_sg = scsi_dma_map(cmd);
1392 if (!use_sg)
1393 return 0;
1394 else if (use_sg < 0) {
1395 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1396 return 0;
1397 }
1398
1399 cmd->SCp.phase = TW_PHASE_SGLIST;
1400 cmd->SCp.have_data_in = use_sg;
1401
1402 return use_sg;
1403 } /* End twa_map_scsi_sg_data() */
1404
1405 /* This function will poll for a response interrupt of a request */
1406 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1407 {
1408 int retval = 1, found = 0, response_request_id;
1409 TW_Response_Queue response_queue;
1410 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1411
1412 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1413 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1414 response_request_id = TW_RESID_OUT(response_queue.response_id);
1415 if (request_id != response_request_id) {
1416 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1417 goto out;
1418 }
1419 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1420 if (full_command_packet->command.newcommand.status != 0) {
1421 /* bad response */
1422 twa_fill_sense(tw_dev, request_id, 0, 0);
1423 goto out;
1424 }
1425 found = 1;
1426 } else {
1427 if (full_command_packet->command.oldcommand.status != 0) {
1428 /* bad response */
1429 twa_fill_sense(tw_dev, request_id, 0, 0);
1430 goto out;
1431 }
1432 found = 1;
1433 }
1434 }
1435
1436 if (found)
1437 retval = 0;
1438 out:
1439 return retval;
1440 } /* End twa_poll_response() */
1441
1442 /* This function will poll the status register for a flag */
1443 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1444 {
1445 u32 status_reg_value;
1446 unsigned long before;
1447 int retval = 1;
1448
1449 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1450 before = jiffies;
1451
1452 if (twa_check_bits(status_reg_value))
1453 twa_decode_bits(tw_dev, status_reg_value);
1454
1455 while ((status_reg_value & flag) != flag) {
1456 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1457
1458 if (twa_check_bits(status_reg_value))
1459 twa_decode_bits(tw_dev, status_reg_value);
1460
1461 if (time_after(jiffies, before + HZ * seconds))
1462 goto out;
1463
1464 msleep(50);
1465 }
1466 retval = 0;
1467 out:
1468 return retval;
1469 } /* End twa_poll_status() */
1470
1471 /* This function will poll the status register for disappearance of a flag */
1472 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1473 {
1474 u32 status_reg_value;
1475 unsigned long before;
1476 int retval = 1;
1477
1478 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1479 before = jiffies;
1480
1481 if (twa_check_bits(status_reg_value))
1482 twa_decode_bits(tw_dev, status_reg_value);
1483
1484 while ((status_reg_value & flag) != 0) {
1485 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1486 if (twa_check_bits(status_reg_value))
1487 twa_decode_bits(tw_dev, status_reg_value);
1488
1489 if (time_after(jiffies, before + HZ * seconds))
1490 goto out;
1491
1492 msleep(50);
1493 }
1494 retval = 0;
1495 out:
1496 return retval;
1497 } /* End twa_poll_status_gone() */
1498
1499 /* This function will attempt to post a command packet to the board */
1500 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1501 {
1502 u32 status_reg_value;
1503 dma_addr_t command_que_value;
1504 int retval = 1;
1505
1506 command_que_value = tw_dev->command_packet_phys[request_id];
1507
1508 /* For 9650SE write low 4 bytes first */
1509 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) {
1510 command_que_value += TW_COMMAND_OFFSET;
1511 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1512 }
1513
1514 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1515
1516 if (twa_check_bits(status_reg_value))
1517 twa_decode_bits(tw_dev, status_reg_value);
1518
1519 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1520
1521 /* Only pend internal driver commands */
1522 if (!internal) {
1523 retval = SCSI_MLQUEUE_HOST_BUSY;
1524 goto out;
1525 }
1526
1527 /* Couldn't post the command packet, so we do it later */
1528 if (tw_dev->state[request_id] != TW_S_PENDING) {
1529 tw_dev->state[request_id] = TW_S_PENDING;
1530 tw_dev->pending_request_count++;
1531 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1532 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1533 }
1534 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1535 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1536 }
1537 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1538 goto out;
1539 } else {
1540 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) {
1541 /* Now write upper 4 bytes */
1542 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1543 } else {
1544 if (sizeof(dma_addr_t) > 4) {
1545 command_que_value += TW_COMMAND_OFFSET;
1546 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1547 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1548 } else {
1549 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1550 }
1551 }
1552 tw_dev->state[request_id] = TW_S_POSTED;
1553 tw_dev->posted_request_count++;
1554 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1555 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1556 }
1557 }
1558 retval = 0;
1559 out:
1560 return retval;
1561 } /* End twa_post_command_packet() */
1562
1563 /* This function will reset a device extension */
1564 static int twa_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
1565 {
1566 int i = 0;
1567 int retval = 1;
1568 unsigned long flags = 0;
1569
1570 set_bit(TW_IN_RESET, &tw_dev->flags);
1571 TW_DISABLE_INTERRUPTS(tw_dev);
1572 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1573 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1574
1575 /* Abort all requests that are in progress */
1576 for (i = 0; i < TW_Q_LENGTH; i++) {
1577 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1578 (tw_dev->state[i] != TW_S_INITIAL) &&
1579 (tw_dev->state[i] != TW_S_COMPLETED)) {
1580 if (tw_dev->srb[i]) {
1581 tw_dev->srb[i]->result = (DID_RESET << 16);
1582 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
1583 twa_unmap_scsi_data(tw_dev, i);
1584 }
1585 }
1586 }
1587
1588 /* Reset queues and counts */
1589 for (i = 0; i < TW_Q_LENGTH; i++) {
1590 tw_dev->free_queue[i] = i;
1591 tw_dev->state[i] = TW_S_INITIAL;
1592 }
1593 tw_dev->free_head = TW_Q_START;
1594 tw_dev->free_tail = TW_Q_START;
1595 tw_dev->posted_request_count = 0;
1596 tw_dev->pending_request_count = 0;
1597 tw_dev->pending_head = TW_Q_START;
1598 tw_dev->pending_tail = TW_Q_START;
1599 tw_dev->reset_print = 0;
1600
1601 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1602
1603 if (twa_reset_sequence(tw_dev, 1))
1604 goto out;
1605
1606 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1607 clear_bit(TW_IN_RESET, &tw_dev->flags);
1608 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1609
1610 retval = 0;
1611 out:
1612 return retval;
1613 } /* End twa_reset_device_extension() */
1614
1615 /* This function will reset a controller */
1616 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1617 {
1618 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1619
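/* Retry the full sequence up to TW_MAX_RESET_TRIES times; any step that fails forces a soft reset on the next pass, and a successful firmware flash also restarts the sequence */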
1620 while (tries < TW_MAX_RESET_TRIES) {
1621 if (do_soft_reset) {
1622 TW_SOFT_RESET(tw_dev);
1623 /* Clear pchip/response queue on 9550SX */
1624 if (twa_empty_response_queue_large(tw_dev)) {
1625 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1626 do_soft_reset = 1;
1627 tries++;
1628 continue;
1629 }
1630 }
1631
1632 /* Make sure controller is in a good state */
1633 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1634 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1635 do_soft_reset = 1;
1636 tries++;
1637 continue;
1638 }
1639
1640 /* Empty response queue */
1641 if (twa_empty_response_queue(tw_dev)) {
1642 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1643 do_soft_reset = 1;
1644 tries++;
1645 continue;
1646 }
1647
1648 flashed = 0;
1649
1650 /* Check for compatibility/flash */
1651 if (twa_check_srl(tw_dev, &flashed)) {
1652 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1653 do_soft_reset = 1;
1654 tries++;
1655 continue;
1656 } else {
1657 if (flashed) {
1658 tries++;
1659 continue;
1660 }
1661 }
1662
1663 /* Drain the AEN queue */
1664 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1665 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1666 do_soft_reset = 1;
1667 tries++;
1668 continue;
1669 }
1670
1671 /* If we got here, controller is in a good state */
1672 retval = 0;
1673 goto out;
1674 }
1675 out:
1676 return retval;
1677 } /* End twa_reset_sequence() */
1678
1679 /* This function returns unit geometry in cylinders/heads/sectors */
1680 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1681 {
1682 int heads, sectors, cylinders;
1683 TW_Device_Extension *tw_dev;
1684
1685 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1686
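/* Use a 255-head/63-sector translation (16065 sectors per cylinder) for units of 0x200000 sectors (1GB) and larger, 64 heads/32 sectors otherwise */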
1687 if (capacity >= 0x200000) {
1688 heads = 255;
1689 sectors = 63;
1690 cylinders = sector_div(capacity, heads * sectors);
1691 } else {
1692 heads = 64;
1693 sectors = 32;
1694 cylinders = sector_div(capacity, heads * sectors);
1695 }
1696
1697 geom[0] = heads;
1698 geom[1] = sectors;
1699 geom[2] = cylinders;
1700
1701 return 0;
1702 } /* End twa_scsi_biosparam() */
1703
1704 /* This is the new scsi eh reset function */
1705 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1706 {
1707 TW_Device_Extension *tw_dev = NULL;
1708 int retval = FAILED;
1709
1710 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1711
1712 tw_dev->num_resets++;
1713
1714 sdev_printk(KERN_WARNING, SCpnt->device,
1715 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1716 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1717
1718 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1719 mutex_lock(&tw_dev->ioctl_lock);
1720
1721 /* Now reset the card and some of the device extension data */
1722 if (twa_reset_device_extension(tw_dev, 0)) {
1723 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1724 goto out;
1725 }
1726
1727 retval = SUCCESS;
1728 out:
1729 mutex_unlock(&tw_dev->ioctl_lock);
1730 return retval;
1731 } /* End twa_scsi_eh_reset() */
1732
1733 /* This is the main scsi queue function to handle scsi opcodes */
1734 static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1735 {
1736 int request_id, retval;
1737 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1738
1739 /* If we are resetting due to a timed-out ioctl, report as busy */
1740 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1741 retval = SCSI_MLQUEUE_HOST_BUSY;
1742 goto out;
1743 }
1744
1745 /* Check if this FW supports luns */
1746 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1747 SCpnt->result = (DID_BAD_TARGET << 16);
1748 done(SCpnt);
1749 retval = 0;
1750 goto out;
1751 }
1752
1753 /* Save done function into scsi_cmnd struct */
1754 SCpnt->scsi_done = done;
1755
1756 /* Get a free request id */
1757 twa_get_request_id(tw_dev, &request_id);
1758
1759 /* Save the scsi command for use by the ISR */
1760 tw_dev->srb[request_id] = SCpnt;
1761
1762 /* Initialize phase to zero */
1763 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1764
1765 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
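/* 0 means the command was posted to the firmware; SCSI_MLQUEUE_HOST_BUSY hands the command back to the midlayer for a later retry, and 1 is an internal failure completed here with DID_ERROR */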
1766 switch (retval) {
1767 case SCSI_MLQUEUE_HOST_BUSY:
1768 twa_free_request_id(tw_dev, request_id);
1769 break;
1770 case 1:
1771 tw_dev->state[request_id] = TW_S_COMPLETED;
1772 twa_free_request_id(tw_dev, request_id);
1773 SCpnt->result = (DID_ERROR << 16);
1774 done(SCpnt);
1775 retval = 0;
1776 }
1777 out:
1778 return retval;
1779 } /* End twa_scsi_queue() */
1780
1781 /* This function hands scsi cdb's to the firmware */
1782 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1783 {
1784 TW_Command_Full *full_command_packet;
1785 TW_Command_Apache *command_packet;
1786 u32 num_sectors = 0x0;
1787 int i, sg_count;
1788 struct scsi_cmnd *srb = NULL;
1789 struct scatterlist *sglist = NULL, *sg;
1790 int retval = 1;
1791
1792 if (tw_dev->srb[request_id]) {
1793 srb = tw_dev->srb[request_id];
1794 if (scsi_sglist(srb))
1795 sglist = scsi_sglist(srb);
1796 }
1797
1798 /* Initialize command packet */
1799 full_command_packet = tw_dev->command_packet_virt[request_id];
1800 full_command_packet->header.header_desc.size_header = 128;
1801 full_command_packet->header.status_block.error = 0;
1802 full_command_packet->header.status_block.severity__reserved = 0;
1803
1804 command_packet = &full_command_packet->command.newcommand;
1805 command_packet->status = 0;
1806 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1807
1808 /* We forced 16 byte cdb use earlier */
1809 if (!cdb)
1810 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1811 else
1812 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1813
1814 if (srb) {
1815 command_packet->unit = srb->device->id;
1816 command_packet->request_id__lunl =
1817 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1818 } else {
1819 command_packet->request_id__lunl =
1820 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1821 command_packet->unit = 0;
1822 }
1823
1824 command_packet->sgl_offset = 16;
1825
1826 if (!sglistarg) {
1827 /* Map sglist from scsi layer to cmd packet */
1828
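/* A single scatterlist entry shorter than TW_MIN_SGL_LENGTH is bounced through the per-request generic buffer and padded to the minimum length (presumably a firmware minimum-transfer requirement); everything else is DMA-mapped directly */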
1829 if (scsi_sg_count(srb)) {
1830 if ((scsi_sg_count(srb) == 1) &&
1831 (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
1832 if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) {
1833 struct scatterlist *sg = scsi_sglist(srb);
1834 char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1835 memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
1836 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1837 }
1838 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1839 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1840 } else {
1841 sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
1842 if (sg_count == 0)
1843 goto out;
1844
1845 scsi_for_each_sg(srb, sg, sg_count, i) {
1846 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1847 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1848 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1849 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1850 goto out;
1851 }
1852 }
1853 }
1854 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1855 }
1856 } else {
1857 /* Internal cdb post */
1858 for (i = 0; i < use_sg; i++) {
1859 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1860 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1861 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1862 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1863 goto out;
1864 }
1865 }
1866 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1867 }
1868
1869 if (srb) {
1870 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1871 num_sectors = (u32)srb->cmnd[4];
1872
1873 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1874 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1875 }
1876
1877 /* Update sector statistic */
1878 tw_dev->sector_count = num_sectors;
1879 if (tw_dev->sector_count > tw_dev->max_sector_count)
1880 tw_dev->max_sector_count = tw_dev->sector_count;
1881
1882 /* Update SG statistics */
1883 if (srb) {
1884 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1885 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1886 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1887 }
1888
1889 /* Now post the command to the board */
1890 if (srb) {
1891 retval = twa_post_command_packet(tw_dev, request_id, 0);
1892 } else {
1893 twa_post_command_packet(tw_dev, request_id, 1);
1894 retval = 0;
1895 }
1896 out:
1897 return retval;
1898 } /* End twa_scsiop_execute_scsi() */
1899
1900 /* This function completes an execute scsi operation */
1901 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1902 {
1903 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1904
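/* For small single-entry reads that were bounced through the generic buffer in twa_scsiop_execute_scsi(), copy the data back into the caller's buffer */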
1905 if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1906 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1907 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1908 if (scsi_sg_count(cmd) == 1) {
1909 struct scatterlist *sg = scsi_sglist(tw_dev->srb[request_id]);
1910 char *buf;
1911 unsigned long flags = 0;
1912 local_irq_save(flags);
1913 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1914 memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
1915 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1916 local_irq_restore(flags);
1917 }
1918 }
1919 } /* End twa_scsiop_execute_scsi_complete() */
1920
1921 /* This function tells the controller to shut down */
1922 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1923 {
1924 /* Disable interrupts */
1925 TW_DISABLE_INTERRUPTS(tw_dev);
1926
1927 /* Free up the IRQ */
1928 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1929
1930 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1931
1932 /* Tell the card we are shutting down */
1933 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1934 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1935 } else {
1936 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1937 }
1938
1939 /* Clear all interrupts just before exit */
1940 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1941 } /* End __twa_shutdown() */
1942
1943 /* Wrapper for __twa_shutdown */
1944 static void twa_shutdown(struct pci_dev *pdev)
1945 {
1946 struct Scsi_Host *host = pci_get_drvdata(pdev);
1947 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1948
1949 __twa_shutdown(tw_dev);
1950 } /* End twa_shutdown() */
1951
1952 /* This function will look up a string */
1953 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1954 {
1955 int index;
1956
1957 for (index = 0; ((code != table[index].code) &&
1958 (table[index].text != (char *)0)); index++);
1959 return(table[index].text);
1960 } /* End twa_string_lookup() */
1961
1962 /* This function will perform a pci-dma unmap */
1963 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1964 {
1965 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1966
1967 scsi_dma_unmap(cmd);
1968 } /* End twa_unmap_scsi_data() */
1969
1970 /* scsi_host_template initializer */
1971 static struct scsi_host_template driver_template = {
1972 .module = THIS_MODULE,
1973 .name = "3ware 9000 Storage Controller",
1974 .queuecommand = twa_scsi_queue,
1975 .eh_host_reset_handler = twa_scsi_eh_reset,
1976 .bios_param = twa_scsi_biosparam,
1977 .change_queue_depth = twa_change_queue_depth,
1978 .can_queue = TW_Q_LENGTH-2,
1979 .this_id = -1,
1980 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
1981 .max_sectors = TW_MAX_SECTORS,
1982 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1983 .use_clustering = ENABLE_CLUSTERING,
1984 .shost_attrs = twa_host_attrs,
1985 .emulated = 1
1986 };
1987
1988 /* This function will probe and initialize a card */
1989 static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1990 {
1991 struct Scsi_Host *host = NULL;
1992 TW_Device_Extension *tw_dev;
1993 u32 mem_addr;
1994 int retval = -ENODEV;
1995
1996 retval = pci_enable_device(pdev);
1997 if (retval) {
1998 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
1999 goto out_disable_device;
2000 }
2001
2002 pci_set_master(pdev);
2003
2004 retval = pci_set_dma_mask(pdev, sizeof(dma_addr_t) > 4 ? DMA_64BIT_MASK : DMA_32BIT_MASK);
2005 if (retval) {
2006 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2007 goto out_disable_device;
2008 }
2009
2010 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2011 if (!host) {
2012 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2013 retval = -ENOMEM;
2014 goto out_disable_device;
2015 }
2016 tw_dev = (TW_Device_Extension *)host->hostdata;
2017
2018 memset(tw_dev, 0, sizeof(TW_Device_Extension));
2019
2020 /* Save values to device extension */
2021 tw_dev->host = host;
2022 tw_dev->tw_pci_dev = pdev;
2023
2024 if (twa_initialize_device_extension(tw_dev)) {
2025 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2026 goto out_free_device_extension;
2027 }
2028
2029 /* Request IO regions */
2030 retval = pci_request_regions(pdev, "3w-9xxx");
2031 if (retval) {
2032 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2033 goto out_free_device_extension;
2034 }
2035
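/* Boards with PCI_DEVICE_ID_3WARE_9000 expose their registers in BAR 1; the 9550SX and 9650SE use BAR 2 */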
2036 if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
2037 mem_addr = pci_resource_start(pdev, 1);
2038 else
2039 mem_addr = pci_resource_start(pdev, 2);
2040
2041 /* Save base address */
2042 tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
2043 if (!tw_dev->base_addr) {
2044 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2045 goto out_release_mem_region;
2046 }
2047
2048 /* Disable interrupts on the card */
2049 TW_DISABLE_INTERRUPTS(tw_dev);
2050
2051 /* Initialize the card */
2052 if (twa_reset_sequence(tw_dev, 0))
2053 goto out_iounmap;
2054
2055 /* Set host specific parameters */
2056 if (pdev->device == PCI_DEVICE_ID_3WARE_9650SE)
2057 host->max_id = TW_MAX_UNITS_9650SE;
2058 else
2059 host->max_id = TW_MAX_UNITS;
2060
2061 host->max_cmd_len = TW_MAX_CDB_LEN;
2062
2063 /* Channels aren't supported by the adapter */
2064 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2065 host->max_channel = 0;
2066
2067 /* Register the card with the kernel SCSI layer */
2068 retval = scsi_add_host(host, &pdev->dev);
2069 if (retval) {
2070 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2071 goto out_iounmap;
2072 }
2073
2074 pci_set_drvdata(pdev, host);
2075
2076 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%x, IRQ: %d.\n",
2077 host->host_no, mem_addr, pdev->irq);
2078 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2079 host->host_no,
2080 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2081 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2082 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2083 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2084 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2085 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2086
2087 /* Now setup the interrupt handler */
2088 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2089 if (retval) {
2090 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2091 goto out_remove_host;
2092 }
2093
2094 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2095 twa_device_extension_count++;
2096
2097 /* Re-enable interrupts on the card */
2098 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2099
2100 /* Finally, scan the host */
2101 scsi_scan_host(host);
2102
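/* Register the "twa" character device the first time a card is probed; the ioctl interface shares this single major number across all adapters */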
2103 if (twa_major == -1) {
2104 if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2105 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2106 }
2107 return 0;
2108
2109 out_remove_host:
2110 scsi_remove_host(host);
2111 out_iounmap:
2112 iounmap(tw_dev->base_addr);
2113 out_release_mem_region:
2114 pci_release_regions(pdev);
2115 out_free_device_extension:
2116 twa_free_device_extension(tw_dev);
2117 scsi_host_put(host);
2118 out_disable_device:
2119 pci_disable_device(pdev);
2120
2121 return retval;
2122 } /* End twa_probe() */
2123
2124 /* This function is called to remove a device */
2125 static void twa_remove(struct pci_dev *pdev)
2126 {
2127 struct Scsi_Host *host = pci_get_drvdata(pdev);
2128 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2129
2130 scsi_remove_host(tw_dev->host);
2131
2132 /* Unregister character device */
2133 if (twa_major >= 0) {
2134 unregister_chrdev(twa_major, "twa");
2135 twa_major = -1;
2136 }
2137
2138 /* Shutdown the card */
2139 __twa_shutdown(tw_dev);
2140
2141 /* Free IO remapping */
2142 iounmap(tw_dev->base_addr);
2143
2144 /* Free up the mem region */
2145 pci_release_regions(pdev);
2146
2147 /* Free up device extension resources */
2148 twa_free_device_extension(tw_dev);
2149
2150 scsi_host_put(tw_dev->host);
2151 pci_disable_device(pdev);
2152 twa_device_extension_count--;
2153 } /* End twa_remove() */
2154
2155 /* PCI Devices supported by this driver */
2156 static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2157 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2159 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2161 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2163 { }
2164 };
2165 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2166
2167 /* pci_driver initializer */
2168 static struct pci_driver twa_driver = {
2169 .name = "3w-9xxx",
2170 .id_table = twa_pci_tbl,
2171 .probe = twa_probe,
2172 .remove = twa_remove,
2173 .shutdown = twa_shutdown
2174 };
2175
2176 /* This function is called on driver initialization */
2177 static int __init twa_init(void)
2178 {
2179 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2180
2181 return pci_register_driver(&twa_driver);
2182 } /* End twa_init() */
2183
2184 /* This function is called on driver exit */
2185 static void __exit twa_exit(void)
2186 {
2187 pci_unregister_driver(&twa_driver);
2188 } /* End twa_exit() */
2189
2190 module_init(twa_init);
2191 module_exit(twa_exit);
2192