1 /*
2 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3
4 Written By: Adam Radford <linuxraid@lsi.com>
5 Modifications By: Tom Couch <linuxraid@lsi.com>
6
7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8 Copyright (C) 2010 LSI Corporation.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; version 2 of the License.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 NO WARRANTY
20 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 solely responsible for determining the appropriateness of using and
25 distributing the Program and assumes all risks associated with its
26 exercise of rights under this Agreement, including but not limited to
27 the risks and costs of program errors, damage to or loss of data,
28 programs or equipment, and unavailability or interruption of operations.
29
30 DISCLAIMER OF LIABILITY
31 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 You should have received a copy of the GNU General Public License
40 along with this program; if not, write to the Free Software
41 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42
43 Bugs/Comments/Suggestions should be mailed to:
44 linuxraid@lsi.com
45
46 For more information, go to:
47 http://www.lsi.com
48
49 Note: This version of the driver does not contain a bundled firmware
50 image.
51
52 History
53 -------
54 2.26.02.000 - Driver cleanup for kernel submission.
55 2.26.02.001 - Replace schedule_timeout() calls with msleep().
56 2.26.02.002 - Add support for PAE mode.
57 Add lun support.
58 Fix twa_remove() to free irq handler/unregister_chrdev()
59 before shutting down card.
60 Change to new 'change_queue_depth' api.
61 Fix 'handled=1' ISR usage, remove bogus IRQ check.
62 Remove un-needed eh_abort handler.
63 Add support for embedded firmware error strings.
64 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
65 2.26.02.004 - Add support for 9550SX controllers.
66 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
67 2.26.02.006 - Fix 9550SX pchip reset timeout.
68 Add big endian support.
69 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
70 2.26.02.008 - Free irq handler in __twa_shutdown().
71 Serialize reset code.
72 Add support for 9650SE controllers.
73 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
74 2.26.02.010 - Add support for 9690SA controllers.
75 2.26.02.011 - Increase max AENs drained to 256.
76 Add MSI support and "use_msi" module parameter.
77 Fix bug in twa_get_param() on 4GB+.
78 Use pci_resource_len() for ioremap().
79 2.26.02.012 - Add power management support.
80 2.26.02.013 - Fix bug in twa_load_sgl().
81 2.26.02.014 - Force 60 second timeout default.
82 */
83
84 #include <linux/module.h>
85 #include <linux/reboot.h>
86 #include <linux/spinlock.h>
87 #include <linux/interrupt.h>
88 #include <linux/moduleparam.h>
89 #include <linux/errno.h>
90 #include <linux/types.h>
91 #include <linux/delay.h>
92 #include <linux/pci.h>
93 #include <linux/time.h>
94 #include <linux/mutex.h>
95 #include <linux/slab.h>
96 #include <asm/io.h>
97 #include <asm/irq.h>
98 #include <asm/uaccess.h>
99 #include <scsi/scsi.h>
100 #include <scsi/scsi_host.h>
101 #include <scsi/scsi_tcq.h>
102 #include <scsi/scsi_cmnd.h>
103 #include "3w-9xxx.h"
104
105 /* Globals */
106 #define TW_DRIVER_VERSION "2.26.02.014"
107 static DEFINE_MUTEX(twa_chrdev_mutex);
108 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
109 static unsigned int twa_device_extension_count;
110 static int twa_major = -1;
111 extern struct timezone sys_tz;
112
113 /* Module parameters */
114 MODULE_AUTHOR ("LSI");
115 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
116 MODULE_LICENSE("GPL");
117 MODULE_VERSION(TW_DRIVER_VERSION);
118
119 static int use_msi = 0;
120 module_param(use_msi, int, S_IRUGO);
121 MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
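/* Usage note (sketch, not part of the original source): MSI can typically be
 * requested at module load time with something like "modprobe 3w-9xxx use_msi=1";
 * because the parameter is declared S_IRUGO it is readable, but not writable,
 * through sysfs once the module is loaded. */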
122
123 /* Function prototypes */
124 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
125 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
126 static char *twa_aen_severity_lookup(unsigned char severity_code);
127 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
128 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
129 static int twa_chrdev_open(struct inode *inode, struct file *file);
130 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
131 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
132 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
133 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
134 u32 set_features, unsigned short current_fw_srl,
135 unsigned short current_fw_arch_id,
136 unsigned short current_fw_branch,
137 unsigned short current_fw_build,
138 unsigned short *fw_on_ctlr_srl,
139 unsigned short *fw_on_ctlr_arch_id,
140 unsigned short *fw_on_ctlr_branch,
141 unsigned short *fw_on_ctlr_build,
142 u32 *init_connect_result);
143 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
144 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
145 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
146 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
147 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
148 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
149 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
150 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
151 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
152
153 /* Functions */
154
155 /* Show some statistics about the card */
156 static ssize_t twa_show_stats(struct device *dev,
157 struct device_attribute *attr, char *buf)
158 {
159 struct Scsi_Host *host = class_to_shost(dev);
160 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
161 unsigned long flags = 0;
162 ssize_t len;
163
164 spin_lock_irqsave(tw_dev->host->host_lock, flags);
165 len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
166 "Current commands posted: %4d\n"
167 "Max commands posted: %4d\n"
168 "Current pending commands: %4d\n"
169 "Max pending commands: %4d\n"
170 "Last sgl length: %4d\n"
171 "Max sgl length: %4d\n"
172 "Last sector count: %4d\n"
173 "Max sector count: %4d\n"
174 "SCSI Host Resets: %4d\n"
175 "AEN's: %4d\n",
176 TW_DRIVER_VERSION,
177 tw_dev->posted_request_count,
178 tw_dev->max_posted_request_count,
179 tw_dev->pending_request_count,
180 tw_dev->max_pending_request_count,
181 tw_dev->sgl_entries,
182 tw_dev->max_sgl_entries,
183 tw_dev->sector_count,
184 tw_dev->max_sector_count,
185 tw_dev->num_resets,
186 tw_dev->aen_count);
187 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
188 return len;
189 } /* End twa_show_stats() */
190
191 /* Create sysfs 'stats' entry */
192 static struct device_attribute twa_host_stats_attr = {
193 .attr = {
194 .name = "stats",
195 .mode = S_IRUGO,
196 },
197 .show = twa_show_stats
198 };
199
200 /* Host attributes initializer */
201 static struct device_attribute *twa_host_attrs[] = {
202 &twa_host_stats_attr,
203 NULL,
204 };
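/* Note (assumes the usual shost_attrs hookup in the SCSI host template, which
 * lies outside this excerpt): once a host is registered, the 'stats' file is
 * normally readable at /sys/class/scsi_host/host<N>/stats. */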
205
206 /* File operations struct for character device */
207 static const struct file_operations twa_fops = {
208 .owner = THIS_MODULE,
209 .unlocked_ioctl = twa_chrdev_ioctl,
210 .open = twa_chrdev_open,
211 .release = NULL,
212 .llseek = noop_llseek,
213 };
214
215 /* This function will complete an aen request from the isr */
216 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
217 {
218 TW_Command_Full *full_command_packet;
219 TW_Command *command_packet;
220 TW_Command_Apache_Header *header;
221 unsigned short aen;
222 int retval = 1;
223
224 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
225 tw_dev->posted_request_count--;
226 aen = le16_to_cpu(header->status_block.error);
227 full_command_packet = tw_dev->command_packet_virt[request_id];
228 command_packet = &full_command_packet->command.oldcommand;
229
230 /* First check for internal completion of set param for time sync */
231 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
232 /* Keep reading the queue in case there are more aen's */
233 if (twa_aen_read_queue(tw_dev, request_id))
234 goto out2;
235 else {
236 retval = 0;
237 goto out;
238 }
239 }
240
241 switch (aen) {
242 case TW_AEN_QUEUE_EMPTY:
243 /* Quit reading the queue if this is the last one */
244 break;
245 case TW_AEN_SYNC_TIME_WITH_HOST:
246 twa_aen_sync_time(tw_dev, request_id);
247 retval = 0;
248 goto out;
249 default:
250 twa_aen_queue_event(tw_dev, header);
251
252 /* If there are more aen's, keep reading the queue */
253 if (twa_aen_read_queue(tw_dev, request_id))
254 goto out2;
255 else {
256 retval = 0;
257 goto out;
258 }
259 }
260 retval = 0;
261 out2:
262 tw_dev->state[request_id] = TW_S_COMPLETED;
263 twa_free_request_id(tw_dev, request_id);
264 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
265 out:
266 return retval;
267 } /* End twa_aen_complete() */
268
269 /* This function will drain aen queue */
270 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
271 {
272 int request_id = 0;
273 char cdb[TW_MAX_CDB_LEN];
274 TW_SG_Entry sglist[1];
275 int finished = 0, count = 0;
276 TW_Command_Full *full_command_packet;
277 TW_Command_Apache_Header *header;
278 unsigned short aen;
279 int first_reset = 0, queue = 0, retval = 1;
280
281 if (no_check_reset)
282 first_reset = 0;
283 else
284 first_reset = 1;
285
286 full_command_packet = tw_dev->command_packet_virt[request_id];
287 memset(full_command_packet, 0, sizeof(TW_Command_Full));
288
289 /* Initialize cdb */
290 memset(&cdb, 0, TW_MAX_CDB_LEN);
291 cdb[0] = REQUEST_SENSE; /* opcode */
292 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
293
294 /* Initialize sglist */
295 memset(&sglist, 0, sizeof(TW_SG_Entry));
296 sglist[0].length = TW_SECTOR_SIZE;
297 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
298
299 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
300 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
301 goto out;
302 }
303
304 /* Mark internal command */
305 tw_dev->srb[request_id] = NULL;
306
307 do {
308 /* Send command to the board */
309 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
310 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
311 goto out;
312 }
313
314 /* Now poll for completion */
315 if (twa_poll_response(tw_dev, request_id, 30)) {
316 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
317 tw_dev->posted_request_count--;
318 goto out;
319 }
320
321 tw_dev->posted_request_count--;
322 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
323 aen = le16_to_cpu(header->status_block.error);
324 queue = 0;
325 count++;
326
327 switch (aen) {
328 case TW_AEN_QUEUE_EMPTY:
329 if (first_reset != 1)
330 goto out;
331 else
332 finished = 1;
333 break;
334 case TW_AEN_SOFT_RESET:
335 if (first_reset == 0)
336 first_reset = 1;
337 else
338 queue = 1;
339 break;
340 case TW_AEN_SYNC_TIME_WITH_HOST:
341 break;
342 default:
343 queue = 1;
344 }
345
346 /* Queue an event for this AEN if needed */
347 if (queue)
348 twa_aen_queue_event(tw_dev, header);
349 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
350
351 if (count == TW_MAX_AEN_DRAIN)
352 goto out;
353
354 retval = 0;
355 out:
356 tw_dev->state[request_id] = TW_S_INITIAL;
357 return retval;
358 } /* End twa_aen_drain_queue() */
359
360 /* This function will queue an event */
361 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
362 {
363 u32 local_time;
364 struct timeval time;
365 TW_Event *event;
366 unsigned short aen;
367 char host[16];
368 char *error_str;
369
370 tw_dev->aen_count++;
371
372 /* Fill out event info */
373 event = tw_dev->event_queue[tw_dev->error_index];
374
375 /* Check for clobber */
376 host[0] = '\0';
377 if (tw_dev->host) {
378 sprintf(host, " scsi%d:", tw_dev->host->host_no);
379 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
380 tw_dev->aen_clobber = 1;
381 }
382
383 aen = le16_to_cpu(header->status_block.error);
384 memset(event, 0, sizeof(TW_Event));
385
386 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
387 do_gettimeofday(&time);
388 local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
389 event->time_stamp_sec = local_time;
390 event->aen_code = aen;
391 event->retrieved = TW_AEN_NOT_RETRIEVED;
392 event->sequence_id = tw_dev->error_sequence_id;
393 tw_dev->error_sequence_id++;
394
395 /* Check for embedded error string */
396 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
397
398 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
399 event->parameter_len = strlen(header->err_specific_desc);
400 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
401 if (event->severity != TW_AEN_SEVERITY_DEBUG)
402 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
403 host,
404 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
405 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
406 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
407 header->err_specific_desc);
408 else
409 tw_dev->aen_count--;
410
411 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
412 tw_dev->event_queue_wrapped = 1;
413 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
414 } /* End twa_aen_queue_event() */
415
416 /* This function will read the aen queue from the isr */
417 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
418 {
419 char cdb[TW_MAX_CDB_LEN];
420 TW_SG_Entry sglist[1];
421 TW_Command_Full *full_command_packet;
422 int retval = 1;
423
424 full_command_packet = tw_dev->command_packet_virt[request_id];
425 memset(full_command_packet, 0, sizeof(TW_Command_Full));
426
427 /* Initialize cdb */
428 memset(&cdb, 0, TW_MAX_CDB_LEN);
429 cdb[0] = REQUEST_SENSE; /* opcode */
430 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
431
432 /* Initialize sglist */
433 memset(&sglist, 0, sizeof(TW_SG_Entry));
434 sglist[0].length = TW_SECTOR_SIZE;
435 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
436
437 /* Mark internal command */
438 tw_dev->srb[request_id] = NULL;
439
440 /* Now post the command packet */
441 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
442 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
443 goto out;
444 }
445 retval = 0;
446 out:
447 return retval;
448 } /* End twa_aen_read_queue() */
449
450 /* This function will look up an AEN severity string */
451 static char *twa_aen_severity_lookup(unsigned char severity_code)
452 {
453 char *retval = NULL;
454
455 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
456 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
457 goto out;
458
459 retval = twa_aen_severity_table[severity_code];
460 out:
461 return retval;
462 } /* End twa_aen_severity_lookup() */
463
464 /* This function will sync firmware time with the host time */
465 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
466 {
467 u32 schedulertime;
468 struct timeval utc;
469 TW_Command_Full *full_command_packet;
470 TW_Command *command_packet;
471 TW_Param_Apache *param;
472 u32 local_time;
473
474 /* Fill out the command packet */
475 full_command_packet = tw_dev->command_packet_virt[request_id];
476 memset(full_command_packet, 0, sizeof(TW_Command_Full));
477 command_packet = &full_command_packet->command.oldcommand;
478 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
479 command_packet->request_id = request_id;
480 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
481 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
482 command_packet->size = TW_COMMAND_SIZE;
483 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
484
485 /* Setup the param */
486 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
487 memset(param, 0, TW_SECTOR_SIZE);
488 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
489 param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
490 param->parameter_size_bytes = cpu_to_le16(4);
491
492 /* Convert system time in UTC to local time seconds since last
493 Sunday 12:00AM */
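/* Worked example of the arithmetic below: the Unix epoch (Jan 1 1970) fell on
 * a Thursday, so subtracting 3 days (3 * 86400 seconds) moves the origin to
 * Sunday Jan 4 1970 00:00; taking the result modulo 604800 (seconds per week)
 * then yields the seconds elapsed since the most recent Sunday midnight. */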
494 do_gettimeofday(&utc);
495 local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
496 schedulertime = local_time - (3 * 86400);
497 schedulertime = cpu_to_le32(schedulertime % 604800);
498
499 memcpy(param->data, &schedulertime, sizeof(u32));
500
501 /* Mark internal command */
502 tw_dev->srb[request_id] = NULL;
503
504 /* Now post the command */
505 twa_post_command_packet(tw_dev, request_id, 1);
506 } /* End twa_aen_sync_time() */
507
508 /* This function will allocate memory and check if it is correctly aligned */
509 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
510 {
511 int i;
512 dma_addr_t dma_handle;
513 unsigned long *cpu_addr;
514 int retval = 1;
515
516 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
517 if (!cpu_addr) {
518 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
519 goto out;
520 }
521
522 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
523 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
524 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
525 goto out;
526 }
527
528 memset(cpu_addr, 0, size*TW_Q_LENGTH);
529
530 for (i = 0; i < TW_Q_LENGTH; i++) {
531 switch(which) {
532 case 0:
533 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
534 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
535 break;
536 case 1:
537 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
538 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
539 break;
540 }
541 }
542 retval = 0;
543 out:
544 return retval;
545 } /* End twa_allocate_memory() */
546
547 /* This function will check the status register for unexpected bits */
548 static int twa_check_bits(u32 status_reg_value)
549 {
550 int retval = 1;
551
552 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
553 goto out;
554 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
555 goto out;
556
557 retval = 0;
558 out:
559 return retval;
560 } /* End twa_check_bits() */
561
562 /* This function will check the srl and decide if we are compatible */
563 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
564 {
565 int retval = 1;
566 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
567 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
568 u32 init_connect_result = 0;
569
570 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
571 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
572 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
573 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
574 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
575 &fw_on_ctlr_build, &init_connect_result)) {
576 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
577 goto out;
578 }
579
580 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
581 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
582 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
583
584 /* Try base mode compatibility */
585 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
586 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
587 TW_EXTENDED_INIT_CONNECT,
588 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
589 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
590 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
591 &fw_on_ctlr_branch, &fw_on_ctlr_build,
592 &init_connect_result)) {
593 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
594 goto out;
595 }
596 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
597 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
598 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
599 } else {
600 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
601 }
602 goto out;
603 }
604 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
605 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
606 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
607 }
608
609 /* Load rest of compatibility struct */
610 strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
611 sizeof(tw_dev->tw_compat_info.driver_version));
612 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
613 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
614 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
615 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
616 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
617 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
618 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
619 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
620 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
621
622 retval = 0;
623 out:
624 return retval;
625 } /* End twa_check_srl() */
626
627 /* This function handles ioctl for the character device */
628 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
629 {
630 struct inode *inode = file_inode(file);
631 long timeout;
632 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
633 dma_addr_t dma_handle;
634 int request_id = 0;
635 unsigned int sequence_id = 0;
636 unsigned char event_index, start_index;
637 TW_Ioctl_Driver_Command driver_command;
638 TW_Ioctl_Buf_Apache *tw_ioctl;
639 TW_Lock *tw_lock;
640 TW_Command_Full *full_command_packet;
641 TW_Compatibility_Info *tw_compat_info;
642 TW_Event *event;
643 struct timeval current_time;
644 u32 current_time_ms;
645 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
646 int retval = TW_IOCTL_ERROR_OS_EFAULT;
647 void __user *argp = (void __user *)arg;
648
649 mutex_lock(&twa_chrdev_mutex);
650
651 /* Only let one of these through at a time */
652 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
653 retval = TW_IOCTL_ERROR_OS_EINTR;
654 goto out;
655 }
656
657 /* First copy down the driver command */
658 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
659 goto out2;
660
661 /* Check data buffer size */
662 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
663 retval = TW_IOCTL_ERROR_OS_EINVAL;
664 goto out2;
665 }
666
667 /* Hardware can only do transfers in multiples of 512 bytes */
668 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
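/* (x + 511) & ~511 rounds x up to the next multiple of 512: for example, a
 * 100-byte buffer is padded to 512 bytes and a 513-byte buffer to 1024. */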
669
670 /* Now allocate ioctl buf memory */
671 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
672 if (!cpu_addr) {
673 retval = TW_IOCTL_ERROR_OS_ENOMEM;
674 goto out2;
675 }
676
677 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
678
679 /* Now copy down the entire ioctl */
680 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
681 goto out3;
682
683 /* See which ioctl we are doing */
684 switch (cmd) {
685 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
686 spin_lock_irqsave(tw_dev->host->host_lock, flags);
687 twa_get_request_id(tw_dev, &request_id);
688
689 /* Flag internal command */
690 tw_dev->srb[request_id] = NULL;
691
692 /* Flag chrdev ioctl */
693 tw_dev->chrdev_request_id = request_id;
694
695 full_command_packet = &tw_ioctl->firmware_command;
696
697 /* Load request id and sglist for both command types */
698 twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
699
700 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
701
702 /* Now post the command packet to the controller */
703 twa_post_command_packet(tw_dev, request_id, 1);
704 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
705
706 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
707
708 /* Now wait for command to complete */
709 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
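/* wait_event_timeout() returns the remaining jiffies, or 0 if the timeout
 * expired with the condition still false; the code below re-checks
 * chrdev_request_id directly rather than relying on that return value. */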
710
711 /* We timed out, and didn't get an interrupt */
712 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
713 /* Now we need to reset the board */
714 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
715 tw_dev->host->host_no, TW_DRIVER, 0x37,
716 cmd);
717 retval = TW_IOCTL_ERROR_OS_EIO;
718 twa_reset_device_extension(tw_dev);
719 goto out3;
720 }
721
722 /* Now copy in the command packet response */
723 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
724
725 /* Now complete the io */
726 spin_lock_irqsave(tw_dev->host->host_lock, flags);
727 tw_dev->posted_request_count--;
728 tw_dev->state[request_id] = TW_S_COMPLETED;
729 twa_free_request_id(tw_dev, request_id);
730 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
731 break;
732 case TW_IOCTL_GET_COMPATIBILITY_INFO:
733 tw_ioctl->driver_command.status = 0;
734 /* Copy compatibility struct into ioctl data buffer */
735 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
736 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
737 break;
738 case TW_IOCTL_GET_LAST_EVENT:
739 if (tw_dev->event_queue_wrapped) {
740 if (tw_dev->aen_clobber) {
741 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
742 tw_dev->aen_clobber = 0;
743 } else
744 tw_ioctl->driver_command.status = 0;
745 } else {
746 if (!tw_dev->error_index) {
747 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
748 break;
749 }
750 tw_ioctl->driver_command.status = 0;
751 }
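/* error_index points at the next slot to be written, so the most recent event
 * sits one slot behind it; adding TW_Q_LENGTH before the modulo keeps the
 * index non-negative when error_index is 0. */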
752 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
753 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
754 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
755 break;
756 case TW_IOCTL_GET_FIRST_EVENT:
757 if (tw_dev->event_queue_wrapped) {
758 if (tw_dev->aen_clobber) {
759 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
760 tw_dev->aen_clobber = 0;
761 } else
762 tw_ioctl->driver_command.status = 0;
763 event_index = tw_dev->error_index;
764 } else {
765 if (!tw_dev->error_index) {
766 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
767 break;
768 }
769 tw_ioctl->driver_command.status = 0;
770 event_index = 0;
771 }
772 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
773 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
774 break;
775 case TW_IOCTL_GET_NEXT_EVENT:
776 event = (TW_Event *)tw_ioctl->data_buffer;
777 sequence_id = event->sequence_id;
778 tw_ioctl->driver_command.status = 0;
779
780 if (tw_dev->event_queue_wrapped) {
781 if (tw_dev->aen_clobber) {
782 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
783 tw_dev->aen_clobber = 0;
784 }
785 start_index = tw_dev->error_index;
786 } else {
787 if (!tw_dev->error_index) {
788 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
789 break;
790 }
791 start_index = 0;
792 }
793 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
794
795 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
796 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
797 tw_dev->aen_clobber = 1;
798 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
799 break;
800 }
801 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
802 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
803 break;
804 case TW_IOCTL_GET_PREVIOUS_EVENT:
805 event = (TW_Event *)tw_ioctl->data_buffer;
806 sequence_id = event->sequence_id;
807 tw_ioctl->driver_command.status = 0;
808
809 if (tw_dev->event_queue_wrapped) {
810 if (tw_dev->aen_clobber) {
811 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
812 tw_dev->aen_clobber = 0;
813 }
814 start_index = tw_dev->error_index;
815 } else {
816 if (!tw_dev->error_index) {
817 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
818 break;
819 }
820 start_index = 0;
821 }
822 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
823
824 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
825 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
826 tw_dev->aen_clobber = 1;
827 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
828 break;
829 }
830 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
831 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
832 break;
833 case TW_IOCTL_GET_LOCK:
834 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
835 do_gettimeofday(&current_time);
836 current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
837
838 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
839 tw_dev->ioctl_sem_lock = 1;
840 tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
841 tw_ioctl->driver_command.status = 0;
842 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
843 } else {
844 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
845 tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
846 }
847 break;
848 case TW_IOCTL_RELEASE_LOCK:
849 if (tw_dev->ioctl_sem_lock == 1) {
850 tw_dev->ioctl_sem_lock = 0;
851 tw_ioctl->driver_command.status = 0;
852 } else {
853 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
854 }
855 break;
856 default:
857 retval = TW_IOCTL_ERROR_OS_ENOTTY;
858 goto out3;
859 }
860
861 /* Now copy the entire response to userspace */
862 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
863 retval = 0;
864 out3:
865 /* Now free ioctl buf memory */
866 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
867 out2:
868 mutex_unlock(&tw_dev->ioctl_lock);
869 out:
870 mutex_unlock(&twa_chrdev_mutex);
871 return retval;
872 } /* End twa_chrdev_ioctl() */
873
874 /* This function handles open for the character device */
875 /* NOTE that this function will race with remove. */
876 static int twa_chrdev_open(struct inode *inode, struct file *file)
877 {
878 unsigned int minor_number;
879 int retval = TW_IOCTL_ERROR_OS_ENODEV;
880
881 minor_number = iminor(inode);
882 if (minor_number >= twa_device_extension_count)
883 goto out;
884 retval = 0;
885 out:
886 return retval;
887 } /* End twa_chrdev_open() */
888
889 /* This function will print readable messages from status register errors */
890 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
891 {
892 int retval = 1;
893
894 /* Check for various error conditions and handle them appropriately */
895 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
896 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
897 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
898 }
899
900 if (status_reg_value & TW_STATUS_PCI_ABORT) {
901 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
902 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
903 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
904 }
905
906 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
907 if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
908 (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
909 (!test_bit(TW_IN_RESET, &tw_dev->flags)))
910 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
911 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
912 }
913
914 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
915 if (tw_dev->reset_print == 0) {
916 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
917 tw_dev->reset_print = 1;
918 }
919 goto out;
920 }
921 retval = 0;
922 out:
923 return retval;
924 } /* End twa_decode_bits() */
925
926 /* This function will empty the response queue */
927 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
928 {
929 u32 status_reg_value, response_que_value;
930 int count = 0, retval = 1;
931
932 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
933
934 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
935 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
936 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
937 count++;
938 }
939 if (count == TW_MAX_RESPONSE_DRAIN)
940 goto out;
941
942 retval = 0;
943 out:
944 return retval;
945 } /* End twa_empty_response_queue() */
946
947 /* This function will clear the pchip/response queue on 9550SX */
948 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
949 {
950 u32 response_que_value = 0;
951 unsigned long before;
952 int retval = 1;
953
954 if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
955 before = jiffies;
956 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
957 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
958 msleep(1);
959 if (time_after(jiffies, before + HZ * 30))
960 goto out;
961 }
962 /* P-chip settle time */
963 msleep(500);
964 retval = 0;
965 } else
966 retval = 0;
967 out:
968 return retval;
969 } /* End twa_empty_response_queue_large() */
970
971 /* This function passes sense keys from firmware to scsi layer */
972 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
973 {
974 TW_Command_Full *full_command_packet;
975 unsigned short error;
976 int retval = 1;
977 char *error_str;
978
979 full_command_packet = tw_dev->command_packet_virt[request_id];
980
981 /* Check for embedded error string */
982 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
983
984 /* Don't print error for Logical unit not supported during rollcall */
985 error = le16_to_cpu(full_command_packet->header.status_block.error);
986 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
987 if (print_host)
988 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
989 tw_dev->host->host_no,
990 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
991 full_command_packet->header.status_block.error,
992 error_str[0] == '\0' ?
993 twa_string_lookup(twa_error_table,
994 full_command_packet->header.status_block.error) : error_str,
995 full_command_packet->header.err_specific_desc);
996 else
997 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
998 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
999 full_command_packet->header.status_block.error,
1000 error_str[0] == '\0' ?
1001 twa_string_lookup(twa_error_table,
1002 full_command_packet->header.status_block.error) : error_str,
1003 full_command_packet->header.err_specific_desc);
1004 }
1005
1006 if (copy_sense) {
1007 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1008 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1009 retval = TW_ISR_DONT_RESULT;
1010 goto out;
1011 }
1012 retval = 0;
1013 out:
1014 return retval;
1015 } /* End twa_fill_sense() */
1016
1017 /* This function will free up device extension resources */
1018 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1019 {
1020 if (tw_dev->command_packet_virt[0])
1021 pci_free_consistent(tw_dev->tw_pci_dev,
1022 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1023 tw_dev->command_packet_virt[0],
1024 tw_dev->command_packet_phys[0]);
1025
1026 if (tw_dev->generic_buffer_virt[0])
1027 pci_free_consistent(tw_dev->tw_pci_dev,
1028 TW_SECTOR_SIZE*TW_Q_LENGTH,
1029 tw_dev->generic_buffer_virt[0],
1030 tw_dev->generic_buffer_phys[0]);
1031
1032 kfree(tw_dev->event_queue[0]);
1033 } /* End twa_free_device_extension() */
1034
1035 /* This function will free a request id */
1036 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1037 {
1038 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1039 tw_dev->state[request_id] = TW_S_FINISHED;
1040 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1041 } /* End twa_free_request_id() */
1042
1043 /* This function will get parameter table entries from the firmware */
1044 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1045 {
1046 TW_Command_Full *full_command_packet;
1047 TW_Command *command_packet;
1048 TW_Param_Apache *param;
1049 void *retval = NULL;
1050
1051 /* Setup the command packet */
1052 full_command_packet = tw_dev->command_packet_virt[request_id];
1053 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1054 command_packet = &full_command_packet->command.oldcommand;
1055
1056 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1057 command_packet->size = TW_COMMAND_SIZE;
1058 command_packet->request_id = request_id;
1059 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1060
1061 /* Now setup the param */
1062 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1063 memset(param, 0, TW_SECTOR_SIZE);
1064 param->table_id = cpu_to_le16(table_id | 0x8000);
1065 param->parameter_id = cpu_to_le16(parameter_id);
1066 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1067
1068 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1069 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1070
1071 /* Post the command packet to the board */
1072 twa_post_command_packet(tw_dev, request_id, 1);
1073
1074 /* Poll for completion */
1075 if (twa_poll_response(tw_dev, request_id, 30))
1076 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1077 else
1078 retval = (void *)&(param->data[0]);
1079
1080 tw_dev->posted_request_count--;
1081 tw_dev->state[request_id] = TW_S_INITIAL;
1082
1083 return retval;
1084 } /* End twa_get_param() */
1085
1086 /* This function will assign an available request id */
1087 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1088 {
1089 *request_id = tw_dev->free_queue[tw_dev->free_head];
1090 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1091 tw_dev->state[*request_id] = TW_S_STARTED;
1092 } /* End twa_get_request_id() */
1093
1094 /* This function will send an initconnection command to controller */
1095 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1096 u32 set_features, unsigned short current_fw_srl,
1097 unsigned short current_fw_arch_id,
1098 unsigned short current_fw_branch,
1099 unsigned short current_fw_build,
1100 unsigned short *fw_on_ctlr_srl,
1101 unsigned short *fw_on_ctlr_arch_id,
1102 unsigned short *fw_on_ctlr_branch,
1103 unsigned short *fw_on_ctlr_build,
1104 u32 *init_connect_result)
1105 {
1106 TW_Command_Full *full_command_packet;
1107 TW_Initconnect *tw_initconnect;
1108 int request_id = 0, retval = 1;
1109
1110 /* Initialize InitConnection command packet */
1111 full_command_packet = tw_dev->command_packet_virt[request_id];
1112 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1113 full_command_packet->header.header_desc.size_header = 128;
1114
1115 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1116 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1117 tw_initconnect->request_id = request_id;
1118 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1119 tw_initconnect->features = set_features;
1120
1121 /* Turn on 64-bit sgl support if we need to */
1122 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1123
1124 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1125
1126 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1127 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1128 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1129 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1130 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1131 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1132 } else
1133 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1134
1135 /* Send command packet to the board */
1136 twa_post_command_packet(tw_dev, request_id, 1);
1137
1138 /* Poll for completion */
1139 if (twa_poll_response(tw_dev, request_id, 30)) {
1140 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1141 } else {
1142 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1143 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1144 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1145 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1146 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1147 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1148 }
1149 retval = 0;
1150 }
1151
1152 tw_dev->posted_request_count--;
1153 tw_dev->state[request_id] = TW_S_INITIAL;
1154
1155 return retval;
1156 } /* End twa_initconnection() */
1157
1158 /* This function will initialize the fields of a device extension */
1159 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1160 {
1161 int i, retval = 1;
1162
1163 /* Initialize command packet buffers */
1164 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1165 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1166 goto out;
1167 }
1168
1169 /* Initialize generic buffer */
1170 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1171 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1172 goto out;
1173 }
1174
1175 /* Allocate event info space */
1176 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1177 if (!tw_dev->event_queue[0]) {
1178 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1179 goto out;
1180 }
1181
1182
1183 for (i = 0; i < TW_Q_LENGTH; i++) {
1184 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1185 tw_dev->free_queue[i] = i;
1186 tw_dev->state[i] = TW_S_INITIAL;
1187 }
1188
1189 tw_dev->pending_head = TW_Q_START;
1190 tw_dev->pending_tail = TW_Q_START;
1191 tw_dev->free_head = TW_Q_START;
1192 tw_dev->free_tail = TW_Q_START;
1193 tw_dev->error_sequence_id = 1;
1194 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1195
1196 mutex_init(&tw_dev->ioctl_lock);
1197 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1198
1199 retval = 0;
1200 out:
1201 return retval;
1202 } /* End twa_initialize_device_extension() */
1203
1204 /* This function is the interrupt service routine */
1205 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1206 {
1207 int request_id, error = 0;
1208 u32 status_reg_value;
1209 TW_Response_Queue response_que;
1210 TW_Command_Full *full_command_packet;
1211 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1212 int handled = 0;
1213
1214 /* Get the per adapter lock */
1215 spin_lock(tw_dev->host->host_lock);
1216
1217 /* Read the registers */
1218 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1219
1220 /* Check if this is our interrupt, otherwise bail */
1221 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1222 goto twa_interrupt_bail;
1223
1224 handled = 1;
1225
1226 /* If we are resetting, bail */
1227 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1228 goto twa_interrupt_bail;
1229
1230 /* Check controller for errors */
1231 if (twa_check_bits(status_reg_value)) {
1232 if (twa_decode_bits(tw_dev, status_reg_value)) {
1233 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1234 goto twa_interrupt_bail;
1235 }
1236 }
1237
1238 /* Handle host interrupt */
1239 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1240 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1241
1242 /* Handle attention interrupt */
1243 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1244 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1245 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1246 twa_get_request_id(tw_dev, &request_id);
1247
1248 error = twa_aen_read_queue(tw_dev, request_id);
1249 if (error) {
1250 tw_dev->state[request_id] = TW_S_COMPLETED;
1251 twa_free_request_id(tw_dev, request_id);
1252 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1253 }
1254 }
1255 }
1256
1257 /* Handle command interrupt */
1258 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1259 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1260 /* Drain as many pending commands as we can */
1261 while (tw_dev->pending_request_count > 0) {
1262 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1263 if (tw_dev->state[request_id] != TW_S_PENDING) {
1264 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1265 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1266 goto twa_interrupt_bail;
1267 }
1268 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1269 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1270 tw_dev->pending_request_count--;
1271 } else {
1272 /* If we get here, we will continue re-posting on the next command interrupt */
1273 break;
1274 }
1275 }
1276 }
1277
1278 /* Handle response interrupt */
1279 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1280
1281 /* Drain the response queue from the board */
1282 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1283 /* Complete the response */
1284 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1285 request_id = TW_RESID_OUT(response_que.response_id);
1286 full_command_packet = tw_dev->command_packet_virt[request_id];
1287 error = 0;
1288 /* Check for command packet errors */
1289 if (full_command_packet->command.newcommand.status != 0) {
1290 if (tw_dev->srb[request_id] != NULL) {
1291 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1292 } else {
1293 /* Skip ioctl error prints */
1294 if (request_id != tw_dev->chrdev_request_id) {
1295 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1296 }
1297 }
1298 }
1299
1300 /* Check for correct state */
1301 if (tw_dev->state[request_id] != TW_S_POSTED) {
1302 if (tw_dev->srb[request_id] != NULL) {
1303 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1304 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1305 goto twa_interrupt_bail;
1306 }
1307 }
1308
1309 /* Check for internal command completion */
1310 if (tw_dev->srb[request_id] == NULL) {
1311 if (request_id != tw_dev->chrdev_request_id) {
1312 if (twa_aen_complete(tw_dev, request_id))
1313 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1314 } else {
1315 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1316 wake_up(&tw_dev->ioctl_wqueue);
1317 }
1318 } else {
1319 struct scsi_cmnd *cmd;
1320
1321 cmd = tw_dev->srb[request_id];
1322
1323 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1324 /* If there was no error, the command was a success */
1325 if (error == 0) {
1326 cmd->result = (DID_OK << 16);
1327 }
1328
1329 /* If error, command failed */
1330 if (error == 1) {
1331 /* Ask for a host reset */
1332 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1333 }
1334
1335 /* Report residual bytes for single sgl */
1336 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1337 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1338 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1339 }
1340
1341 /* Now complete the io */
1342 scsi_dma_unmap(cmd);
1343 cmd->scsi_done(cmd);
1344 tw_dev->state[request_id] = TW_S_COMPLETED;
1345 twa_free_request_id(tw_dev, request_id);
1346 tw_dev->posted_request_count--;
1347 }
1348
1349 /* Check for valid status after each drain */
1350 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1351 if (twa_check_bits(status_reg_value)) {
1352 if (twa_decode_bits(tw_dev, status_reg_value)) {
1353 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1354 goto twa_interrupt_bail;
1355 }
1356 }
1357 }
1358 }
1359
1360 twa_interrupt_bail:
1361 spin_unlock(tw_dev->host->host_lock);
1362 return IRQ_RETVAL(handled);
1363 } /* End twa_interrupt() */
1364
1365 /* This function will load the request id and various sgls for ioctls */
1366 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1367 {
1368 TW_Command *oldcommand;
1369 TW_Command_Apache *newcommand;
1370 TW_SG_Entry *sgl;
1371 unsigned int pae = 0;
1372
1373 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1374 pae = 1;
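/* pae is set only on a 32-bit kernel built with a 64-bit dma_addr_t (e.g. a
 * PAE configuration); in that case the old-style ioctl command is padded by
 * one 32-bit word (oldcommand->size += pae below), and on the 9690SA the SG
 * list pointer is shifted by that same word. */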
1375
1376 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1377 newcommand = &full_command_packet->command.newcommand;
1378 newcommand->request_id__lunl =
1379 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1380 if (length) {
1381 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1382 newcommand->sg_list[0].length = cpu_to_le32(length);
1383 }
1384 newcommand->sgl_entries__lunh =
1385 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1386 } else {
1387 oldcommand = &full_command_packet->command.oldcommand;
1388 oldcommand->request_id = request_id;
1389
1390 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1391 /* Load the sg list */
1392 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1393 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1394 else
1395 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1396 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1397 sgl->length = cpu_to_le32(length);
1398
1399 oldcommand->size += pae;
1400 }
1401 }
1402 } /* End twa_load_sgl() */
1403
1404 /* This function will poll for a response interrupt of a request */
1405 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1406 {
1407 int retval = 1, found = 0, response_request_id;
1408 TW_Response_Queue response_queue;
1409 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1410
1411 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1412 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1413 response_request_id = TW_RESID_OUT(response_queue.response_id);
1414 if (request_id != response_request_id) {
1415 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1416 goto out;
1417 }
1418 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1419 if (full_command_packet->command.newcommand.status != 0) {
1420 /* bad response */
1421 twa_fill_sense(tw_dev, request_id, 0, 0);
1422 goto out;
1423 }
1424 found = 1;
1425 } else {
1426 if (full_command_packet->command.oldcommand.status != 0) {
1427 /* bad response */
1428 twa_fill_sense(tw_dev, request_id, 0, 0);
1429 goto out;
1430 }
1431 found = 1;
1432 }
1433 }
1434
1435 if (found)
1436 retval = 0;
1437 out:
1438 return retval;
1439 } /* End twa_poll_response() */
1440
1441 /* This function will poll the status register for a flag */
1442 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1443 {
1444 u32 status_reg_value;
1445 unsigned long before;
1446 int retval = 1;
1447
1448 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1449 before = jiffies;
1450
1451 if (twa_check_bits(status_reg_value))
1452 twa_decode_bits(tw_dev, status_reg_value);
1453
1454 while ((status_reg_value & flag) != flag) {
1455 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1456
1457 if (twa_check_bits(status_reg_value))
1458 twa_decode_bits(tw_dev, status_reg_value);
1459
1460 if (time_after(jiffies, before + HZ * seconds))
1461 goto out;
1462
1463 msleep(50);
1464 }
1465 retval = 0;
1466 out:
1467 return retval;
1468 } /* End twa_poll_status() */
1469
1470 /* This function will poll the status register for disappearance of a flag */
1471 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1472 {
1473 u32 status_reg_value;
1474 unsigned long before;
1475 int retval = 1;
1476
1477 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1478 before = jiffies;
1479
1480 if (twa_check_bits(status_reg_value))
1481 twa_decode_bits(tw_dev, status_reg_value);
1482
1483 while ((status_reg_value & flag) != 0) {
1484 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1485 if (twa_check_bits(status_reg_value))
1486 twa_decode_bits(tw_dev, status_reg_value);
1487
1488 if (time_after(jiffies, before + HZ * seconds))
1489 goto out;
1490
1491 msleep(50);
1492 }
1493 retval = 0;
1494 out:
1495 return retval;
1496 } /* End twa_poll_status_gone() */
1497
1498 /* This function will attempt to post a command packet to the board */
1499 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1500 {
1501 u32 status_reg_value;
1502 dma_addr_t command_que_value;
1503 int retval = 1;
1504
1505 command_que_value = tw_dev->command_packet_phys[request_id];
1506
1507 /* For 9650SE write low 4 bytes first */
1508 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1509 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1510 command_que_value += TW_COMMAND_OFFSET;
1511 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1512 }
1513
1514 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1515
1516 if (twa_check_bits(status_reg_value))
1517 twa_decode_bits(tw_dev, status_reg_value);
1518
1519 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1520
1521 /* Only pend internal driver commands */
1522 if (!internal) {
1523 retval = SCSI_MLQUEUE_HOST_BUSY;
1524 goto out;
1525 }
1526
1527 /* Couldn't post the command packet, so we do it later */
1528 if (tw_dev->state[request_id] != TW_S_PENDING) {
1529 tw_dev->state[request_id] = TW_S_PENDING;
1530 tw_dev->pending_request_count++;
1531 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1532 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1533 }
1534 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1535 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1536 }
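/* The pending queue is a TW_Q_LENGTH-entry ring indexed by pending_head and
   pending_tail.  Unmasking the command interrupt lets the interrupt handler
   (twa_interrupt()) re-post this request once the controller signals room in
   its command queue. */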
1537 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1538 goto out;
1539 } else {
1540 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1541 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1542 /* Now write upper 4 bytes */
1543 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1544 } else {
1545 if (sizeof(dma_addr_t) > 4) {
1546 command_que_value += TW_COMMAND_OFFSET;
1547 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1548 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1549 } else {
1550 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1551 }
1552 }
1553 tw_dev->state[request_id] = TW_S_POSTED;
1554 tw_dev->posted_request_count++;
1555 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1556 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1557 }
1558 }
1559 retval = 0;
1560 out:
1561 return retval;
1562 } /* End twa_post_command_packet() */
1563
1564 /* This function will reset a device extension */
1565 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1566 {
1567 int i = 0;
1568 int retval = 1;
1569 unsigned long flags = 0;
1570
1571 set_bit(TW_IN_RESET, &tw_dev->flags);
1572 TW_DISABLE_INTERRUPTS(tw_dev);
1573 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1574 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1575
1576 /* Abort all requests that are in progress */
1577 for (i = 0; i < TW_Q_LENGTH; i++) {
1578 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1579 (tw_dev->state[i] != TW_S_INITIAL) &&
1580 (tw_dev->state[i] != TW_S_COMPLETED)) {
1581 if (tw_dev->srb[i]) {
1582 struct scsi_cmnd *cmd = tw_dev->srb[i];
1583
1584 cmd->result = (DID_RESET << 16);
1585 scsi_dma_unmap(cmd);
1586 cmd->scsi_done(cmd);
1587 }
1588 }
1589 }
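/* Completing the outstanding commands with DID_RESET hands them back to the
   SCSI midlayer, which is expected to retry them once the host reset has
   finished. */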
1590
1591 /* Reset queues and counts */
1592 for (i = 0; i < TW_Q_LENGTH; i++) {
1593 tw_dev->free_queue[i] = i;
1594 tw_dev->state[i] = TW_S_INITIAL;
1595 }
1596 tw_dev->free_head = TW_Q_START;
1597 tw_dev->free_tail = TW_Q_START;
1598 tw_dev->posted_request_count = 0;
1599 tw_dev->pending_request_count = 0;
1600 tw_dev->pending_head = TW_Q_START;
1601 tw_dev->pending_tail = TW_Q_START;
1602 tw_dev->reset_print = 0;
1603
1604 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1605
1606 if (twa_reset_sequence(tw_dev, 1))
1607 goto out;
1608
1609 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1610 clear_bit(TW_IN_RESET, &tw_dev->flags);
1611 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1612
1613 retval = 0;
1614 out:
1615 return retval;
1616 } /* End twa_reset_device_extension() */
1617
1618 /* This function will reset a controller */
1619 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1620 {
1621 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1622
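/* Each pass makes one full bring-up attempt; any step that fails bumps
   'tries', forces a soft reset on the next pass, and starts over, up to
   TW_MAX_RESET_TRIES attempts. */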
1623 while (tries < TW_MAX_RESET_TRIES) {
1624 if (do_soft_reset) {
1625 TW_SOFT_RESET(tw_dev);
1626 /* Clear pchip/response queue on 9550SX */
1627 if (twa_empty_response_queue_large(tw_dev)) {
1628 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1629 do_soft_reset = 1;
1630 tries++;
1631 continue;
1632 }
1633 }
1634
1635 /* Make sure controller is in a good state */
1636 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1637 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1638 do_soft_reset = 1;
1639 tries++;
1640 continue;
1641 }
1642
1643 /* Empty response queue */
1644 if (twa_empty_response_queue(tw_dev)) {
1645 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1646 do_soft_reset = 1;
1647 tries++;
1648 continue;
1649 }
1650
1651 flashed = 0;
1652
1653 /* Check for compatibility/flash */
1654 if (twa_check_srl(tw_dev, &flashed)) {
1655 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1656 do_soft_reset = 1;
1657 tries++;
1658 continue;
1659 } else {
1660 if (flashed) {
1661 tries++;
1662 continue;
1663 }
1664 }
1665
1666 /* Drain the AEN queue */
1667 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1668 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1669 do_soft_reset = 1;
1670 tries++;
1671 continue;
1672 }
1673
1674 /* If we got here, controller is in a good state */
1675 retval = 0;
1676 goto out;
1677 }
1678 out:
1679 return retval;
1680 } /* End twa_reset_sequence() */
1681
1682 /* This function returns unit geometry in cylinders/heads/sectors */
1683 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1684 {
1685 int heads, sectors, cylinders;
1686 TW_Device_Extension *tw_dev;
1687
1688 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1689
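/* Fake a BIOS geometry: 0x200000 sectors (1 GiB with 512-byte sectors) is the
   cutoff between 64/32 and 255/63 head/sector translation; the cylinder count
   is whatever the capacity divides down to. */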
1690 if (capacity >= 0x200000) {
1691 heads = 255;
1692 sectors = 63;
1693 } else {
1694 heads = 64;
1695 sectors = 32;
1696 }
1697 sector_div(capacity, heads * sectors); /* leaves the quotient in 'capacity' */
1698 cylinders = capacity;
1699
1700 geom[0] = heads;
1701 geom[1] = sectors;
1702 geom[2] = cylinders;
1703
1704 return 0;
1705 } /* End twa_scsi_biosparam() */
1706
1707 /* This is the new scsi eh reset function */
1708 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1709 {
1710 TW_Device_Extension *tw_dev = NULL;
1711 int retval = FAILED;
1712
1713 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1714
1715 tw_dev->num_resets++;
1716
1717 sdev_printk(KERN_WARNING, SCpnt->device,
1718 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1719 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1720
1721 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1722 mutex_lock(&tw_dev->ioctl_lock);
1723
1724 /* Now reset the card and some of the device extension data */
1725 if (twa_reset_device_extension(tw_dev)) {
1726 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1727 goto out;
1728 }
1729
1730 retval = SUCCESS;
1731 out:
1732 mutex_unlock(&tw_dev->ioctl_lock);
1733 return retval;
1734 } /* End twa_scsi_eh_reset() */
1735
1736 /* This is the main scsi queue function to handle scsi opcodes */
1737 static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1738 {
1739 int request_id, retval;
1740 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1741
1742 /* If we are resetting due to a timed-out ioctl, report as busy */
1743 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1744 retval = SCSI_MLQUEUE_HOST_BUSY;
1745 goto out;
1746 }
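/* Returning SCSI_MLQUEUE_HOST_BUSY asks the midlayer to requeue the command
   and retry it later, after the reset has completed. */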
1747
1748 /* Check if this FW supports luns */
1749 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1750 SCpnt->result = (DID_BAD_TARGET << 16);
1751 done(SCpnt);
1752 retval = 0;
1753 goto out;
1754 }
1755
1756 /* Save done function into scsi_cmnd struct */
1757 SCpnt->scsi_done = done;
1758
1759 /* Get a free request id */
1760 twa_get_request_id(tw_dev, &request_id);
1761
1762 /* Save the scsi command for use by the ISR */
1763 tw_dev->srb[request_id] = SCpnt;
1764
1765 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1766 switch (retval) {
1767 case SCSI_MLQUEUE_HOST_BUSY:
1768 scsi_dma_unmap(SCpnt);
1769 twa_free_request_id(tw_dev, request_id);
1770 break;
1771 case 1:
1772 SCpnt->result = (DID_ERROR << 16);
1773 scsi_dma_unmap(SCpnt);
1774 done(SCpnt);
1775 tw_dev->state[request_id] = TW_S_COMPLETED;
1776 twa_free_request_id(tw_dev, request_id);
1777 retval = 0;
1778 }
1779 out:
1780 return retval;
1781 } /* End twa_scsi_queue_lck() */
1782
1783 static DEF_SCSI_QCMD(twa_scsi_queue)
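/* DEF_SCSI_QCMD() generates the locked wrapper twa_scsi_queue(): it takes the
   host lock and calls twa_scsi_queue_lck() above with the command's
   completion callback. */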
1784
1785 /* This function hands scsi cdb's to the firmware */
1786 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1787 {
1788 TW_Command_Full *full_command_packet;
1789 TW_Command_Apache *command_packet;
1790 u32 num_sectors = 0x0;
1791 int i, sg_count;
1792 struct scsi_cmnd *srb = NULL;
1793 struct scatterlist *sglist = NULL, *sg;
1794 int retval = 1;
1795
1796 if (tw_dev->srb[request_id]) {
1797 srb = tw_dev->srb[request_id];
1798 if (scsi_sglist(srb))
1799 sglist = scsi_sglist(srb);
1800 }
1801
1802 /* Initialize command packet */
1803 full_command_packet = tw_dev->command_packet_virt[request_id];
1804 full_command_packet->header.header_desc.size_header = 128;
1805 full_command_packet->header.status_block.error = 0;
1806 full_command_packet->header.status_block.severity__reserved = 0;
1807
1808 command_packet = &full_command_packet->command.newcommand;
1809 command_packet->status = 0;
1810 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1811
1812 /* We forced 16-byte CDB use earlier */
1813 if (!cdb)
1814 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1815 else
1816 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1817
1818 if (srb) {
1819 command_packet->unit = srb->device->id;
1820 command_packet->request_id__lunl =
1821 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1822 } else {
1823 command_packet->request_id__lunl =
1824 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1825 command_packet->unit = 0;
1826 }
1827
1828 command_packet->sgl_offset = 16;
1829
1830 if (!sglistarg) {
1831 /* Map sglist from scsi layer to cmd packet */
1832
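/* Two mapping strategies: a single scatterlist entry smaller than
   TW_MIN_SGL_LENGTH is bounced through the preallocated generic buffer and
   padded to TW_MIN_SGL_LENGTH (apparently a firmware minimum); anything else
   is DMA-mapped directly, with each segment checked against
   TW_ALIGNMENT_9000_SGL.  Read data from the bounce path is copied back in
   twa_scsiop_execute_scsi_complete(). */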
1833 if (scsi_sg_count(srb)) {
1834 if ((scsi_sg_count(srb) == 1) &&
1835 (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
1836 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1837 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1838 scsi_sg_copy_to_buffer(srb,
1839 tw_dev->generic_buffer_virt[request_id],
1840 TW_SECTOR_SIZE);
1841 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1842 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1843 } else {
1844 sg_count = scsi_dma_map(srb);
1845 if (sg_count < 0)
1846 goto out;
1847
1848 scsi_for_each_sg(srb, sg, sg_count, i) {
1849 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1850 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1851 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1852 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1853 goto out;
1854 }
1855 }
1856 }
1857 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1858 }
1859 } else {
1860 /* Internal cdb post */
1861 for (i = 0; i < use_sg; i++) {
1862 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1863 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1864 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1865 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1866 goto out;
1867 }
1868 }
1869 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1870 }
1871
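/* For the sector statistics below, pull the transfer length straight from the
   CDB: byte 4 of a 6-byte READ/WRITE, or the big-endian pair in bytes 7-8 of
   a 10-byte READ/WRITE. */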
1872 if (srb) {
1873 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1874 num_sectors = (u32)srb->cmnd[4];
1875
1876 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1877 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1878 }
1879
1880 /* Update sector statistic */
1881 tw_dev->sector_count = num_sectors;
1882 if (tw_dev->sector_count > tw_dev->max_sector_count)
1883 tw_dev->max_sector_count = tw_dev->sector_count;
1884
1885 /* Update SG statistics */
1886 if (srb) {
1887 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1888 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1889 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1890 }
1891
1892 /* Now post the command to the board */
1893 if (srb) {
1894 retval = twa_post_command_packet(tw_dev, request_id, 0);
1895 } else {
1896 twa_post_command_packet(tw_dev, request_id, 1);
1897 retval = 0;
1898 }
1899 out:
1900 return retval;
1901 } /* End twa_scsiop_execute_scsi() */
1902
1903 /* This function completes an execute scsi operation */
1904 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1905 {
1906 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1907
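/* Undo the bounce-buffer path set up in twa_scsiop_execute_scsi(): for small
   single-segment reads the data landed in the generic buffer, so copy it back
   into the command's scatterlist. */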
1908 if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1909 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1910 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1911 if (scsi_sg_count(cmd) == 1) {
1912 void *buf = tw_dev->generic_buffer_virt[request_id];
1913
1914 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1915 }
1916 }
1917 } /* End twa_scsiop_execute_scsi_complete() */
1918
1919 /* This function tells the controller to shut down */
1920 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1921 {
1922 /* Disable interrupts */
1923 TW_DISABLE_INTERRUPTS(tw_dev);
1924
1925 /* Free up the IRQ */
1926 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1927
1928 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1929
1930 /* Tell the card we are shutting down */
1931 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1932 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1933 } else {
1934 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1935 }
1936
1937 /* Clear all interrupts just before exit */
1938 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1939 } /* End __twa_shutdown() */
1940
1941 /* Wrapper for __twa_shutdown */
1942 static void twa_shutdown(struct pci_dev *pdev)
1943 {
1944 struct Scsi_Host *host = pci_get_drvdata(pdev);
1945 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1946
1947 __twa_shutdown(tw_dev);
1948 } /* End twa_shutdown() */
1949
1950 /* This function will look up a message string by its code */
1951 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1952 {
1953 int index;
1954
1955 for (index = 0; ((code != table[index].code) &&
1956 (table[index].text != (char *)0)); index++);
1957 return(table[index].text);
1958 } /* End twa_string_lookup() */
1959
1960 /* This function gets called when a disk is coming on-line */
1961 static int twa_slave_configure(struct scsi_device *sdev)
1962 {
1963 /* Force a 60-second command timeout */
1964 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1965
1966 return 0;
1967 } /* End twa_slave_configure() */
1968
1969 /* scsi_host_template initializer */
1970 static struct scsi_host_template driver_template = {
1971 .module = THIS_MODULE,
1972 .name = "3ware 9000 Storage Controller",
1973 .queuecommand = twa_scsi_queue,
1974 .eh_host_reset_handler = twa_scsi_eh_reset,
1975 .bios_param = twa_scsi_biosparam,
1976 .change_queue_depth = scsi_change_queue_depth,
1977 .can_queue = TW_Q_LENGTH-2,
1978 .slave_configure = twa_slave_configure,
1979 .this_id = -1,
1980 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
1981 .max_sectors = TW_MAX_SECTORS,
1982 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1983 .use_clustering = ENABLE_CLUSTERING,
1984 .shost_attrs = twa_host_attrs,
1985 .emulated = 1,
1986 .no_write_same = 1,
1987 };
1988
1989 /* This function will probe and initialize a card */
1990 static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1991 {
1992 struct Scsi_Host *host = NULL;
1993 TW_Device_Extension *tw_dev;
1994 unsigned long mem_addr, mem_len;
1995 int retval = -ENODEV;
1996
1997 retval = pci_enable_device(pdev);
1998 if (retval) {
1999 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2000 goto out_disable_device;
2001 }
2002
2003 pci_set_master(pdev);
2004 pci_try_set_mwi(pdev);
2005
2006 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2007 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2008 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2009 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2010 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2011 retval = -ENODEV;
2012 goto out_disable_device;
2013 }
2014
2015 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2016 if (!host) {
2017 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2018 retval = -ENOMEM;
2019 goto out_disable_device;
2020 }
2021 tw_dev = (TW_Device_Extension *)host->hostdata;
2022
2023 /* Save values to device extension */
2024 tw_dev->host = host;
2025 tw_dev->tw_pci_dev = pdev;
2026
2027 if (twa_initialize_device_extension(tw_dev)) {
2028 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2029 goto out_free_device_extension;
2030 }
2031
2032 /* Request IO regions */
2033 retval = pci_request_regions(pdev, "3w-9xxx");
2034 if (retval) {
2035 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2036 goto out_free_device_extension;
2037 }
2038
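/* Controllers with the original 9000 device ID expose their registers in PCI
   BAR 1; the newer 9550SX/9650SE/9690SA parts use BAR 2. */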
2039 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2040 mem_addr = pci_resource_start(pdev, 1);
2041 mem_len = pci_resource_len(pdev, 1);
2042 } else {
2043 mem_addr = pci_resource_start(pdev, 2);
2044 mem_len = pci_resource_len(pdev, 2);
2045 }
2046
2047 /* Save base address */
2048 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2049 if (!tw_dev->base_addr) {
2050 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2051 goto out_release_mem_region;
2052 }
2053
2054 /* Disable interrupts on the card */
2055 TW_DISABLE_INTERRUPTS(tw_dev);
2056
2057 /* Initialize the card */
2058 if (twa_reset_sequence(tw_dev, 0))
2059 goto out_iounmap;
2060
2061 /* Set host specific parameters */
2062 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2063 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2064 host->max_id = TW_MAX_UNITS_9650SE;
2065 else
2066 host->max_id = TW_MAX_UNITS;
2067
2068 host->max_cmd_len = TW_MAX_CDB_LEN;
2069
2070 /* Channels aren't supported by the adapter */
2071 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2072 host->max_channel = 0;
2073
2074 /* Register the card with the kernel SCSI layer */
2075 retval = scsi_add_host(host, &pdev->dev);
2076 if (retval) {
2077 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2078 goto out_iounmap;
2079 }
2080
2081 pci_set_drvdata(pdev, host);
2082
2083 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2084 host->host_no, mem_addr, pdev->irq);
2085 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2086 host->host_no,
2087 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2088 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2089 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2090 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2091 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2092 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2093
2094 /* Try to enable MSI */
2095 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2096 !pci_enable_msi(pdev))
2097 set_bit(TW_USING_MSI, &tw_dev->flags);
2098
2099 /* Now setup the interrupt handler */
2100 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2101 if (retval) {
2102 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2103 goto out_remove_host;
2104 }
2105
2106 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2107 twa_device_extension_count++;
2108
2109 /* Re-enable interrupts on the card */
2110 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2111
2112 /* Finally, scan the host */
2113 scsi_scan_host(host);
2114
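/* The "twa" character device backs the driver's ioctl interface and is shared
   by all controllers, so it is registered only once, on the first successful
   probe (twa_major == -1 means not yet registered). */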
2115 if (twa_major == -1) {
2116 if ((twa_major = register_chrdev(0, "twa", &twa_fops)) < 0)
2117 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2118 }
2119 return 0;
2120
2121 out_remove_host:
2122 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2123 pci_disable_msi(pdev);
2124 scsi_remove_host(host);
2125 out_iounmap:
2126 iounmap(tw_dev->base_addr);
2127 out_release_mem_region:
2128 pci_release_regions(pdev);
2129 out_free_device_extension:
2130 twa_free_device_extension(tw_dev);
2131 scsi_host_put(host);
2132 out_disable_device:
2133 pci_disable_device(pdev);
2134
2135 return retval;
2136 } /* End twa_probe() */
2137
2138 /* This function is called to remove a device */
2139 static void twa_remove(struct pci_dev *pdev)
2140 {
2141 struct Scsi_Host *host = pci_get_drvdata(pdev);
2142 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2143
2144 scsi_remove_host(tw_dev->host);
2145
2146 /* Unregister character device */
2147 if (twa_major >= 0) {
2148 unregister_chrdev(twa_major, "twa");
2149 twa_major = -1;
2150 }
2151
2152 /* Shutdown the card */
2153 __twa_shutdown(tw_dev);
2154
2155 /* Disable MSI if enabled */
2156 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2157 pci_disable_msi(pdev);
2158
2159 /* Free IO remapping */
2160 iounmap(tw_dev->base_addr);
2161
2162 /* Free up the mem region */
2163 pci_release_regions(pdev);
2164
2165 /* Free up device extension resources */
2166 twa_free_device_extension(tw_dev);
2167
2168 scsi_host_put(tw_dev->host);
2169 pci_disable_device(pdev);
2170 twa_device_extension_count--;
2171 } /* End twa_remove() */
2172
2173 #ifdef CONFIG_PM
2174 /* This function is called on PCI suspend */
2175 static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2176 {
2177 struct Scsi_Host *host = pci_get_drvdata(pdev);
2178 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2179
2180 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2181
2182 TW_DISABLE_INTERRUPTS(tw_dev);
2183 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2184
2185 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2186 pci_disable_msi(pdev);
2187
2188 /* Tell the card we are shutting down */
2189 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2190 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2191 } else {
2192 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2193 }
2194 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2195
2196 pci_save_state(pdev);
2197 pci_disable_device(pdev);
2198 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2199
2200 return 0;
2201 } /* End twa_suspend() */
2202
2203 /* This function is called on PCI resume */
2204 static int twa_resume(struct pci_dev *pdev)
2205 {
2206 int retval = 0;
2207 struct Scsi_Host *host = pci_get_drvdata(pdev);
2208 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2209
2210 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2211 pci_set_power_state(pdev, PCI_D0);
2212 pci_enable_wake(pdev, PCI_D0, 0);
2213 pci_restore_state(pdev);
2214
2215 retval = pci_enable_device(pdev);
2216 if (retval) {
2217 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2218 return retval;
2219 }
2220
2221 pci_set_master(pdev);
2222 pci_try_set_mwi(pdev);
2223
2224 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2225 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2226 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2227 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2228 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2229 retval = -ENODEV;
2230 goto out_disable_device;
2231 }
2232
2233 /* Initialize the card */
2234 if (twa_reset_sequence(tw_dev, 0)) {
2235 retval = -ENODEV;
2236 goto out_disable_device;
2237 }
2238
2239 /* Now setup the interrupt handler */
2240 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2241 if (retval) {
2242 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2243 retval = -ENODEV;
2244 goto out_disable_device;
2245 }
2246
2247 /* Re-enable MSI if it was enabled before suspend */
2248 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2249 pci_enable_msi(pdev);
2250
2251 /* Re-enable interrupts on the card */
2252 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2253
2254 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2255 return 0;
2256
2257 out_disable_device:
2258 scsi_remove_host(host);
2259 pci_disable_device(pdev);
2260
2261 return retval;
2262 } /* End twa_resume() */
2263 #endif
2264
2265 /* PCI Devices supported by this driver */
2266 static struct pci_device_id twa_pci_tbl[] = {
2267 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2268 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2269 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2270 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2271 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2272 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2273 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2274 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2275 { }
2276 };
2277 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2278
2279 /* pci_driver initializer */
2280 static struct pci_driver twa_driver = {
2281 .name = "3w-9xxx",
2282 .id_table = twa_pci_tbl,
2283 .probe = twa_probe,
2284 .remove = twa_remove,
2285 #ifdef CONFIG_PM
2286 .suspend = twa_suspend,
2287 .resume = twa_resume,
2288 #endif
2289 .shutdown = twa_shutdown
2290 };
2291
2292 /* This function is called on driver initialization */
2293 static int __init twa_init(void)
2294 {
2295 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2296
2297 return pci_register_driver(&twa_driver);
2298 } /* End twa_init() */
2299
2300 /* This function is called on driver exit */
2301 static void __exit twa_exit(void)
2302 {
2303 pci_unregister_driver(&twa_driver);
2304 } /* End twa_exit() */
2305
2306 module_init(twa_init);
2307 module_exit(twa_exit);
2308