]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/scsi/3w-9xxx.c
llseek: automatically add .llseek fop
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / 3w-9xxx.c
1 /*
2 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3
4 Written By: Adam Radford <linuxraid@lsi.com>
5 Modifications By: Tom Couch <linuxraid@lsi.com>
6
7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8 Copyright (C) 2010 LSI Corporation.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; version 2 of the License.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 NO WARRANTY
20 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 solely responsible for determining the appropriateness of using and
25 distributing the Program and assumes all risks associated with its
26 exercise of rights under this Agreement, including but not limited to
27 the risks and costs of program errors, damage to or loss of data,
28 programs or equipment, and unavailability or interruption of operations.
29
30 DISCLAIMER OF LIABILITY
31 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 You should have received a copy of the GNU General Public License
40 along with this program; if not, write to the Free Software
41 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42
43 Bugs/Comments/Suggestions should be mailed to:
44 linuxraid@lsi.com
45
46 For more information, goto:
47 http://www.lsi.com
48
49 Note: This version of the driver does not contain a bundled firmware
50 image.
51
52 History
53 -------
54 2.26.02.000 - Driver cleanup for kernel submission.
55 2.26.02.001 - Replace schedule_timeout() calls with msleep().
56 2.26.02.002 - Add support for PAE mode.
57 Add lun support.
58 Fix twa_remove() to free irq handler/unregister_chrdev()
59 before shutting down card.
60 Change to new 'change_queue_depth' api.
61 Fix 'handled=1' ISR usage, remove bogus IRQ check.
62 Remove un-needed eh_abort handler.
63 Add support for embedded firmware error strings.
64 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
65 2.26.02.004 - Add support for 9550SX controllers.
66 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
67 2.26.02.006 - Fix 9550SX pchip reset timeout.
68 Add big endian support.
69 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
70 2.26.02.008 - Free irq handler in __twa_shutdown().
71 Serialize reset code.
72 Add support for 9650SE controllers.
73 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
74 2.26.02.010 - Add support for 9690SA controllers.
75 2.26.02.011 - Increase max AENs drained to 256.
76 Add MSI support and "use_msi" module parameter.
77 Fix bug in twa_get_param() on 4GB+.
78 Use pci_resource_len() for ioremap().
79 2.26.02.012 - Add power management support.
80 2.26.02.013 - Fix bug in twa_load_sgl().
81 2.26.02.014 - Force 60 second timeout default.
82 */
83
84 #include <linux/module.h>
85 #include <linux/reboot.h>
86 #include <linux/spinlock.h>
87 #include <linux/interrupt.h>
88 #include <linux/moduleparam.h>
89 #include <linux/errno.h>
90 #include <linux/types.h>
91 #include <linux/delay.h>
92 #include <linux/pci.h>
93 #include <linux/time.h>
94 #include <linux/mutex.h>
95 #include <linux/smp_lock.h>
96 #include <linux/slab.h>
97 #include <asm/io.h>
98 #include <asm/irq.h>
99 #include <asm/uaccess.h>
100 #include <scsi/scsi.h>
101 #include <scsi/scsi_host.h>
102 #include <scsi/scsi_tcq.h>
103 #include <scsi/scsi_cmnd.h>
104 #include "3w-9xxx.h"
105
106 /* Globals */
107 #define TW_DRIVER_VERSION "2.26.02.014"
108 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
109 static unsigned int twa_device_extension_count;
110 static int twa_major = -1;
111 extern struct timezone sys_tz;
112
113 /* Module parameters */
114 MODULE_AUTHOR ("LSI");
115 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
116 MODULE_LICENSE("GPL");
117 MODULE_VERSION(TW_DRIVER_VERSION);
118
119 static int use_msi = 0;
120 module_param(use_msi, int, S_IRUGO);
121 MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
122
123 /* Function prototypes */
124 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
125 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
126 static char *twa_aen_severity_lookup(unsigned char severity_code);
127 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
128 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
129 static int twa_chrdev_open(struct inode *inode, struct file *file);
130 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
131 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
132 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
133 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
134 u32 set_features, unsigned short current_fw_srl,
135 unsigned short current_fw_arch_id,
136 unsigned short current_fw_branch,
137 unsigned short current_fw_build,
138 unsigned short *fw_on_ctlr_srl,
139 unsigned short *fw_on_ctlr_arch_id,
140 unsigned short *fw_on_ctlr_branch,
141 unsigned short *fw_on_ctlr_build,
142 u32 *init_connect_result);
143 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
144 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
145 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
146 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
147 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
148 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
149 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
150 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
151 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
152 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
153
154 /* Functions */
155
156 /* Show some statistics about the card */
157 static ssize_t twa_show_stats(struct device *dev,
158 struct device_attribute *attr, char *buf)
159 {
160 struct Scsi_Host *host = class_to_shost(dev);
161 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
162 unsigned long flags = 0;
163 ssize_t len;
164
165 spin_lock_irqsave(tw_dev->host->host_lock, flags);
166 len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
167 "Current commands posted: %4d\n"
168 "Max commands posted: %4d\n"
169 "Current pending commands: %4d\n"
170 "Max pending commands: %4d\n"
171 "Last sgl length: %4d\n"
172 "Max sgl length: %4d\n"
173 "Last sector count: %4d\n"
174 "Max sector count: %4d\n"
175 "SCSI Host Resets: %4d\n"
176 "AEN's: %4d\n",
177 TW_DRIVER_VERSION,
178 tw_dev->posted_request_count,
179 tw_dev->max_posted_request_count,
180 tw_dev->pending_request_count,
181 tw_dev->max_pending_request_count,
182 tw_dev->sgl_entries,
183 tw_dev->max_sgl_entries,
184 tw_dev->sector_count,
185 tw_dev->max_sector_count,
186 tw_dev->num_resets,
187 tw_dev->aen_count);
188 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
189 return len;
190 } /* End twa_show_stats() */
191
192 /* This function will set a devices queue depth */
193 static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
194 int reason)
195 {
196 if (reason != SCSI_QDEPTH_DEFAULT)
197 return -EOPNOTSUPP;
198
199 if (queue_depth > TW_Q_LENGTH-2)
200 queue_depth = TW_Q_LENGTH-2;
201 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
202 return queue_depth;
203 } /* End twa_change_queue_depth() */
204
205 /* Create sysfs 'stats' entry */
206 static struct device_attribute twa_host_stats_attr = {
207 .attr = {
208 .name = "stats",
209 .mode = S_IRUGO,
210 },
211 .show = twa_show_stats
212 };
213
214 /* Host attributes initializer */
215 static struct device_attribute *twa_host_attrs[] = {
216 &twa_host_stats_attr,
217 NULL,
218 };
219
220 /* File operations struct for character device */
221 static const struct file_operations twa_fops = {
222 .owner = THIS_MODULE,
223 .unlocked_ioctl = twa_chrdev_ioctl,
224 .open = twa_chrdev_open,
225 .release = NULL,
226 .llseek = noop_llseek,
227 };
228
229 /* This function will complete an aen request from the isr */
230 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
231 {
232 TW_Command_Full *full_command_packet;
233 TW_Command *command_packet;
234 TW_Command_Apache_Header *header;
235 unsigned short aen;
236 int retval = 1;
237
238 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
239 tw_dev->posted_request_count--;
240 aen = le16_to_cpu(header->status_block.error);
241 full_command_packet = tw_dev->command_packet_virt[request_id];
242 command_packet = &full_command_packet->command.oldcommand;
243
244 /* First check for internal completion of set param for time sync */
245 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
246 /* Keep reading the queue in case there are more aen's */
247 if (twa_aen_read_queue(tw_dev, request_id))
248 goto out2;
249 else {
250 retval = 0;
251 goto out;
252 }
253 }
254
255 switch (aen) {
256 case TW_AEN_QUEUE_EMPTY:
257 /* Quit reading the queue if this is the last one */
258 break;
259 case TW_AEN_SYNC_TIME_WITH_HOST:
260 twa_aen_sync_time(tw_dev, request_id);
261 retval = 0;
262 goto out;
263 default:
264 twa_aen_queue_event(tw_dev, header);
265
266 /* If there are more aen's, keep reading the queue */
267 if (twa_aen_read_queue(tw_dev, request_id))
268 goto out2;
269 else {
270 retval = 0;
271 goto out;
272 }
273 }
274 retval = 0;
275 out2:
276 tw_dev->state[request_id] = TW_S_COMPLETED;
277 twa_free_request_id(tw_dev, request_id);
278 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
279 out:
280 return retval;
281 } /* End twa_aen_complete() */
282
283 /* This function will drain aen queue */
284 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
285 {
286 int request_id = 0;
287 char cdb[TW_MAX_CDB_LEN];
288 TW_SG_Entry sglist[1];
289 int finished = 0, count = 0;
290 TW_Command_Full *full_command_packet;
291 TW_Command_Apache_Header *header;
292 unsigned short aen;
293 int first_reset = 0, queue = 0, retval = 1;
294
295 if (no_check_reset)
296 first_reset = 0;
297 else
298 first_reset = 1;
299
300 full_command_packet = tw_dev->command_packet_virt[request_id];
301 memset(full_command_packet, 0, sizeof(TW_Command_Full));
302
303 /* Initialize cdb */
304 memset(&cdb, 0, TW_MAX_CDB_LEN);
305 cdb[0] = REQUEST_SENSE; /* opcode */
306 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
307
308 /* Initialize sglist */
309 memset(&sglist, 0, sizeof(TW_SG_Entry));
310 sglist[0].length = TW_SECTOR_SIZE;
311 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
312
313 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
314 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
315 goto out;
316 }
317
318 /* Mark internal command */
319 tw_dev->srb[request_id] = NULL;
320
321 do {
322 /* Send command to the board */
323 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
324 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
325 goto out;
326 }
327
328 /* Now poll for completion */
329 if (twa_poll_response(tw_dev, request_id, 30)) {
330 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
331 tw_dev->posted_request_count--;
332 goto out;
333 }
334
335 tw_dev->posted_request_count--;
336 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
337 aen = le16_to_cpu(header->status_block.error);
338 queue = 0;
339 count++;
340
341 switch (aen) {
342 case TW_AEN_QUEUE_EMPTY:
343 if (first_reset != 1)
344 goto out;
345 else
346 finished = 1;
347 break;
348 case TW_AEN_SOFT_RESET:
349 if (first_reset == 0)
350 first_reset = 1;
351 else
352 queue = 1;
353 break;
354 case TW_AEN_SYNC_TIME_WITH_HOST:
355 break;
356 default:
357 queue = 1;
358 }
359
360 /* Now queue an event info */
361 if (queue)
362 twa_aen_queue_event(tw_dev, header);
363 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
364
365 if (count == TW_MAX_AEN_DRAIN)
366 goto out;
367
368 retval = 0;
369 out:
370 tw_dev->state[request_id] = TW_S_INITIAL;
371 return retval;
372 } /* End twa_aen_drain_queue() */
373
374 /* This function will queue an event */
375 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
376 {
377 u32 local_time;
378 struct timeval time;
379 TW_Event *event;
380 unsigned short aen;
381 char host[16];
382 char *error_str;
383
384 tw_dev->aen_count++;
385
386 /* Fill out event info */
387 event = tw_dev->event_queue[tw_dev->error_index];
388
389 /* Check for clobber */
390 host[0] = '\0';
391 if (tw_dev->host) {
392 sprintf(host, " scsi%d:", tw_dev->host->host_no);
393 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
394 tw_dev->aen_clobber = 1;
395 }
396
397 aen = le16_to_cpu(header->status_block.error);
398 memset(event, 0, sizeof(TW_Event));
399
400 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
401 do_gettimeofday(&time);
402 local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
403 event->time_stamp_sec = local_time;
404 event->aen_code = aen;
405 event->retrieved = TW_AEN_NOT_RETRIEVED;
406 event->sequence_id = tw_dev->error_sequence_id;
407 tw_dev->error_sequence_id++;
408
409 /* Check for embedded error string */
410 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
411
412 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
413 event->parameter_len = strlen(header->err_specific_desc);
414 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
415 if (event->severity != TW_AEN_SEVERITY_DEBUG)
416 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
417 host,
418 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
419 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
420 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
421 header->err_specific_desc);
422 else
423 tw_dev->aen_count--;
424
425 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
426 tw_dev->event_queue_wrapped = 1;
427 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
428 } /* End twa_aen_queue_event() */
429
430 /* This function will read the aen queue from the isr */
431 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
432 {
433 char cdb[TW_MAX_CDB_LEN];
434 TW_SG_Entry sglist[1];
435 TW_Command_Full *full_command_packet;
436 int retval = 1;
437
438 full_command_packet = tw_dev->command_packet_virt[request_id];
439 memset(full_command_packet, 0, sizeof(TW_Command_Full));
440
441 /* Initialize cdb */
442 memset(&cdb, 0, TW_MAX_CDB_LEN);
443 cdb[0] = REQUEST_SENSE; /* opcode */
444 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
445
446 /* Initialize sglist */
447 memset(&sglist, 0, sizeof(TW_SG_Entry));
448 sglist[0].length = TW_SECTOR_SIZE;
449 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
450
451 /* Mark internal command */
452 tw_dev->srb[request_id] = NULL;
453
454 /* Now post the command packet */
455 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
456 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
457 goto out;
458 }
459 retval = 0;
460 out:
461 return retval;
462 } /* End twa_aen_read_queue() */
463
464 /* This function will look up an AEN severity string */
465 static char *twa_aen_severity_lookup(unsigned char severity_code)
466 {
467 char *retval = NULL;
468
469 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
470 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
471 goto out;
472
473 retval = twa_aen_severity_table[severity_code];
474 out:
475 return retval;
476 } /* End twa_aen_severity_lookup() */
477
478 /* This function will sync firmware time with the host time */
479 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
480 {
481 u32 schedulertime;
482 struct timeval utc;
483 TW_Command_Full *full_command_packet;
484 TW_Command *command_packet;
485 TW_Param_Apache *param;
486 u32 local_time;
487
488 /* Fill out the command packet */
489 full_command_packet = tw_dev->command_packet_virt[request_id];
490 memset(full_command_packet, 0, sizeof(TW_Command_Full));
491 command_packet = &full_command_packet->command.oldcommand;
492 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
493 command_packet->request_id = request_id;
494 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
495 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
496 command_packet->size = TW_COMMAND_SIZE;
497 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
498
499 /* Setup the param */
500 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
501 memset(param, 0, TW_SECTOR_SIZE);
502 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
503 param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
504 param->parameter_size_bytes = cpu_to_le16(4);
505
506 /* Convert system time in UTC to local time seconds since last
507 Sunday 12:00AM */
508 do_gettimeofday(&utc);
509 local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
510 schedulertime = local_time - (3 * 86400);
511 schedulertime = cpu_to_le32(schedulertime % 604800);
512
513 memcpy(param->data, &schedulertime, sizeof(u32));
514
515 /* Mark internal command */
516 tw_dev->srb[request_id] = NULL;
517
518 /* Now post the command */
519 twa_post_command_packet(tw_dev, request_id, 1);
520 } /* End twa_aen_sync_time() */
521
522 /* This function will allocate memory and check if it is correctly aligned */
523 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
524 {
525 int i;
526 dma_addr_t dma_handle;
527 unsigned long *cpu_addr;
528 int retval = 1;
529
530 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
531 if (!cpu_addr) {
532 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
533 goto out;
534 }
535
536 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
537 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
538 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
539 goto out;
540 }
541
542 memset(cpu_addr, 0, size*TW_Q_LENGTH);
543
544 for (i = 0; i < TW_Q_LENGTH; i++) {
545 switch(which) {
546 case 0:
547 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
548 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
549 break;
550 case 1:
551 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
552 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
553 break;
554 }
555 }
556 retval = 0;
557 out:
558 return retval;
559 } /* End twa_allocate_memory() */
560
561 /* This function will check the status register for unexpected bits */
562 static int twa_check_bits(u32 status_reg_value)
563 {
564 int retval = 1;
565
566 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
567 goto out;
568 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
569 goto out;
570
571 retval = 0;
572 out:
573 return retval;
574 } /* End twa_check_bits() */
575
576 /* This function will check the srl and decide if we are compatible */
577 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
578 {
579 int retval = 1;
580 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
581 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
582 u32 init_connect_result = 0;
583
584 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
585 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
586 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
587 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
588 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
589 &fw_on_ctlr_build, &init_connect_result)) {
590 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
591 goto out;
592 }
593
594 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
595 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
596 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
597
598 /* Try base mode compatibility */
599 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
600 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
601 TW_EXTENDED_INIT_CONNECT,
602 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
603 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
604 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
605 &fw_on_ctlr_branch, &fw_on_ctlr_build,
606 &init_connect_result)) {
607 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
608 goto out;
609 }
610 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
611 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
612 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
613 } else {
614 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
615 }
616 goto out;
617 }
618 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
619 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
620 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
621 }
622
623 /* Load rest of compatibility struct */
624 strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
625 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
626 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
627 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
628 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
629 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
630 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
631 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
632 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
633 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
634
635 retval = 0;
636 out:
637 return retval;
638 } /* End twa_check_srl() */
639
640 /* This function handles ioctl for the character device */
641 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
642 {
643 struct inode *inode = file->f_path.dentry->d_inode;
644 long timeout;
645 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
646 dma_addr_t dma_handle;
647 int request_id = 0;
648 unsigned int sequence_id = 0;
649 unsigned char event_index, start_index;
650 TW_Ioctl_Driver_Command driver_command;
651 TW_Ioctl_Buf_Apache *tw_ioctl;
652 TW_Lock *tw_lock;
653 TW_Command_Full *full_command_packet;
654 TW_Compatibility_Info *tw_compat_info;
655 TW_Event *event;
656 struct timeval current_time;
657 u32 current_time_ms;
658 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
659 int retval = TW_IOCTL_ERROR_OS_EFAULT;
660 void __user *argp = (void __user *)arg;
661
662 lock_kernel();
663
664 /* Only let one of these through at a time */
665 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
666 retval = TW_IOCTL_ERROR_OS_EINTR;
667 goto out;
668 }
669
670 /* First copy down the driver command */
671 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
672 goto out2;
673
674 /* Check data buffer size */
675 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
676 retval = TW_IOCTL_ERROR_OS_EINVAL;
677 goto out2;
678 }
679
680 /* Hardware can only do multiple of 512 byte transfers */
681 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
682
683 /* Now allocate ioctl buf memory */
684 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
685 if (!cpu_addr) {
686 retval = TW_IOCTL_ERROR_OS_ENOMEM;
687 goto out2;
688 }
689
690 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
691
692 /* Now copy down the entire ioctl */
693 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
694 goto out3;
695
696 /* See which ioctl we are doing */
697 switch (cmd) {
698 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
699 spin_lock_irqsave(tw_dev->host->host_lock, flags);
700 twa_get_request_id(tw_dev, &request_id);
701
702 /* Flag internal command */
703 tw_dev->srb[request_id] = NULL;
704
705 /* Flag chrdev ioctl */
706 tw_dev->chrdev_request_id = request_id;
707
708 full_command_packet = &tw_ioctl->firmware_command;
709
710 /* Load request id and sglist for both command types */
711 twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
712
713 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
714
715 /* Now post the command packet to the controller */
716 twa_post_command_packet(tw_dev, request_id, 1);
717 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
718
719 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
720
721 /* Now wait for command to complete */
722 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
723
724 /* We timed out, and didn't get an interrupt */
725 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
726 /* Now we need to reset the board */
727 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
728 tw_dev->host->host_no, TW_DRIVER, 0x37,
729 cmd);
730 retval = TW_IOCTL_ERROR_OS_EIO;
731 twa_reset_device_extension(tw_dev);
732 goto out3;
733 }
734
735 /* Now copy in the command packet response */
736 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
737
738 /* Now complete the io */
739 spin_lock_irqsave(tw_dev->host->host_lock, flags);
740 tw_dev->posted_request_count--;
741 tw_dev->state[request_id] = TW_S_COMPLETED;
742 twa_free_request_id(tw_dev, request_id);
743 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
744 break;
745 case TW_IOCTL_GET_COMPATIBILITY_INFO:
746 tw_ioctl->driver_command.status = 0;
747 /* Copy compatibility struct into ioctl data buffer */
748 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
749 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
750 break;
751 case TW_IOCTL_GET_LAST_EVENT:
752 if (tw_dev->event_queue_wrapped) {
753 if (tw_dev->aen_clobber) {
754 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
755 tw_dev->aen_clobber = 0;
756 } else
757 tw_ioctl->driver_command.status = 0;
758 } else {
759 if (!tw_dev->error_index) {
760 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
761 break;
762 }
763 tw_ioctl->driver_command.status = 0;
764 }
765 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
766 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
767 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
768 break;
769 case TW_IOCTL_GET_FIRST_EVENT:
770 if (tw_dev->event_queue_wrapped) {
771 if (tw_dev->aen_clobber) {
772 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
773 tw_dev->aen_clobber = 0;
774 } else
775 tw_ioctl->driver_command.status = 0;
776 event_index = tw_dev->error_index;
777 } else {
778 if (!tw_dev->error_index) {
779 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
780 break;
781 }
782 tw_ioctl->driver_command.status = 0;
783 event_index = 0;
784 }
785 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
786 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
787 break;
788 case TW_IOCTL_GET_NEXT_EVENT:
789 event = (TW_Event *)tw_ioctl->data_buffer;
790 sequence_id = event->sequence_id;
791 tw_ioctl->driver_command.status = 0;
792
793 if (tw_dev->event_queue_wrapped) {
794 if (tw_dev->aen_clobber) {
795 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
796 tw_dev->aen_clobber = 0;
797 }
798 start_index = tw_dev->error_index;
799 } else {
800 if (!tw_dev->error_index) {
801 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
802 break;
803 }
804 start_index = 0;
805 }
806 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
807
808 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
809 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
810 tw_dev->aen_clobber = 1;
811 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
812 break;
813 }
814 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
815 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
816 break;
817 case TW_IOCTL_GET_PREVIOUS_EVENT:
818 event = (TW_Event *)tw_ioctl->data_buffer;
819 sequence_id = event->sequence_id;
820 tw_ioctl->driver_command.status = 0;
821
822 if (tw_dev->event_queue_wrapped) {
823 if (tw_dev->aen_clobber) {
824 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
825 tw_dev->aen_clobber = 0;
826 }
827 start_index = tw_dev->error_index;
828 } else {
829 if (!tw_dev->error_index) {
830 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
831 break;
832 }
833 start_index = 0;
834 }
835 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
836
837 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
838 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
839 tw_dev->aen_clobber = 1;
840 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
841 break;
842 }
843 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
844 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
845 break;
846 case TW_IOCTL_GET_LOCK:
847 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
848 do_gettimeofday(&current_time);
849 current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
850
851 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
852 tw_dev->ioctl_sem_lock = 1;
853 tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
854 tw_ioctl->driver_command.status = 0;
855 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
856 } else {
857 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
858 tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
859 }
860 break;
861 case TW_IOCTL_RELEASE_LOCK:
862 if (tw_dev->ioctl_sem_lock == 1) {
863 tw_dev->ioctl_sem_lock = 0;
864 tw_ioctl->driver_command.status = 0;
865 } else {
866 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
867 }
868 break;
869 default:
870 retval = TW_IOCTL_ERROR_OS_ENOTTY;
871 goto out3;
872 }
873
874 /* Now copy the entire response to userspace */
875 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
876 retval = 0;
877 out3:
878 /* Now free ioctl buf memory */
879 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
880 out2:
881 mutex_unlock(&tw_dev->ioctl_lock);
882 out:
883 unlock_kernel();
884 return retval;
885 } /* End twa_chrdev_ioctl() */
886
887 /* This function handles open for the character device */
888 /* NOTE that this function will race with remove. */
889 static int twa_chrdev_open(struct inode *inode, struct file *file)
890 {
891 unsigned int minor_number;
892 int retval = TW_IOCTL_ERROR_OS_ENODEV;
893
894 cycle_kernel_lock();
895 minor_number = iminor(inode);
896 if (minor_number >= twa_device_extension_count)
897 goto out;
898 retval = 0;
899 out:
900 return retval;
901 } /* End twa_chrdev_open() */
902
903 /* This function will print readable messages from status register errors */
904 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
905 {
906 int retval = 1;
907
908 /* Check for various error conditions and handle them appropriately */
909 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
910 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
911 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
912 }
913
914 if (status_reg_value & TW_STATUS_PCI_ABORT) {
915 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
916 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
917 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
918 }
919
920 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
921 if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
922 (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
923 (!test_bit(TW_IN_RESET, &tw_dev->flags)))
924 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
925 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
926 }
927
928 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
929 if (tw_dev->reset_print == 0) {
930 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
931 tw_dev->reset_print = 1;
932 }
933 goto out;
934 }
935 retval = 0;
936 out:
937 return retval;
938 } /* End twa_decode_bits() */
939
940 /* This function will empty the response queue */
941 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
942 {
943 u32 status_reg_value, response_que_value;
944 int count = 0, retval = 1;
945
946 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
947
948 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
949 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
950 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
951 count++;
952 }
953 if (count == TW_MAX_RESPONSE_DRAIN)
954 goto out;
955
956 retval = 0;
957 out:
958 return retval;
959 } /* End twa_empty_response_queue() */
960
961 /* This function will clear the pchip/response queue on 9550SX */
962 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
963 {
964 u32 response_que_value = 0;
965 unsigned long before;
966 int retval = 1;
967
968 if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
969 before = jiffies;
970 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
971 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
972 msleep(1);
973 if (time_after(jiffies, before + HZ * 30))
974 goto out;
975 }
976 /* P-chip settle time */
977 msleep(500);
978 retval = 0;
979 } else
980 retval = 0;
981 out:
982 return retval;
983 } /* End twa_empty_response_queue_large() */
984
985 /* This function passes sense keys from firmware to scsi layer */
986 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
987 {
988 TW_Command_Full *full_command_packet;
989 unsigned short error;
990 int retval = 1;
991 char *error_str;
992
993 full_command_packet = tw_dev->command_packet_virt[request_id];
994
995 /* Check for embedded error string */
996 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
997
998 /* Don't print error for Logical unit not supported during rollcall */
999 error = le16_to_cpu(full_command_packet->header.status_block.error);
1000 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
1001 if (print_host)
1002 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1003 tw_dev->host->host_no,
1004 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1005 full_command_packet->header.status_block.error,
1006 error_str[0] == '\0' ?
1007 twa_string_lookup(twa_error_table,
1008 full_command_packet->header.status_block.error) : error_str,
1009 full_command_packet->header.err_specific_desc);
1010 else
1011 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1012 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1013 full_command_packet->header.status_block.error,
1014 error_str[0] == '\0' ?
1015 twa_string_lookup(twa_error_table,
1016 full_command_packet->header.status_block.error) : error_str,
1017 full_command_packet->header.err_specific_desc);
1018 }
1019
1020 if (copy_sense) {
1021 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1022 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1023 retval = TW_ISR_DONT_RESULT;
1024 goto out;
1025 }
1026 retval = 0;
1027 out:
1028 return retval;
1029 } /* End twa_fill_sense() */
1030
1031 /* This function will free up device extension resources */
1032 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1033 {
1034 if (tw_dev->command_packet_virt[0])
1035 pci_free_consistent(tw_dev->tw_pci_dev,
1036 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1037 tw_dev->command_packet_virt[0],
1038 tw_dev->command_packet_phys[0]);
1039
1040 if (tw_dev->generic_buffer_virt[0])
1041 pci_free_consistent(tw_dev->tw_pci_dev,
1042 TW_SECTOR_SIZE*TW_Q_LENGTH,
1043 tw_dev->generic_buffer_virt[0],
1044 tw_dev->generic_buffer_phys[0]);
1045
1046 kfree(tw_dev->event_queue[0]);
1047 } /* End twa_free_device_extension() */
1048
1049 /* This function will free a request id */
1050 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1051 {
1052 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1053 tw_dev->state[request_id] = TW_S_FINISHED;
1054 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1055 } /* End twa_free_request_id() */
1056
1057 /* This function will get parameter table entries from the firmware */
1058 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1059 {
1060 TW_Command_Full *full_command_packet;
1061 TW_Command *command_packet;
1062 TW_Param_Apache *param;
1063 void *retval = NULL;
1064
1065 /* Setup the command packet */
1066 full_command_packet = tw_dev->command_packet_virt[request_id];
1067 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1068 command_packet = &full_command_packet->command.oldcommand;
1069
1070 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1071 command_packet->size = TW_COMMAND_SIZE;
1072 command_packet->request_id = request_id;
1073 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1074
1075 /* Now setup the param */
1076 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1077 memset(param, 0, TW_SECTOR_SIZE);
1078 param->table_id = cpu_to_le16(table_id | 0x8000);
1079 param->parameter_id = cpu_to_le16(parameter_id);
1080 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1081
1082 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1083 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1084
1085 /* Post the command packet to the board */
1086 twa_post_command_packet(tw_dev, request_id, 1);
1087
1088 /* Poll for completion */
1089 if (twa_poll_response(tw_dev, request_id, 30))
1090 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1091 else
1092 retval = (void *)&(param->data[0]);
1093
1094 tw_dev->posted_request_count--;
1095 tw_dev->state[request_id] = TW_S_INITIAL;
1096
1097 return retval;
1098 } /* End twa_get_param() */
1099
1100 /* This function will assign an available request id */
1101 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1102 {
1103 *request_id = tw_dev->free_queue[tw_dev->free_head];
1104 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1105 tw_dev->state[*request_id] = TW_S_STARTED;
1106 } /* End twa_get_request_id() */
1107
1108 /* This function will send an initconnection command to controller */
1109 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1110 u32 set_features, unsigned short current_fw_srl,
1111 unsigned short current_fw_arch_id,
1112 unsigned short current_fw_branch,
1113 unsigned short current_fw_build,
1114 unsigned short *fw_on_ctlr_srl,
1115 unsigned short *fw_on_ctlr_arch_id,
1116 unsigned short *fw_on_ctlr_branch,
1117 unsigned short *fw_on_ctlr_build,
1118 u32 *init_connect_result)
1119 {
1120 TW_Command_Full *full_command_packet;
1121 TW_Initconnect *tw_initconnect;
1122 int request_id = 0, retval = 1;
1123
1124 /* Initialize InitConnection command packet */
1125 full_command_packet = tw_dev->command_packet_virt[request_id];
1126 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1127 full_command_packet->header.header_desc.size_header = 128;
1128
1129 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1130 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1131 tw_initconnect->request_id = request_id;
1132 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1133 tw_initconnect->features = set_features;
1134
1135 /* Turn on 64-bit sgl support if we need to */
1136 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1137
1138 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1139
1140 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1141 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1142 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1143 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1144 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1145 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1146 } else
1147 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1148
1149 /* Send command packet to the board */
1150 twa_post_command_packet(tw_dev, request_id, 1);
1151
1152 /* Poll for completion */
1153 if (twa_poll_response(tw_dev, request_id, 30)) {
1154 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1155 } else {
1156 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1157 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1158 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1159 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1160 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1161 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1162 }
1163 retval = 0;
1164 }
1165
1166 tw_dev->posted_request_count--;
1167 tw_dev->state[request_id] = TW_S_INITIAL;
1168
1169 return retval;
1170 } /* End twa_initconnection() */
1171
1172 /* This function will initialize the fields of a device extension */
1173 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1174 {
1175 int i, retval = 1;
1176
1177 /* Initialize command packet buffers */
1178 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1179 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1180 goto out;
1181 }
1182
1183 /* Initialize generic buffer */
1184 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1185 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1186 goto out;
1187 }
1188
1189 /* Allocate event info space */
1190 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1191 if (!tw_dev->event_queue[0]) {
1192 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1193 goto out;
1194 }
1195
1196
1197 for (i = 0; i < TW_Q_LENGTH; i++) {
1198 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1199 tw_dev->free_queue[i] = i;
1200 tw_dev->state[i] = TW_S_INITIAL;
1201 }
1202
1203 tw_dev->pending_head = TW_Q_START;
1204 tw_dev->pending_tail = TW_Q_START;
1205 tw_dev->free_head = TW_Q_START;
1206 tw_dev->free_tail = TW_Q_START;
1207 tw_dev->error_sequence_id = 1;
1208 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1209
1210 mutex_init(&tw_dev->ioctl_lock);
1211 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1212
1213 retval = 0;
1214 out:
1215 return retval;
1216 } /* End twa_initialize_device_extension() */
1217
1218 /* This function is the interrupt service routine */
1219 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1220 {
1221 int request_id, error = 0;
1222 u32 status_reg_value;
1223 TW_Response_Queue response_que;
1224 TW_Command_Full *full_command_packet;
1225 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1226 int handled = 0;
1227
1228 /* Get the per adapter lock */
1229 spin_lock(tw_dev->host->host_lock);
1230
1231 /* Read the registers */
1232 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1233
1234 /* Check if this is our interrupt, otherwise bail */
1235 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1236 goto twa_interrupt_bail;
1237
1238 handled = 1;
1239
1240 /* If we are resetting, bail */
1241 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1242 goto twa_interrupt_bail;
1243
1244 /* Check controller for errors */
1245 if (twa_check_bits(status_reg_value)) {
1246 if (twa_decode_bits(tw_dev, status_reg_value)) {
1247 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1248 goto twa_interrupt_bail;
1249 }
1250 }
1251
1252 /* Handle host interrupt */
1253 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1254 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1255
1256 /* Handle attention interrupt */
1257 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1258 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1259 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1260 twa_get_request_id(tw_dev, &request_id);
1261
1262 error = twa_aen_read_queue(tw_dev, request_id);
1263 if (error) {
1264 tw_dev->state[request_id] = TW_S_COMPLETED;
1265 twa_free_request_id(tw_dev, request_id);
1266 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1267 }
1268 }
1269 }
1270
1271 /* Handle command interrupt */
1272 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1273 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1274 /* Drain as many pending commands as we can */
1275 while (tw_dev->pending_request_count > 0) {
1276 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1277 if (tw_dev->state[request_id] != TW_S_PENDING) {
1278 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1279 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1280 goto twa_interrupt_bail;
1281 }
1282 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1283 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1284 tw_dev->pending_request_count--;
1285 } else {
1286 /* If we get here, we will continue re-posting on the next command interrupt */
1287 break;
1288 }
1289 }
1290 }
1291
1292 /* Handle response interrupt */
1293 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1294
1295 /* Drain the response queue from the board */
1296 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1297 /* Complete the response */
1298 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1299 request_id = TW_RESID_OUT(response_que.response_id);
1300 full_command_packet = tw_dev->command_packet_virt[request_id];
1301 error = 0;
1302 /* Check for command packet errors */
1303 if (full_command_packet->command.newcommand.status != 0) {
1304 if (tw_dev->srb[request_id] != NULL) {
1305 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1306 } else {
1307 /* Skip ioctl error prints */
1308 if (request_id != tw_dev->chrdev_request_id) {
1309 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1310 }
1311 }
1312 }
1313
1314 /* Check for correct state */
1315 if (tw_dev->state[request_id] != TW_S_POSTED) {
1316 if (tw_dev->srb[request_id] != NULL) {
1317 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1318 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1319 goto twa_interrupt_bail;
1320 }
1321 }
1322
1323 /* Check for internal command completion */
1324 if (tw_dev->srb[request_id] == NULL) {
1325 if (request_id != tw_dev->chrdev_request_id) {
1326 if (twa_aen_complete(tw_dev, request_id))
1327 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1328 } else {
1329 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1330 wake_up(&tw_dev->ioctl_wqueue);
1331 }
1332 } else {
1333 struct scsi_cmnd *cmd;
1334
1335 cmd = tw_dev->srb[request_id];
1336
1337 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1338 /* If no error command was a success */
1339 if (error == 0) {
1340 cmd->result = (DID_OK << 16);
1341 }
1342
1343 /* If error, command failed */
1344 if (error == 1) {
1345 /* Ask for a host reset */
1346 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1347 }
1348
1349 /* Report residual bytes for single sgl */
1350 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1351 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1352 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1353 }
1354
1355 /* Now complete the io */
1356 tw_dev->state[request_id] = TW_S_COMPLETED;
1357 twa_free_request_id(tw_dev, request_id);
1358 tw_dev->posted_request_count--;
1359 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1360 twa_unmap_scsi_data(tw_dev, request_id);
1361 }
1362
1363 /* Check for valid status after each drain */
1364 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1365 if (twa_check_bits(status_reg_value)) {
1366 if (twa_decode_bits(tw_dev, status_reg_value)) {
1367 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1368 goto twa_interrupt_bail;
1369 }
1370 }
1371 }
1372 }
1373
1374 twa_interrupt_bail:
1375 spin_unlock(tw_dev->host->host_lock);
1376 return IRQ_RETVAL(handled);
1377 } /* End twa_interrupt() */
1378
1379 /* This function will load the request id and various sgls for ioctls */
1380 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1381 {
1382 TW_Command *oldcommand;
1383 TW_Command_Apache *newcommand;
1384 TW_SG_Entry *sgl;
1385 unsigned int pae = 0;
1386
1387 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1388 pae = 1;
1389
1390 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1391 newcommand = &full_command_packet->command.newcommand;
1392 newcommand->request_id__lunl =
1393 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1394 if (length) {
1395 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1396 newcommand->sg_list[0].length = cpu_to_le32(length);
1397 }
1398 newcommand->sgl_entries__lunh =
1399 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1400 } else {
1401 oldcommand = &full_command_packet->command.oldcommand;
1402 oldcommand->request_id = request_id;
1403
1404 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1405 /* Load the sg list */
1406 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1407 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1408 else
1409 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1410 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1411 sgl->length = cpu_to_le32(length);
1412
1413 oldcommand->size += pae;
1414 }
1415 }
1416 } /* End twa_load_sgl() */
1417
1418 /* This function will perform a pci-dma mapping for a scatter gather list */
1419 static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1420 {
1421 int use_sg;
1422 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1423
1424 use_sg = scsi_dma_map(cmd);
1425 if (!use_sg)
1426 return 0;
1427 else if (use_sg < 0) {
1428 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1429 return 0;
1430 }
1431
1432 cmd->SCp.phase = TW_PHASE_SGLIST;
1433 cmd->SCp.have_data_in = use_sg;
1434
1435 return use_sg;
1436 } /* End twa_map_scsi_sg_data() */
1437
1438 /* This function will poll for a response interrupt of a request */
1439 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1440 {
1441 int retval = 1, found = 0, response_request_id;
1442 TW_Response_Queue response_queue;
1443 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1444
1445 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1446 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1447 response_request_id = TW_RESID_OUT(response_queue.response_id);
1448 if (request_id != response_request_id) {
1449 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1450 goto out;
1451 }
1452 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1453 if (full_command_packet->command.newcommand.status != 0) {
1454 /* bad response */
1455 twa_fill_sense(tw_dev, request_id, 0, 0);
1456 goto out;
1457 }
1458 found = 1;
1459 } else {
1460 if (full_command_packet->command.oldcommand.status != 0) {
1461 /* bad response */
1462 twa_fill_sense(tw_dev, request_id, 0, 0);
1463 goto out;
1464 }
1465 found = 1;
1466 }
1467 }
1468
1469 if (found)
1470 retval = 0;
1471 out:
1472 return retval;
1473 } /* End twa_poll_response() */
1474
1475 /* This function will poll the status register for a flag */
1476 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1477 {
1478 u32 status_reg_value;
1479 unsigned long before;
1480 int retval = 1;
1481
1482 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1483 before = jiffies;
1484
1485 if (twa_check_bits(status_reg_value))
1486 twa_decode_bits(tw_dev, status_reg_value);
1487
1488 while ((status_reg_value & flag) != flag) {
1489 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1490
1491 if (twa_check_bits(status_reg_value))
1492 twa_decode_bits(tw_dev, status_reg_value);
1493
1494 if (time_after(jiffies, before + HZ * seconds))
1495 goto out;
1496
1497 msleep(50);
1498 }
1499 retval = 0;
1500 out:
1501 return retval;
1502 } /* End twa_poll_status() */
1503
1504 /* This function will poll the status register for disappearance of a flag */
1505 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1506 {
1507 u32 status_reg_value;
1508 unsigned long before;
1509 int retval = 1;
1510
1511 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1512 before = jiffies;
1513
1514 if (twa_check_bits(status_reg_value))
1515 twa_decode_bits(tw_dev, status_reg_value);
1516
1517 while ((status_reg_value & flag) != 0) {
1518 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1519 if (twa_check_bits(status_reg_value))
1520 twa_decode_bits(tw_dev, status_reg_value);
1521
1522 if (time_after(jiffies, before + HZ * seconds))
1523 goto out;
1524
1525 msleep(50);
1526 }
1527 retval = 0;
1528 out:
1529 return retval;
1530 } /* End twa_poll_status_gone() */
1531
1532 /* This function will attempt to post a command packet to the board */
1533 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1534 {
1535 u32 status_reg_value;
1536 dma_addr_t command_que_value;
1537 int retval = 1;
1538
1539 command_que_value = tw_dev->command_packet_phys[request_id];
1540
1541 /* For 9650SE/9690SA write the low 4 bytes first */
1542 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1543 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1544 command_que_value += TW_COMMAND_OFFSET;
1545 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1546 }
1547
1548 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1549
1550 if (twa_check_bits(status_reg_value))
1551 twa_decode_bits(tw_dev, status_reg_value);
1552
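/* Defer this command if other requests are already pending ahead of it or the controller's command queue is full */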
1553 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1554
1555 /* Only pend internal driver commands */
1556 if (!internal) {
1557 retval = SCSI_MLQUEUE_HOST_BUSY;
1558 goto out;
1559 }
1560
1561 /* Couldn't post the command packet, so we do it later */
1562 if (tw_dev->state[request_id] != TW_S_PENDING) {
1563 tw_dev->state[request_id] = TW_S_PENDING;
1564 tw_dev->pending_request_count++;
1565 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1566 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1567 }
1568 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1569 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1570 }
1571 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1572 goto out;
1573 } else {
1574 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1575 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1576 /* Now write upper 4 bytes */
1577 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1578 } else {
1579 if (sizeof(dma_addr_t) > 4) {
1580 command_que_value += TW_COMMAND_OFFSET;
1581 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1582 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1583 } else {
1584 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1585 }
1586 }
1587 tw_dev->state[request_id] = TW_S_POSTED;
1588 tw_dev->posted_request_count++;
1589 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1590 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1591 }
1592 }
1593 retval = 0;
1594 out:
1595 return retval;
1596 } /* End twa_post_command_packet() */
1597
1598 /* This function will reset a device extension */
1599 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1600 {
1601 int i = 0;
1602 int retval = 1;
1603 unsigned long flags = 0;
1604
1605 set_bit(TW_IN_RESET, &tw_dev->flags);
1606 TW_DISABLE_INTERRUPTS(tw_dev);
1607 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1608 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1609
1610 /* Abort all requests that are in progress */
1611 for (i = 0; i < TW_Q_LENGTH; i++) {
1612 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1613 (tw_dev->state[i] != TW_S_INITIAL) &&
1614 (tw_dev->state[i] != TW_S_COMPLETED)) {
1615 if (tw_dev->srb[i]) {
1616 tw_dev->srb[i]->result = (DID_RESET << 16);
1617 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
1618 twa_unmap_scsi_data(tw_dev, i);
1619 }
1620 }
1621 }
1622
1623 /* Reset queues and counts */
1624 for (i = 0; i < TW_Q_LENGTH; i++) {
1625 tw_dev->free_queue[i] = i;
1626 tw_dev->state[i] = TW_S_INITIAL;
1627 }
1628 tw_dev->free_head = TW_Q_START;
1629 tw_dev->free_tail = TW_Q_START;
1630 tw_dev->posted_request_count = 0;
1631 tw_dev->pending_request_count = 0;
1632 tw_dev->pending_head = TW_Q_START;
1633 tw_dev->pending_tail = TW_Q_START;
1634 tw_dev->reset_print = 0;
1635
1636 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1637
1638 if (twa_reset_sequence(tw_dev, 1))
1639 goto out;
1640
1641 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1642 clear_bit(TW_IN_RESET, &tw_dev->flags);
1643 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1644
1645 retval = 0;
1646 out:
1647 return retval;
1648 } /* End twa_reset_device_extension() */
1649
1650 /* This function will reset a controller */
1651 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1652 {
1653 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1654
1655 while (tries < TW_MAX_RESET_TRIES) {
1656 if (do_soft_reset) {
1657 TW_SOFT_RESET(tw_dev);
1658 /* Clear pchip/response queue on 9550SX */
1659 if (twa_empty_response_queue_large(tw_dev)) {
1660 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1661 do_soft_reset = 1;
1662 tries++;
1663 continue;
1664 }
1665 }
1666
1667 /* Make sure the controller is in a good state; after a soft reset we also wait for the attention interrupt */
1668 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1669 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1670 do_soft_reset = 1;
1671 tries++;
1672 continue;
1673 }
1674
1675 /* Empty response queue */
1676 if (twa_empty_response_queue(tw_dev)) {
1677 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1678 do_soft_reset = 1;
1679 tries++;
1680 continue;
1681 }
1682
1683 flashed = 0;
1684
1685 /* Check for compatibility/flash */
1686 if (twa_check_srl(tw_dev, &flashed)) {
1687 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1688 do_soft_reset = 1;
1689 tries++;
1690 continue;
1691 } else {
1692 if (flashed) {
1693 tries++;
1694 continue;
1695 }
1696 }
1697
1698 /* Drain the AEN queue */
1699 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1700 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1701 do_soft_reset = 1;
1702 tries++;
1703 continue;
1704 }
1705
1706 /* If we got here, controller is in a good state */
1707 retval = 0;
1708 goto out;
1709 }
1710 out:
1711 return retval;
1712 } /* End twa_reset_sequence() */
1713
1714 /* This function returns unit geometry in cylinders/heads/sectors */
1715 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1716 {
1717 int heads, sectors, cylinders;
1718 TW_Device_Extension *tw_dev;
1719
1720 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1721
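/* Units of 1 GB (0x200000 512-byte sectors) or larger get 255 heads/63 sectors, smaller units get 64/32 */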
1722 if (capacity >= 0x200000) {
1723 heads = 255;
1724 sectors = 63;
1725 cylinders = sector_div(capacity, heads * sectors);
1726 } else {
1727 heads = 64;
1728 sectors = 32;
1729 cylinders = sector_div(capacity, heads * sectors);
1730 }
1731
1732 geom[0] = heads;
1733 geom[1] = sectors;
1734 geom[2] = cylinders;
1735
1736 return 0;
1737 } /* End twa_scsi_biosparam() */
1738
1739 /* This is the new scsi eh reset function */
1740 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1741 {
1742 TW_Device_Extension *tw_dev = NULL;
1743 int retval = FAILED;
1744
1745 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1746
1747 tw_dev->num_resets++;
1748
1749 sdev_printk(KERN_WARNING, SCpnt->device,
1750 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1751 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1752
1753 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1754 mutex_lock(&tw_dev->ioctl_lock);
1755
1756 /* Now reset the card and some of the device extension data */
1757 if (twa_reset_device_extension(tw_dev)) {
1758 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1759 goto out;
1760 }
1761
1762 retval = SUCCESS;
1763 out:
1764 mutex_unlock(&tw_dev->ioctl_lock);
1765 return retval;
1766 } /* End twa_scsi_eh_reset() */
1767
1768 /* This is the main scsi queue function to handle scsi opcodes */
1769 static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1770 {
1771 int request_id, retval;
1772 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1773
1774 /* If we are resetting due to a timed out ioctl, report as busy */
1775 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1776 retval = SCSI_MLQUEUE_HOST_BUSY;
1777 goto out;
1778 }
1779
1780 /* Check if this FW supports luns */
1781 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1782 SCpnt->result = (DID_BAD_TARGET << 16);
1783 done(SCpnt);
1784 retval = 0;
1785 goto out;
1786 }
1787
1788 /* Save done function into scsi_cmnd struct */
1789 SCpnt->scsi_done = done;
1790
1791 /* Get a free request id */
1792 twa_get_request_id(tw_dev, &request_id);
1793
1794 /* Save the scsi command for use by the ISR */
1795 tw_dev->srb[request_id] = SCpnt;
1796
1797 /* Initialize phase to zero */
1798 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1799
1800 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1801 switch (retval) {
1802 case SCSI_MLQUEUE_HOST_BUSY:
1803 twa_free_request_id(tw_dev, request_id);
1804 break;
1805 case 1:
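/* Building the command failed: complete it with DID_ERROR and release the request id */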
1806 tw_dev->state[request_id] = TW_S_COMPLETED;
1807 twa_free_request_id(tw_dev, request_id);
1808 SCpnt->result = (DID_ERROR << 16);
1809 done(SCpnt);
1810 retval = 0;
1811 }
1812 out:
1813 return retval;
1814 } /* End twa_scsi_queue() */
1815
1816 /* This function hands SCSI CDBs to the firmware */
1817 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1818 {
1819 TW_Command_Full *full_command_packet;
1820 TW_Command_Apache *command_packet;
1821 u32 num_sectors = 0x0;
1822 int i, sg_count;
1823 struct scsi_cmnd *srb = NULL;
1824 struct scatterlist *sglist = NULL, *sg;
1825 int retval = 1;
1826
1827 if (tw_dev->srb[request_id]) {
1828 srb = tw_dev->srb[request_id];
1829 if (scsi_sglist(srb))
1830 sglist = scsi_sglist(srb);
1831 }
1832
1833 /* Initialize command packet */
1834 full_command_packet = tw_dev->command_packet_virt[request_id];
1835 full_command_packet->header.header_desc.size_header = 128;
1836 full_command_packet->header.status_block.error = 0;
1837 full_command_packet->header.status_block.severity__reserved = 0;
1838
1839 command_packet = &full_command_packet->command.newcommand;
1840 command_packet->status = 0;
1841 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1842
1843 /* We forced 16 byte cdb use earlier */
1844 if (!cdb)
1845 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1846 else
1847 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1848
1849 if (srb) {
1850 command_packet->unit = srb->device->id;
1851 command_packet->request_id__lunl =
1852 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1853 } else {
1854 command_packet->request_id__lunl =
1855 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1856 command_packet->unit = 0;
1857 }
1858
1859 command_packet->sgl_offset = 16;
1860
1861 if (!sglistarg) {
1862 /* Map sglist from scsi layer to cmd packet */
1863
1864 if (scsi_sg_count(srb)) {
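/* A single small sg entry is bounced through the preallocated generic buffer to satisfy the minimum SGL length */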
1865 if ((scsi_sg_count(srb) == 1) &&
1866 (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
1867 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1868 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1869 scsi_sg_copy_to_buffer(srb,
1870 tw_dev->generic_buffer_virt[request_id],
1871 TW_SECTOR_SIZE);
1872 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1873 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1874 } else {
1875 sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
1876 if (sg_count == 0)
1877 goto out;
1878
1879 scsi_for_each_sg(srb, sg, sg_count, i) {
1880 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1881 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1882 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1883 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1884 goto out;
1885 }
1886 }
1887 }
1888 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1889 }
1890 } else {
1891 /* Internal cdb post */
1892 for (i = 0; i < use_sg; i++) {
1893 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1894 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1895 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1896 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1897 goto out;
1898 }
1899 }
1900 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1901 }
1902
1903 if (srb) {
1904 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1905 num_sectors = (u32)srb->cmnd[4];
1906
1907 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1908 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1909 }
1910
1911 /* Update sector statistics */
1912 tw_dev->sector_count = num_sectors;
1913 if (tw_dev->sector_count > tw_dev->max_sector_count)
1914 tw_dev->max_sector_count = tw_dev->sector_count;
1915
1916 /* Update SG statistics */
1917 if (srb) {
1918 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1919 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1920 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1921 }
1922
1923 /* Now post the command to the board */
1924 if (srb) {
1925 retval = twa_post_command_packet(tw_dev, request_id, 0);
1926 } else {
1927 twa_post_command_packet(tw_dev, request_id, 1);
1928 retval = 0;
1929 }
1930 out:
1931 return retval;
1932 } /* End twa_scsiop_execute_scsi() */
1933
1934 /* This function completes an execute scsi operation */
1935 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1936 {
1937 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1938
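/* Small single-sg reads were bounced through the generic buffer, so copy the data back into the scatterlist */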
1939 if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1940 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1941 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1942 if (scsi_sg_count(cmd) == 1) {
1943 void *buf = tw_dev->generic_buffer_virt[request_id];
1944
1945 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1946 }
1947 }
1948 } /* End twa_scsiop_execute_scsi_complete() */
1949
1950 /* This function tells the controller to shut down */
1951 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1952 {
1953 /* Disable interrupts */
1954 TW_DISABLE_INTERRUPTS(tw_dev);
1955
1956 /* Free up the IRQ */
1957 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1958
1959 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1960
1961 /* Tell the card we are shutting down */
1962 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1963 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1964 } else {
1965 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1966 }
1967
1968 /* Clear all interrupts just before exit */
1969 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1970 } /* End __twa_shutdown() */
1971
1972 /* Wrapper for __twa_shutdown */
1973 static void twa_shutdown(struct pci_dev *pdev)
1974 {
1975 struct Scsi_Host *host = pci_get_drvdata(pdev);
1976 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1977
1978 __twa_shutdown(tw_dev);
1979 } /* End twa_shutdown() */
1980
1981 /* This function looks up the message string for a given code in the supplied table */
1982 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1983 {
1984 int index;
1985
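/* Walk the table until the code matches or the NULL-text sentinel entry is reached */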
1986 for (index = 0; ((code != table[index].code) &&
1987 (table[index].text != (char *)0)); index++);
1988 return(table[index].text);
1989 } /* End twa_string_lookup() */
1990
1991 /* This function will perform a pci-dma unmap */
1992 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1993 {
1994 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1995
1996 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1997 scsi_dma_unmap(cmd);
1998 } /* End twa_unmap_scsi_data() */
1999
2000 /* This function gets called when a disk is coming on-line */
2001 static int twa_slave_configure(struct scsi_device *sdev)
2002 {
2003 /* Force 60 second timeout */
2004 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
2005
2006 return 0;
2007 } /* End twa_slave_configure() */
2008
2009 /* scsi_host_template initializer */
2010 static struct scsi_host_template driver_template = {
2011 .module = THIS_MODULE,
2012 .name = "3ware 9000 Storage Controller",
2013 .queuecommand = twa_scsi_queue,
2014 .eh_host_reset_handler = twa_scsi_eh_reset,
2015 .bios_param = twa_scsi_biosparam,
2016 .change_queue_depth = twa_change_queue_depth,
2017 .can_queue = TW_Q_LENGTH-2,
2018 .slave_configure = twa_slave_configure,
2019 .this_id = -1,
2020 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
2021 .max_sectors = TW_MAX_SECTORS,
2022 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2023 .use_clustering = ENABLE_CLUSTERING,
2024 .shost_attrs = twa_host_attrs,
2025 .emulated = 1
2026 };
2027
2028 /* This function will probe and initialize a card */
2029 static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2030 {
2031 struct Scsi_Host *host = NULL;
2032 TW_Device_Extension *tw_dev;
2033 unsigned long mem_addr, mem_len;
2034 int retval = -ENODEV;
2035
2036 retval = pci_enable_device(pdev);
2037 if (retval) {
2038 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2039 goto out_disable_device;
2040 }
2041
2042 pci_set_master(pdev);
2043 pci_try_set_mwi(pdev);
2044
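/* Try a 64-bit DMA mask first, falling back to 32-bit if it cannot be set */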
2045 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2046 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2047 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2048 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2049 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2050 retval = -ENODEV;
2051 goto out_disable_device;
2052 }
2053
2054 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2055 if (!host) {
2056 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2057 retval = -ENOMEM;
2058 goto out_disable_device;
2059 }
2060 tw_dev = (TW_Device_Extension *)host->hostdata;
2061
2062 /* Save values to device extension */
2063 tw_dev->host = host;
2064 tw_dev->tw_pci_dev = pdev;
2065
2066 if (twa_initialize_device_extension(tw_dev)) {
2067 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2068 goto out_free_device_extension;
2069 }
2070
2071 /* Request IO regions */
2072 retval = pci_request_regions(pdev, "3w-9xxx");
2073 if (retval) {
2074 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2075 goto out_free_device_extension;
2076 }
2077
2078 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2079 mem_addr = pci_resource_start(pdev, 1);
2080 mem_len = pci_resource_len(pdev, 1);
2081 } else {
2082 mem_addr = pci_resource_start(pdev, 2);
2083 mem_len = pci_resource_len(pdev, 2);
2084 }
2085
2086 /* Save base address */
2087 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2088 if (!tw_dev->base_addr) {
2089 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2090 goto out_release_mem_region;
2091 }
2092
2093 /* Disable interrupts on the card */
2094 TW_DISABLE_INTERRUPTS(tw_dev);
2095
2096 /* Initialize the card */
2097 if (twa_reset_sequence(tw_dev, 0))
2098 goto out_iounmap;
2099
2100 /* Set host specific parameters */
2101 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2102 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2103 host->max_id = TW_MAX_UNITS_9650SE;
2104 else
2105 host->max_id = TW_MAX_UNITS;
2106
2107 host->max_cmd_len = TW_MAX_CDB_LEN;
2108
2109 /* Channels aren't supported by the adapter */
2110 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2111 host->max_channel = 0;
2112
2113 /* Register the card with the kernel SCSI layer */
2114 retval = scsi_add_host(host, &pdev->dev);
2115 if (retval) {
2116 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2117 goto out_iounmap;
2118 }
2119
2120 pci_set_drvdata(pdev, host);
2121
2122 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2123 host->host_no, mem_addr, pdev->irq);
2124 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2125 host->host_no,
2126 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2127 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2128 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2129 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2130 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2131 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2132
2133 /* Try to enable MSI */
2134 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2135 !pci_enable_msi(pdev))
2136 set_bit(TW_USING_MSI, &tw_dev->flags);
2137
2138 /* Now setup the interrupt handler */
2139 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2140 if (retval) {
2141 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2142 goto out_remove_host;
2143 }
2144
2145 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2146 twa_device_extension_count++;
2147
2148 /* Re-enable interrupts on the card */
2149 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2150
2151 /* Finally, scan the host */
2152 scsi_scan_host(host);
2153
2154 if (twa_major == -1) {
2155 if ((twa_major = register_chrdev(0, "twa", &twa_fops)) < 0)
2156 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2157 }
2158 return 0;
2159
2160 out_remove_host:
2161 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2162 pci_disable_msi(pdev);
2163 scsi_remove_host(host);
2164 out_iounmap:
2165 iounmap(tw_dev->base_addr);
2166 out_release_mem_region:
2167 pci_release_regions(pdev);
2168 out_free_device_extension:
2169 twa_free_device_extension(tw_dev);
2170 scsi_host_put(host);
2171 out_disable_device:
2172 pci_disable_device(pdev);
2173
2174 return retval;
2175 } /* End twa_probe() */
2176
2177 /* This function is called to remove a device */
2178 static void twa_remove(struct pci_dev *pdev)
2179 {
2180 struct Scsi_Host *host = pci_get_drvdata(pdev);
2181 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2182
2183 scsi_remove_host(tw_dev->host);
2184
2185 /* Unregister character device */
2186 if (twa_major >= 0) {
2187 unregister_chrdev(twa_major, "twa");
2188 twa_major = -1;
2189 }
2190
2191 /* Shutdown the card */
2192 __twa_shutdown(tw_dev);
2193
2194 /* Disable MSI if enabled */
2195 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2196 pci_disable_msi(pdev);
2197
2198 /* Free IO remapping */
2199 iounmap(tw_dev->base_addr);
2200
2201 /* Free up the mem region */
2202 pci_release_regions(pdev);
2203
2204 /* Free up device extension resources */
2205 twa_free_device_extension(tw_dev);
2206
2207 scsi_host_put(tw_dev->host);
2208 pci_disable_device(pdev);
2209 twa_device_extension_count--;
2210 } /* End twa_remove() */
2211
2212 #ifdef CONFIG_PM
2213 /* This function is called on PCI suspend */
2214 static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2215 {
2216 struct Scsi_Host *host = pci_get_drvdata(pdev);
2217 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2218
2219 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2220
2221 TW_DISABLE_INTERRUPTS(tw_dev);
2222 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2223
2224 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2225 pci_disable_msi(pdev);
2226
2227 /* Tell the card we are shutting down */
2228 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2229 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2230 } else {
2231 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2232 }
2233 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2234
2235 pci_save_state(pdev);
2236 pci_disable_device(pdev);
2237 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2238
2239 return 0;
2240 } /* End twa_suspend() */
2241
2242 /* This function is called on PCI resume */
2243 static int twa_resume(struct pci_dev *pdev)
2244 {
2245 int retval = 0;
2246 struct Scsi_Host *host = pci_get_drvdata(pdev);
2247 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2248
2249 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2250 pci_set_power_state(pdev, PCI_D0);
2251 pci_enable_wake(pdev, PCI_D0, 0);
2252 pci_restore_state(pdev);
2253
2254 retval = pci_enable_device(pdev);
2255 if (retval) {
2256 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2257 return retval;
2258 }
2259
2260 pci_set_master(pdev);
2261 pci_try_set_mwi(pdev);
2262
2263 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2264 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2265 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2266 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2267 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2268 retval = -ENODEV;
2269 goto out_disable_device;
2270 }
2271
2272 /* Initialize the card */
2273 if (twa_reset_sequence(tw_dev, 0)) {
2274 retval = -ENODEV;
2275 goto out_disable_device;
2276 }
2277
2278 /* Now setup the interrupt handler */
2279 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2280 if (retval) {
2281 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2282 retval = -ENODEV;
2283 goto out_disable_device;
2284 }
2285
2287 /* Re-enable MSI if it was in use before the suspend */
2287 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2288 pci_enable_msi(pdev);
2289
2290 /* Re-enable interrupts on the card */
2291 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2292
2293 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2294 return 0;
2295
2296 out_disable_device:
2297 scsi_remove_host(host);
2298 pci_disable_device(pdev);
2299
2300 return retval;
2301 } /* End twa_resume() */
2302 #endif
2303
2304 /* PCI Devices supported by this driver */
2305 static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2306 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2307 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2308 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2309 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2310 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2311 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2312 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2313 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2314 { }
2315 };
2316 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2317
2318 /* pci_driver initializer */
2319 static struct pci_driver twa_driver = {
2320 .name = "3w-9xxx",
2321 .id_table = twa_pci_tbl,
2322 .probe = twa_probe,
2323 .remove = twa_remove,
2324 #ifdef CONFIG_PM
2325 .suspend = twa_suspend,
2326 .resume = twa_resume,
2327 #endif
2328 .shutdown = twa_shutdown
2329 };
2330
2331 /* This function is called on driver initialization */
2332 static int __init twa_init(void)
2333 {
2334 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2335
2336 return pci_register_driver(&twa_driver);
2337 } /* End twa_init() */
2338
2339 /* This function is called on driver exit */
2340 static void __exit twa_exit(void)
2341 {
2342 pci_unregister_driver(&twa_driver);
2343 } /* End twa_exit() */
2344
2345 module_init(twa_init);
2346 module_exit(twa_exit);
2347