1 /*
2 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3
4 Written By: Adam Radford <linuxraid@amcc.com>
5 Modifications By: Tom Couch <linuxraid@amcc.com>
6
7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 NO WARRANTY
19 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 solely responsible for determining the appropriateness of using and
24 distributing the Program and assumes all risks associated with its
25 exercise of rights under this Agreement, including but not limited to
26 the risks and costs of program errors, damage to or loss of data,
27 programs or equipment, and unavailability or interruption of operations.
28
29 DISCLAIMER OF LIABILITY
30 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37
38 You should have received a copy of the GNU General Public License
39 along with this program; if not, write to the Free Software
40 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
41
42 Bugs/Comments/Suggestions should be mailed to:
43 linuxraid@amcc.com
44
45 For more information, go to:
46 http://www.amcc.com
47
48 Note: This version of the driver does not contain a bundled firmware
49 image.
50
51 History
52 -------
53 2.26.02.000 - Driver cleanup for kernel submission.
54 2.26.02.001 - Replace schedule_timeout() calls with msleep().
55 2.26.02.002 - Add support for PAE mode.
56 Add lun support.
57 Fix twa_remove() to free irq handler/unregister_chrdev()
58 before shutting down card.
59 Change to new 'change_queue_depth' api.
60 Fix 'handled=1' ISR usage, remove bogus IRQ check.
61 Remove un-needed eh_abort handler.
62 Add support for embedded firmware error strings.
63 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
64 2.26.02.004 - Add support for 9550SX controllers.
65 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
66 2.26.02.006 - Fix 9550SX pchip reset timeout.
67 Add big endian support.
68 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
69 2.26.02.008 - Free irq handler in __twa_shutdown().
70 Serialize reset code.
71 Add support for 9650SE controllers.
72 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
73 2.26.02.010 - Add support for 9690SA controllers.
74 2.26.02.011 - Increase max AENs drained to 256.
75 Add MSI support and "use_msi" module parameter.
76 Fix bug in twa_get_param() on 4GB+.
77 Use pci_resource_len() for ioremap().
78 2.26.02.012 - Add power management support.
79 */
80
81 #include <linux/module.h>
82 #include <linux/reboot.h>
83 #include <linux/spinlock.h>
84 #include <linux/interrupt.h>
85 #include <linux/moduleparam.h>
86 #include <linux/errno.h>
87 #include <linux/types.h>
88 #include <linux/delay.h>
89 #include <linux/pci.h>
90 #include <linux/time.h>
91 #include <linux/mutex.h>
92 #include <linux/smp_lock.h>
93 #include <asm/io.h>
94 #include <asm/irq.h>
95 #include <asm/uaccess.h>
96 #include <scsi/scsi.h>
97 #include <scsi/scsi_host.h>
98 #include <scsi/scsi_tcq.h>
99 #include <scsi/scsi_cmnd.h>
100 #include "3w-9xxx.h"
101
102 /* Globals */
103 #define TW_DRIVER_VERSION "2.26.02.012"
104 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
105 static unsigned int twa_device_extension_count;
106 static int twa_major = -1;
107 extern struct timezone sys_tz;
108
109 /* Module parameters */
110 MODULE_AUTHOR ("AMCC");
111 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
112 MODULE_LICENSE("GPL");
113 MODULE_VERSION(TW_DRIVER_VERSION);
114
115 static int use_msi = 0;
116 module_param(use_msi, int, S_IRUGO);
117 MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
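/* Example (illustrative, not part of the original source): MSI can be requested
   at load time with "modprobe 3w-9xxx use_msi=1"; since the parameter permission
   is S_IRUGO only, it is read-only once the module is loaded. */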
118
119 /* Function prototypes */
120 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
121 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
122 static char *twa_aen_severity_lookup(unsigned char severity_code);
123 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
124 static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
125 static int twa_chrdev_open(struct inode *inode, struct file *file);
126 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
127 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
128 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
129 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
130 u32 set_features, unsigned short current_fw_srl,
131 unsigned short current_fw_arch_id,
132 unsigned short current_fw_branch,
133 unsigned short current_fw_build,
134 unsigned short *fw_on_ctlr_srl,
135 unsigned short *fw_on_ctlr_arch_id,
136 unsigned short *fw_on_ctlr_branch,
137 unsigned short *fw_on_ctlr_build,
138 u32 *init_connect_result);
139 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
140 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
141 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
142 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
143 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
144 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
145 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
146 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
147 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
148 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
149
150 /* Functions */
151
152 /* Show some statistics about the card */
153 static ssize_t twa_show_stats(struct device *dev,
154 struct device_attribute *attr, char *buf)
155 {
156 struct Scsi_Host *host = class_to_shost(dev);
157 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
158 unsigned long flags = 0;
159 ssize_t len;
160
161 spin_lock_irqsave(tw_dev->host->host_lock, flags);
162 len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
163 "Current commands posted: %4d\n"
164 "Max commands posted: %4d\n"
165 "Current pending commands: %4d\n"
166 "Max pending commands: %4d\n"
167 "Last sgl length: %4d\n"
168 "Max sgl length: %4d\n"
169 "Last sector count: %4d\n"
170 "Max sector count: %4d\n"
171 "SCSI Host Resets: %4d\n"
172 "AEN's: %4d\n",
173 TW_DRIVER_VERSION,
174 tw_dev->posted_request_count,
175 tw_dev->max_posted_request_count,
176 tw_dev->pending_request_count,
177 tw_dev->max_pending_request_count,
178 tw_dev->sgl_entries,
179 tw_dev->max_sgl_entries,
180 tw_dev->sector_count,
181 tw_dev->max_sector_count,
182 tw_dev->num_resets,
183 tw_dev->aen_count);
184 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
185 return len;
186 } /* End twa_show_stats() */
187
188 /* This function will set a device's queue depth */
189 static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
190 int reason)
191 {
192 if (reason != SCSI_QDEPTH_DEFAULT)
193 return -EOPNOTSUPP;
194
195 if (queue_depth > TW_Q_LENGTH-2)
196 queue_depth = TW_Q_LENGTH-2;
197 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
198 return queue_depth;
199 } /* End twa_change_queue_depth() */
200
201 /* Create sysfs 'stats' entry */
202 static struct device_attribute twa_host_stats_attr = {
203 .attr = {
204 .name = "stats",
205 .mode = S_IRUGO,
206 },
207 .show = twa_show_stats
208 };
209
210 /* Host attributes initializer */
211 static struct device_attribute *twa_host_attrs[] = {
212 &twa_host_stats_attr,
213 NULL,
214 };
215
216 /* File operations struct for character device */
217 static const struct file_operations twa_fops = {
218 .owner = THIS_MODULE,
219 .ioctl = twa_chrdev_ioctl,
220 .open = twa_chrdev_open,
221 .release = NULL
222 };
223
224 /* This function will complete an aen request from the isr */
225 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
226 {
227 TW_Command_Full *full_command_packet;
228 TW_Command *command_packet;
229 TW_Command_Apache_Header *header;
230 unsigned short aen;
231 int retval = 1;
232
233 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
234 tw_dev->posted_request_count--;
235 aen = le16_to_cpu(header->status_block.error);
236 full_command_packet = tw_dev->command_packet_virt[request_id];
237 command_packet = &full_command_packet->command.oldcommand;
238
239 /* First check for internal completion of set param for time sync */
240 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
241 /* Keep reading the queue in case there are more aen's */
242 if (twa_aen_read_queue(tw_dev, request_id))
243 goto out2;
244 else {
245 retval = 0;
246 goto out;
247 }
248 }
249
250 switch (aen) {
251 case TW_AEN_QUEUE_EMPTY:
252 /* Quit reading the queue if this is the last one */
253 break;
254 case TW_AEN_SYNC_TIME_WITH_HOST:
255 twa_aen_sync_time(tw_dev, request_id);
256 retval = 0;
257 goto out;
258 default:
259 twa_aen_queue_event(tw_dev, header);
260
261 /* If there are more aen's, keep reading the queue */
262 if (twa_aen_read_queue(tw_dev, request_id))
263 goto out2;
264 else {
265 retval = 0;
266 goto out;
267 }
268 }
269 retval = 0;
270 out2:
271 tw_dev->state[request_id] = TW_S_COMPLETED;
272 twa_free_request_id(tw_dev, request_id);
273 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
274 out:
275 return retval;
276 } /* End twa_aen_complete() */
277
278 /* This function will drain aen queue */
279 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
280 {
281 int request_id = 0;
282 char cdb[TW_MAX_CDB_LEN];
283 TW_SG_Entry sglist[1];
284 int finished = 0, count = 0;
285 TW_Command_Full *full_command_packet;
286 TW_Command_Apache_Header *header;
287 unsigned short aen;
288 int first_reset = 0, queue = 0, retval = 1;
289
290 if (no_check_reset)
291 first_reset = 0;
292 else
293 first_reset = 1;
294
295 full_command_packet = tw_dev->command_packet_virt[request_id];
296 memset(full_command_packet, 0, sizeof(TW_Command_Full));
297
298 /* Initialize cdb */
299 memset(&cdb, 0, TW_MAX_CDB_LEN);
300 cdb[0] = REQUEST_SENSE; /* opcode */
301 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
302
303 /* Initialize sglist */
304 memset(&sglist, 0, sizeof(TW_SG_Entry));
305 sglist[0].length = TW_SECTOR_SIZE;
306 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
307
308 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
309 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
310 goto out;
311 }
312
313 /* Mark internal command */
314 tw_dev->srb[request_id] = NULL;
315
316 do {
317 /* Send command to the board */
318 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
319 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
320 goto out;
321 }
322
323 /* Now poll for completion */
324 if (twa_poll_response(tw_dev, request_id, 30)) {
325 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
326 tw_dev->posted_request_count--;
327 goto out;
328 }
329
330 tw_dev->posted_request_count--;
331 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
332 aen = le16_to_cpu(header->status_block.error);
333 queue = 0;
334 count++;
335
336 switch (aen) {
337 case TW_AEN_QUEUE_EMPTY:
338 if (first_reset != 1)
339 goto out;
340 else
341 finished = 1;
342 break;
343 case TW_AEN_SOFT_RESET:
344 if (first_reset == 0)
345 first_reset = 1;
346 else
347 queue = 1;
348 break;
349 case TW_AEN_SYNC_TIME_WITH_HOST:
350 break;
351 default:
352 queue = 1;
353 }
354
355 /* Now queue the event info if needed */
356 if (queue)
357 twa_aen_queue_event(tw_dev, header);
358 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
359
360 if (count == TW_MAX_AEN_DRAIN)
361 goto out;
362
363 retval = 0;
364 out:
365 tw_dev->state[request_id] = TW_S_INITIAL;
366 return retval;
367 } /* End twa_aen_drain_queue() */
368
369 /* This function will queue an event */
370 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
371 {
372 u32 local_time;
373 struct timeval time;
374 TW_Event *event;
375 unsigned short aen;
376 char host[16];
377 char *error_str;
378
379 tw_dev->aen_count++;
380
381 /* Fill out event info */
382 event = tw_dev->event_queue[tw_dev->error_index];
383
384 /* Check for clobber */
385 host[0] = '\0';
386 if (tw_dev->host) {
387 sprintf(host, " scsi%d:", tw_dev->host->host_no);
388 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
389 tw_dev->aen_clobber = 1;
390 }
391
392 aen = le16_to_cpu(header->status_block.error);
393 memset(event, 0, sizeof(TW_Event));
394
395 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
396 do_gettimeofday(&time);
397 local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
398 event->time_stamp_sec = local_time;
399 event->aen_code = aen;
400 event->retrieved = TW_AEN_NOT_RETRIEVED;
401 event->sequence_id = tw_dev->error_sequence_id;
402 tw_dev->error_sequence_id++;
403
404 /* Check for embedded error string */
405 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
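/* error_str now points just past the terminating NUL of err_specific_desc; if
   the firmware appended a second string there it is used as the AEN text below,
   otherwise error_str[0] == '\0' and twa_string_lookup() supplies the text from
   the static AEN table instead. */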
406
407 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
408 event->parameter_len = strlen(header->err_specific_desc);
409 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
410 if (event->severity != TW_AEN_SEVERITY_DEBUG)
411 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
412 host,
413 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
414 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
415 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
416 header->err_specific_desc);
417 else
418 tw_dev->aen_count--;
419
420 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
421 tw_dev->event_queue_wrapped = 1;
422 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
423 } /* End twa_aen_queue_event() */
424
425 /* This function will read the aen queue from the isr */
426 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
427 {
428 char cdb[TW_MAX_CDB_LEN];
429 TW_SG_Entry sglist[1];
430 TW_Command_Full *full_command_packet;
431 int retval = 1;
432
433 full_command_packet = tw_dev->command_packet_virt[request_id];
434 memset(full_command_packet, 0, sizeof(TW_Command_Full));
435
436 /* Initialize cdb */
437 memset(&cdb, 0, TW_MAX_CDB_LEN);
438 cdb[0] = REQUEST_SENSE; /* opcode */
439 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
440
441 /* Initialize sglist */
442 memset(&sglist, 0, sizeof(TW_SG_Entry));
443 sglist[0].length = TW_SECTOR_SIZE;
444 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
445
446 /* Mark internal command */
447 tw_dev->srb[request_id] = NULL;
448
449 /* Now post the command packet */
450 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
451 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
452 goto out;
453 }
454 retval = 0;
455 out:
456 return retval;
457 } /* End twa_aen_read_queue() */
458
459 /* This function will look up an AEN severity string */
460 static char *twa_aen_severity_lookup(unsigned char severity_code)
461 {
462 char *retval = NULL;
463
464 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
465 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
466 goto out;
467
468 retval = twa_aen_severity_table[severity_code];
469 out:
470 return retval;
471 } /* End twa_aen_severity_lookup() */
472
473 /* This function will sync firmware time with the host time */
474 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
475 {
476 u32 schedulertime;
477 struct timeval utc;
478 TW_Command_Full *full_command_packet;
479 TW_Command *command_packet;
480 TW_Param_Apache *param;
481 u32 local_time;
482
483 /* Fill out the command packet */
484 full_command_packet = tw_dev->command_packet_virt[request_id];
485 memset(full_command_packet, 0, sizeof(TW_Command_Full));
486 command_packet = &full_command_packet->command.oldcommand;
487 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
488 command_packet->request_id = request_id;
489 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
490 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
491 command_packet->size = TW_COMMAND_SIZE;
492 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
493
494 /* Setup the param */
495 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
496 memset(param, 0, TW_SECTOR_SIZE);
497 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
498 param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
499 param->parameter_size_bytes = cpu_to_le16(4);
500
501 /* Convert system time in UTC to local time seconds since last
502 Sunday 12:00AM */
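/* (The Unix epoch, 1970-01-01, fell on a Thursday, so subtracting 3 days shifts
   the reference point to a Sunday before the weekly modulo of 604800 seconds
   is taken below.) */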
503 do_gettimeofday(&utc);
504 local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
505 schedulertime = local_time - (3 * 86400);
506 schedulertime = cpu_to_le32(schedulertime % 604800);
507
508 memcpy(param->data, &schedulertime, sizeof(u32));
509
510 /* Mark internal command */
511 tw_dev->srb[request_id] = NULL;
512
513 /* Now post the command */
514 twa_post_command_packet(tw_dev, request_id, 1);
515 } /* End twa_aen_sync_time() */
516
517 /* This function will allocate memory and check if it is correctly aligned */
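/* which selects the pool carved out of one coherent allocation: 0 = per-request
   command packets, 1 = per-request generic (sector-sized) buffers used for AEN
   reads and parameter get/set. */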
518 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
519 {
520 int i;
521 dma_addr_t dma_handle;
522 unsigned long *cpu_addr;
523 int retval = 1;
524
525 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
526 if (!cpu_addr) {
527 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
528 goto out;
529 }
530
531 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
532 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
533 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
534 goto out;
535 }
536
537 memset(cpu_addr, 0, size*TW_Q_LENGTH);
538
539 for (i = 0; i < TW_Q_LENGTH; i++) {
540 switch(which) {
541 case 0:
542 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
543 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
544 break;
545 case 1:
546 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
547 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
548 break;
549 }
550 }
551 retval = 0;
552 out:
553 return retval;
554 } /* End twa_allocate_memory() */
555
556 /* This function will check the status register for unexpected bits */
557 static int twa_check_bits(u32 status_reg_value)
558 {
559 int retval = 1;
560
561 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
562 goto out;
563 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
564 goto out;
565
566 retval = 0;
567 out:
568 return retval;
569 } /* End twa_check_bits() */
570
571 /* This function will check the srl and decide if we are compatible */
572 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
573 {
574 int retval = 1;
575 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
576 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
577 u32 init_connect_result = 0;
578
579 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
580 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
581 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
582 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
583 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
584 &fw_on_ctlr_build, &init_connect_result)) {
585 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
586 goto out;
587 }
588
589 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
590 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
591 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
592
593 /* Try base mode compatibility */
594 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
595 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
596 TW_EXTENDED_INIT_CONNECT,
597 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
598 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
599 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
600 &fw_on_ctlr_branch, &fw_on_ctlr_build,
601 &init_connect_result)) {
602 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
603 goto out;
604 }
605 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
606 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
607 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
608 } else {
609 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
610 }
611 goto out;
612 }
613 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
614 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
615 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
616 }
617
618 /* Load rest of compatibility struct */
619 strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
620 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
621 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
622 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
623 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
624 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
625 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
626 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
627 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
628 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
629
630 retval = 0;
631 out:
632 return retval;
633 } /* End twa_check_srl() */
634
635 /* This function handles ioctl for the character device */
636 static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
637 {
638 long timeout;
639 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
640 dma_addr_t dma_handle;
641 int request_id = 0;
642 unsigned int sequence_id = 0;
643 unsigned char event_index, start_index;
644 TW_Ioctl_Driver_Command driver_command;
645 TW_Ioctl_Buf_Apache *tw_ioctl;
646 TW_Lock *tw_lock;
647 TW_Command_Full *full_command_packet;
648 TW_Compatibility_Info *tw_compat_info;
649 TW_Event *event;
650 struct timeval current_time;
651 u32 current_time_ms;
652 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
653 int retval = TW_IOCTL_ERROR_OS_EFAULT;
654 void __user *argp = (void __user *)arg;
655
656 /* Only let one of these through at a time */
657 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
658 retval = TW_IOCTL_ERROR_OS_EINTR;
659 goto out;
660 }
661
662 /* First copy down the driver command */
663 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
664 goto out2;
665
666 /* Check data buffer size */
667 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
668 retval = TW_IOCTL_ERROR_OS_EINVAL;
669 goto out2;
670 }
671
672 /* Hardware can only do transfers in multiples of 512 bytes */
673 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
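/* e.g. a 100-byte request is rounded up to one 512-byte sector, 700 bytes to
   1024, and so on. */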
674
675 /* Now allocate ioctl buf memory */
676 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
677 if (!cpu_addr) {
678 retval = TW_IOCTL_ERROR_OS_ENOMEM;
679 goto out2;
680 }
681
682 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
683
684 /* Now copy down the entire ioctl */
685 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
686 goto out3;
687
688 /* See which ioctl we are doing */
689 switch (cmd) {
690 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
691 spin_lock_irqsave(tw_dev->host->host_lock, flags);
692 twa_get_request_id(tw_dev, &request_id);
693
694 /* Flag internal command */
695 tw_dev->srb[request_id] = NULL;
696
697 /* Flag chrdev ioctl */
698 tw_dev->chrdev_request_id = request_id;
699
700 full_command_packet = &tw_ioctl->firmware_command;
701
702 /* Load request id and sglist for both command types */
703 twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
704
705 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
706
707 /* Now post the command packet to the controller */
708 twa_post_command_packet(tw_dev, request_id, 1);
709 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
710
711 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
712
713 /* Now wait for command to complete */
714 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
715
716 /* We timed out, and didn't get an interrupt */
717 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
718 /* Now we need to reset the board */
719 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
720 tw_dev->host->host_no, TW_DRIVER, 0x37,
721 cmd);
722 retval = TW_IOCTL_ERROR_OS_EIO;
723 twa_reset_device_extension(tw_dev);
724 goto out3;
725 }
726
727 /* Now copy in the command packet response */
728 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
729
730 /* Now complete the io */
731 spin_lock_irqsave(tw_dev->host->host_lock, flags);
732 tw_dev->posted_request_count--;
733 tw_dev->state[request_id] = TW_S_COMPLETED;
734 twa_free_request_id(tw_dev, request_id);
735 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
736 break;
737 case TW_IOCTL_GET_COMPATIBILITY_INFO:
738 tw_ioctl->driver_command.status = 0;
739 /* Copy compatibility struct into ioctl data buffer */
740 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
741 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
742 break;
743 case TW_IOCTL_GET_LAST_EVENT:
744 if (tw_dev->event_queue_wrapped) {
745 if (tw_dev->aen_clobber) {
746 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
747 tw_dev->aen_clobber = 0;
748 } else
749 tw_ioctl->driver_command.status = 0;
750 } else {
751 if (!tw_dev->error_index) {
752 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
753 break;
754 }
755 tw_ioctl->driver_command.status = 0;
756 }
757 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
758 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
759 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
760 break;
761 case TW_IOCTL_GET_FIRST_EVENT:
762 if (tw_dev->event_queue_wrapped) {
763 if (tw_dev->aen_clobber) {
764 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
765 tw_dev->aen_clobber = 0;
766 } else
767 tw_ioctl->driver_command.status = 0;
768 event_index = tw_dev->error_index;
769 } else {
770 if (!tw_dev->error_index) {
771 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
772 break;
773 }
774 tw_ioctl->driver_command.status = 0;
775 event_index = 0;
776 }
777 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
778 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
779 break;
780 case TW_IOCTL_GET_NEXT_EVENT:
781 event = (TW_Event *)tw_ioctl->data_buffer;
782 sequence_id = event->sequence_id;
783 tw_ioctl->driver_command.status = 0;
784
785 if (tw_dev->event_queue_wrapped) {
786 if (tw_dev->aen_clobber) {
787 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
788 tw_dev->aen_clobber = 0;
789 }
790 start_index = tw_dev->error_index;
791 } else {
792 if (!tw_dev->error_index) {
793 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
794 break;
795 }
796 start_index = 0;
797 }
798 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
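/* Sequence ids are handed out contiguously, so the slot that should hold
   (sequence_id + 1) is found by offsetting from the oldest queued event at
   start_index; the check below then verifies a newer event is really there. */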
799
800 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
801 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
802 tw_dev->aen_clobber = 1;
803 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
804 break;
805 }
806 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
807 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
808 break;
809 case TW_IOCTL_GET_PREVIOUS_EVENT:
810 event = (TW_Event *)tw_ioctl->data_buffer;
811 sequence_id = event->sequence_id;
812 tw_ioctl->driver_command.status = 0;
813
814 if (tw_dev->event_queue_wrapped) {
815 if (tw_dev->aen_clobber) {
816 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
817 tw_dev->aen_clobber = 0;
818 }
819 start_index = tw_dev->error_index;
820 } else {
821 if (!tw_dev->error_index) {
822 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
823 break;
824 }
825 start_index = 0;
826 }
827 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
828
829 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
830 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
831 tw_dev->aen_clobber = 1;
832 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
833 break;
834 }
835 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
836 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
837 break;
838 case TW_IOCTL_GET_LOCK:
839 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
840 do_gettimeofday(&current_time);
841 current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
842
843 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
844 tw_dev->ioctl_sem_lock = 1;
845 tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
846 tw_ioctl->driver_command.status = 0;
847 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
848 } else {
849 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
850 tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
851 }
852 break;
853 case TW_IOCTL_RELEASE_LOCK:
854 if (tw_dev->ioctl_sem_lock == 1) {
855 tw_dev->ioctl_sem_lock = 0;
856 tw_ioctl->driver_command.status = 0;
857 } else {
858 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
859 }
860 break;
861 default:
862 retval = TW_IOCTL_ERROR_OS_ENOTTY;
863 goto out3;
864 }
865
866 /* Now copy the entire response to userspace */
867 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
868 retval = 0;
869 out3:
870 /* Now free ioctl buf memory */
871 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
872 out2:
873 mutex_unlock(&tw_dev->ioctl_lock);
874 out:
875 return retval;
876 } /* End twa_chrdev_ioctl() */
877
878 /* This function handles open for the character device */
879 /* NOTE that this function will race with remove. */
880 static int twa_chrdev_open(struct inode *inode, struct file *file)
881 {
882 unsigned int minor_number;
883 int retval = TW_IOCTL_ERROR_OS_ENODEV;
884
885 cycle_kernel_lock();
886 minor_number = iminor(inode);
887 if (minor_number >= twa_device_extension_count)
888 goto out;
889 retval = 0;
890 out:
891 return retval;
892 } /* End twa_chrdev_open() */
893
894 /* This function will print readable messages from status register errors */
895 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
896 {
897 int retval = 1;
898
899 /* Check for various error conditions and handle them appropriately */
900 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
901 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
902 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
903 }
904
905 if (status_reg_value & TW_STATUS_PCI_ABORT) {
906 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
907 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
908 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
909 }
910
911 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
912 if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
913 (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
914 (!test_bit(TW_IN_RESET, &tw_dev->flags)))
915 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
916 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
917 }
918
919 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
920 if (tw_dev->reset_print == 0) {
921 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
922 tw_dev->reset_print = 1;
923 }
924 goto out;
925 }
926 retval = 0;
927 out:
928 return retval;
929 } /* End twa_decode_bits() */
930
931 /* This function will empty the response queue */
932 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
933 {
934 u32 status_reg_value, response_que_value;
935 int count = 0, retval = 1;
936
937 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
938
939 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
940 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
941 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
942 count++;
943 }
944 if (count == TW_MAX_RESPONSE_DRAIN)
945 goto out;
946
947 retval = 0;
948 out:
949 return retval;
950 } /* End twa_empty_response_queue() */
951
952 /* This function will clear the pchip/response queue on 9550SX */
953 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
954 {
955 u32 response_que_value = 0;
956 unsigned long before;
957 int retval = 1;
958
959 if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
960 before = jiffies;
961 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
962 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
963 msleep(1);
964 if (time_after(jiffies, before + HZ * 30))
965 goto out;
966 }
967 /* P-chip settle time */
968 msleep(500);
969 retval = 0;
970 } else
971 retval = 0;
972 out:
973 return retval;
974 } /* End twa_empty_response_queue_large() */
975
976 /* This function passes sense keys from firmware to scsi layer */
977 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
978 {
979 TW_Command_Full *full_command_packet;
980 unsigned short error;
981 int retval = 1;
982 char *error_str;
983
984 full_command_packet = tw_dev->command_packet_virt[request_id];
985
986 /* Check for embedded error string */
987 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
988
989 /* Don't print error for Logical unit not supported during rollcall */
990 error = le16_to_cpu(full_command_packet->header.status_block.error);
991 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
992 if (print_host)
993 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
994 tw_dev->host->host_no,
995 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
996 full_command_packet->header.status_block.error,
997 error_str[0] == '\0' ?
998 twa_string_lookup(twa_error_table,
999 full_command_packet->header.status_block.error) : error_str,
1000 full_command_packet->header.err_specific_desc);
1001 else
1002 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1003 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1004 full_command_packet->header.status_block.error,
1005 error_str[0] == '\0' ?
1006 twa_string_lookup(twa_error_table,
1007 full_command_packet->header.status_block.error) : error_str,
1008 full_command_packet->header.err_specific_desc);
1009 }
1010
1011 if (copy_sense) {
1012 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1013 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1014 retval = TW_ISR_DONT_RESULT;
1015 goto out;
1016 }
1017 retval = 0;
1018 out:
1019 return retval;
1020 } /* End twa_fill_sense() */
1021
1022 /* This function will free up device extension resources */
1023 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1024 {
1025 if (tw_dev->command_packet_virt[0])
1026 pci_free_consistent(tw_dev->tw_pci_dev,
1027 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1028 tw_dev->command_packet_virt[0],
1029 tw_dev->command_packet_phys[0]);
1030
1031 if (tw_dev->generic_buffer_virt[0])
1032 pci_free_consistent(tw_dev->tw_pci_dev,
1033 TW_SECTOR_SIZE*TW_Q_LENGTH,
1034 tw_dev->generic_buffer_virt[0],
1035 tw_dev->generic_buffer_phys[0]);
1036
1037 kfree(tw_dev->event_queue[0]);
1038 } /* End twa_free_device_extension() */
1039
1040 /* This function will free a request id */
1041 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1042 {
1043 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1044 tw_dev->state[request_id] = TW_S_FINISHED;
1045 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1046 } /* End twa_free_request_id() */
1047
1048 /* This function will get parameter table entries from the firmware */
1049 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1050 {
1051 TW_Command_Full *full_command_packet;
1052 TW_Command *command_packet;
1053 TW_Param_Apache *param;
1054 void *retval = NULL;
1055
1056 /* Setup the command packet */
1057 full_command_packet = tw_dev->command_packet_virt[request_id];
1058 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1059 command_packet = &full_command_packet->command.oldcommand;
1060
1061 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1062 command_packet->size = TW_COMMAND_SIZE;
1063 command_packet->request_id = request_id;
1064 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1065
1066 /* Now setup the param */
1067 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1068 memset(param, 0, TW_SECTOR_SIZE);
1069 param->table_id = cpu_to_le16(table_id | 0x8000);
1070 param->parameter_id = cpu_to_le16(parameter_id);
1071 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1072
1073 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1074 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1075
1076 /* Post the command packet to the board */
1077 twa_post_command_packet(tw_dev, request_id, 1);
1078
1079 /* Poll for completion */
1080 if (twa_poll_response(tw_dev, request_id, 30))
1081 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1082 else
1083 retval = (void *)&(param->data[0]);
1084
1085 tw_dev->posted_request_count--;
1086 tw_dev->state[request_id] = TW_S_INITIAL;
1087
1088 return retval;
1089 } /* End twa_get_param() */
1090
1091 /* This function will assign an available request id */
1092 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1093 {
1094 *request_id = tw_dev->free_queue[tw_dev->free_head];
1095 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1096 tw_dev->state[*request_id] = TW_S_STARTED;
1097 } /* End twa_get_request_id() */
1098
1099 /* This function will send an initconnection command to controller */
1100 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1101 u32 set_features, unsigned short current_fw_srl,
1102 unsigned short current_fw_arch_id,
1103 unsigned short current_fw_branch,
1104 unsigned short current_fw_build,
1105 unsigned short *fw_on_ctlr_srl,
1106 unsigned short *fw_on_ctlr_arch_id,
1107 unsigned short *fw_on_ctlr_branch,
1108 unsigned short *fw_on_ctlr_build,
1109 u32 *init_connect_result)
1110 {
1111 TW_Command_Full *full_command_packet;
1112 TW_Initconnect *tw_initconnect;
1113 int request_id = 0, retval = 1;
1114
1115 /* Initialize InitConnection command packet */
1116 full_command_packet = tw_dev->command_packet_virt[request_id];
1117 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1118 full_command_packet->header.header_desc.size_header = 128;
1119
1120 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1121 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1122 tw_initconnect->request_id = request_id;
1123 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1124 tw_initconnect->features = set_features;
1125
1126 /* Turn on 64-bit sgl support if we need to */
1127 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1128
1129 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1130
1131 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1132 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1133 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1134 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1135 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1136 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1137 } else
1138 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1139
1140 /* Send command packet to the board */
1141 twa_post_command_packet(tw_dev, request_id, 1);
1142
1143 /* Poll for completion */
1144 if (twa_poll_response(tw_dev, request_id, 30)) {
1145 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1146 } else {
1147 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1148 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1149 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1150 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1151 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1152 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1153 }
1154 retval = 0;
1155 }
1156
1157 tw_dev->posted_request_count--;
1158 tw_dev->state[request_id] = TW_S_INITIAL;
1159
1160 return retval;
1161 } /* End twa_initconnection() */
1162
1163 /* This function will initialize the fields of a device extension */
1164 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1165 {
1166 int i, retval = 1;
1167
1168 /* Initialize command packet buffers */
1169 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1170 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1171 goto out;
1172 }
1173
1174 /* Initialize generic buffer */
1175 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1176 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1177 goto out;
1178 }
1179
1180 /* Allocate event info space */
1181 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1182 if (!tw_dev->event_queue[0]) {
1183 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1184 goto out;
1185 }
1186
1187
1188 for (i = 0; i < TW_Q_LENGTH; i++) {
1189 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1190 tw_dev->free_queue[i] = i;
1191 tw_dev->state[i] = TW_S_INITIAL;
1192 }
1193
1194 tw_dev->pending_head = TW_Q_START;
1195 tw_dev->pending_tail = TW_Q_START;
1196 tw_dev->free_head = TW_Q_START;
1197 tw_dev->free_tail = TW_Q_START;
1198 tw_dev->error_sequence_id = 1;
1199 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1200
1201 mutex_init(&tw_dev->ioctl_lock);
1202 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1203
1204 retval = 0;
1205 out:
1206 return retval;
1207 } /* End twa_initialize_device_extension() */
1208
1209 /* This function is the interrupt service routine */
1210 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1211 {
1212 int request_id, error = 0;
1213 u32 status_reg_value;
1214 TW_Response_Queue response_que;
1215 TW_Command_Full *full_command_packet;
1216 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1217 int handled = 0;
1218
1219 /* Get the per adapter lock */
1220 spin_lock(tw_dev->host->host_lock);
1221
1222 /* Read the registers */
1223 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1224
1225 /* Check if this is our interrupt, otherwise bail */
1226 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1227 goto twa_interrupt_bail;
1228
1229 handled = 1;
1230
1231 /* If we are resetting, bail */
1232 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1233 goto twa_interrupt_bail;
1234
1235 /* Check controller for errors */
1236 if (twa_check_bits(status_reg_value)) {
1237 if (twa_decode_bits(tw_dev, status_reg_value)) {
1238 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1239 goto twa_interrupt_bail;
1240 }
1241 }
1242
1243 /* Handle host interrupt */
1244 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1245 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1246
1247 /* Handle attention interrupt */
1248 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1249 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1250 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1251 twa_get_request_id(tw_dev, &request_id);
1252
1253 error = twa_aen_read_queue(tw_dev, request_id);
1254 if (error) {
1255 tw_dev->state[request_id] = TW_S_COMPLETED;
1256 twa_free_request_id(tw_dev, request_id);
1257 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1258 }
1259 }
1260 }
1261
1262 /* Handle command interrupt */
1263 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1264 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1265 /* Drain as many pending commands as we can */
1266 while (tw_dev->pending_request_count > 0) {
1267 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1268 if (tw_dev->state[request_id] != TW_S_PENDING) {
1269 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1270 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1271 goto twa_interrupt_bail;
1272 }
1273 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1274 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1275 tw_dev->pending_request_count--;
1276 } else {
1277 /* If we get here, we will continue re-posting on the next command interrupt */
1278 break;
1279 }
1280 }
1281 }
1282
1283 /* Handle response interrupt */
1284 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1285
1286 /* Drain the response queue from the board */
1287 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1288 /* Complete the response */
1289 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1290 request_id = TW_RESID_OUT(response_que.response_id);
1291 full_command_packet = tw_dev->command_packet_virt[request_id];
1292 error = 0;
1293 /* Check for command packet errors */
1294 if (full_command_packet->command.newcommand.status != 0) {
1295 if (tw_dev->srb[request_id] != NULL) {
1296 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1297 } else {
1298 /* Skip ioctl error prints */
1299 if (request_id != tw_dev->chrdev_request_id) {
1300 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1301 }
1302 }
1303 }
1304
1305 /* Check for correct state */
1306 if (tw_dev->state[request_id] != TW_S_POSTED) {
1307 if (tw_dev->srb[request_id] != NULL) {
1308 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1309 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1310 goto twa_interrupt_bail;
1311 }
1312 }
1313
1314 /* Check for internal command completion */
1315 if (tw_dev->srb[request_id] == NULL) {
1316 if (request_id != tw_dev->chrdev_request_id) {
1317 if (twa_aen_complete(tw_dev, request_id))
1318 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1319 } else {
1320 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1321 wake_up(&tw_dev->ioctl_wqueue);
1322 }
1323 } else {
1324 struct scsi_cmnd *cmd;
1325
1326 cmd = tw_dev->srb[request_id];
1327
1328 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1329 /* If no error, the command was a success */
1330 if (error == 0) {
1331 cmd->result = (DID_OK << 16);
1332 }
1333
1334 /* If error, command failed */
1335 if (error == 1) {
1336 /* Ask for a host reset */
1337 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1338 }
1339
1340 /* Report residual bytes for single sgl */
1341 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1342 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1343 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1344 }
1345
1346 /* Now complete the io */
1347 tw_dev->state[request_id] = TW_S_COMPLETED;
1348 twa_free_request_id(tw_dev, request_id);
1349 tw_dev->posted_request_count--;
1350 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1351 twa_unmap_scsi_data(tw_dev, request_id);
1352 }
1353
1354 /* Check for valid status after each drain */
1355 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1356 if (twa_check_bits(status_reg_value)) {
1357 if (twa_decode_bits(tw_dev, status_reg_value)) {
1358 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1359 goto twa_interrupt_bail;
1360 }
1361 }
1362 }
1363 }
1364
1365 twa_interrupt_bail:
1366 spin_unlock(tw_dev->host->host_lock);
1367 return IRQ_RETVAL(handled);
1368 } /* End twa_interrupt() */
1369
1370 /* This function will load the request id and various sgls for ioctls */
1371 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1372 {
1373 TW_Command *oldcommand;
1374 TW_Command_Apache *newcommand;
1375 TW_SG_Entry *sgl;
1376 unsigned int pae = 0;
1377
1378 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1379 pae = 1;
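/* pae is set on 32-bit kernels built with a 64-bit dma_addr_t (e.g. x86 PAE):
   each TW_SG_Entry is then one 32-bit word larger, which is what the +pae
   adjustments to the sgl pointer math and to oldcommand->size below appear to
   compensate for. */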
1380
1381 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1382 newcommand = &full_command_packet->command.newcommand;
1383 newcommand->request_id__lunl =
1384 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1385 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1386 newcommand->sg_list[0].length = cpu_to_le32(length);
1387 newcommand->sgl_entries__lunh =
1388 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1));
1389 } else {
1390 oldcommand = &full_command_packet->command.oldcommand;
1391 oldcommand->request_id = request_id;
1392
1393 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1394 /* Load the sg list */
1395 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1396 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1397 else
1398 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1399 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1400 sgl->length = cpu_to_le32(length);
1401
1402 oldcommand->size += pae;
1403 }
1404 }
1405 } /* End twa_load_sgl() */
1406
1407 /* This function will perform a pci-dma mapping for a scatter gather list */
1408 static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1409 {
1410 int use_sg;
1411 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1412
1413 use_sg = scsi_dma_map(cmd);
1414 if (!use_sg)
1415 return 0;
1416 else if (use_sg < 0) {
1417 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1418 return 0;
1419 }
1420
1421 cmd->SCp.phase = TW_PHASE_SGLIST;
1422 cmd->SCp.have_data_in = use_sg;
1423
1424 return use_sg;
1425 } /* End twa_map_scsi_sg_data() */
1426
1427 /* This function will poll for a response interrupt of a request */
1428 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1429 {
1430 int retval = 1, found = 0, response_request_id;
1431 TW_Response_Queue response_queue;
1432 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1433
1434 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1435 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1436 response_request_id = TW_RESID_OUT(response_queue.response_id);
1437 if (request_id != response_request_id) {
1438 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1439 goto out;
1440 }
1441 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1442 if (full_command_packet->command.newcommand.status != 0) {
1443 /* bad response */
1444 twa_fill_sense(tw_dev, request_id, 0, 0);
1445 goto out;
1446 }
1447 found = 1;
1448 } else {
1449 if (full_command_packet->command.oldcommand.status != 0) {
1450 /* bad response */
1451 twa_fill_sense(tw_dev, request_id, 0, 0);
1452 goto out;
1453 }
1454 found = 1;
1455 }
1456 }
1457
1458 if (found)
1459 retval = 0;
1460 out:
1461 return retval;
1462 } /* End twa_poll_response() */
1463
1464 /* This function will poll the status register for a flag */
1465 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1466 {
1467 u32 status_reg_value;
1468 unsigned long before;
1469 int retval = 1;
1470
1471 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1472 before = jiffies;
1473
1474 if (twa_check_bits(status_reg_value))
1475 twa_decode_bits(tw_dev, status_reg_value);
1476
1477 while ((status_reg_value & flag) != flag) {
1478 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1479
1480 if (twa_check_bits(status_reg_value))
1481 twa_decode_bits(tw_dev, status_reg_value);
1482
1483 if (time_after(jiffies, before + HZ * seconds))
1484 goto out;
1485
1486 msleep(50);
1487 }
1488 retval = 0;
1489 out:
1490 return retval;
1491 } /* End twa_poll_status() */
1492
1493 /* This function will poll the status register for disappearance of a flag */
1494 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1495 {
1496 u32 status_reg_value;
1497 unsigned long before;
1498 int retval = 1;
1499
1500 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1501 before = jiffies;
1502
1503 if (twa_check_bits(status_reg_value))
1504 twa_decode_bits(tw_dev, status_reg_value);
1505
1506 while ((status_reg_value & flag) != 0) {
1507 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1508 if (twa_check_bits(status_reg_value))
1509 twa_decode_bits(tw_dev, status_reg_value);
1510
1511 if (time_after(jiffies, before + HZ * seconds))
1512 goto out;
1513
1514 msleep(50);
1515 }
1516 retval = 0;
1517 out:
1518 return retval;
1519 } /* End twa_poll_status_gone() */
1520
1521 /* This function will attempt to post a command packet to the board */
1522 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1523 {
1524 u32 status_reg_value;
1525 dma_addr_t command_que_value;
1526 int retval = 1;
1527
1528 command_que_value = tw_dev->command_packet_phys[request_id];
1529
1530 /* For 9650SE write low 4 bytes first */
1531 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1532 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1533 command_que_value += TW_COMMAND_OFFSET;
1534 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1535 }
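/* The matching upper 4 bytes are written further below, only after the queue-full and pending checks have passed. */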
1536
1537 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1538
1539 if (twa_check_bits(status_reg_value))
1540 twa_decode_bits(tw_dev, status_reg_value);
1541
1542 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1543
1544 /* Only pend internal driver commands */
1545 if (!internal) {
1546 retval = SCSI_MLQUEUE_HOST_BUSY;
1547 goto out;
1548 }
1549
1550 /* Couldn't post the command packet, so we do it later */
1551 if (tw_dev->state[request_id] != TW_S_PENDING) {
1552 tw_dev->state[request_id] = TW_S_PENDING;
1553 tw_dev->pending_request_count++;
1554 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1555 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1556 }
1557 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1558 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1559 }
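/* Leave the command interrupt unmasked so the interrupt handler can post this pended request once the controller reports free command-queue slots. */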
1560 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1561 goto out;
1562 } else {
1563 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1564 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1565 /* Now write upper 4 bytes */
1566 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1567 } else {
1568 if (sizeof(dma_addr_t) > 4) {
1569 command_que_value += TW_COMMAND_OFFSET;
1570 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1571 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1572 } else {
1573 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1574 }
1575 }
1576 tw_dev->state[request_id] = TW_S_POSTED;
1577 tw_dev->posted_request_count++;
1578 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1579 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1580 }
1581 }
1582 retval = 0;
1583 out:
1584 return retval;
1585 } /* End twa_post_command_packet() */
1586
1587 /* This function will reset a device extension */
1588 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1589 {
1590 int i = 0;
1591 int retval = 1;
1592 unsigned long flags = 0;
1593
1594 set_bit(TW_IN_RESET, &tw_dev->flags);
1595 TW_DISABLE_INTERRUPTS(tw_dev);
1596 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1597 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1598
1599 /* Abort all requests that are in progress */
1600 for (i = 0; i < TW_Q_LENGTH; i++) {
1601 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1602 (tw_dev->state[i] != TW_S_INITIAL) &&
1603 (tw_dev->state[i] != TW_S_COMPLETED)) {
1604 if (tw_dev->srb[i]) {
1605 tw_dev->srb[i]->result = (DID_RESET << 16);
1606 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
1607 twa_unmap_scsi_data(tw_dev, i);
1608 }
1609 }
1610 }
1611
1612 /* Reset queues and counts */
1613 for (i = 0; i < TW_Q_LENGTH; i++) {
1614 tw_dev->free_queue[i] = i;
1615 tw_dev->state[i] = TW_S_INITIAL;
1616 }
1617 tw_dev->free_head = TW_Q_START;
1618 tw_dev->free_tail = TW_Q_START;
1619 tw_dev->posted_request_count = 0;
1620 tw_dev->pending_request_count = 0;
1621 tw_dev->pending_head = TW_Q_START;
1622 tw_dev->pending_tail = TW_Q_START;
1623 tw_dev->reset_print = 0;
1624
1625 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1626
1627 if (twa_reset_sequence(tw_dev, 1))
1628 goto out;
1629
1630 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1631 clear_bit(TW_IN_RESET, &tw_dev->flags);
1632 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1633
1634 retval = 0;
1635 out:
1636 return retval;
1637 } /* End twa_reset_device_extension() */
1638
1639 /* This function will reset a controller */
1640 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1641 {
1642 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1643
1644 while (tries < TW_MAX_RESET_TRIES) {
1645 if (do_soft_reset) {
1646 TW_SOFT_RESET(tw_dev);
1647 /* Clear pchip/response queue on 9550SX and newer controllers */
1648 if (twa_empty_response_queue_large(tw_dev)) {
1649 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1650 do_soft_reset = 1;
1651 tries++;
1652 continue;
1653 }
1654 }
1655
1656 /* Make sure controller is in a good state */
1657 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1658 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1659 do_soft_reset = 1;
1660 tries++;
1661 continue;
1662 }
1663
1664 /* Empty response queue */
1665 if (twa_empty_response_queue(tw_dev)) {
1666 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1667 do_soft_reset = 1;
1668 tries++;
1669 continue;
1670 }
1671
1672 flashed = 0;
1673
1674 /* Check for compatibility/flash */
1675 if (twa_check_srl(tw_dev, &flashed)) {
1676 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1677 do_soft_reset = 1;
1678 tries++;
1679 continue;
1680 } else {
1681 if (flashed) {
1682 tries++;
1683 continue;
1684 }
1685 }
1686
1687 /* Drain the AEN queue */
1688 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1689 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1690 do_soft_reset = 1;
1691 tries++;
1692 continue;
1693 }
1694
1695 /* If we got here, controller is in a good state */
1696 retval = 0;
1697 goto out;
1698 }
1699 out:
1700 return retval;
1701 } /* End twa_reset_sequence() */
1702
1703 /* This function returns unit geometry in cylinders/heads/sectors */
1704 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1705 {
1706 int heads, sectors, cylinders;
1707 TW_Device_Extension *tw_dev;
1708
1709 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1710
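/* Use the 255-head/63-sector translation for capacities of 0x200000 sectors (1 GB with 512-byte sectors) or more, otherwise 64 heads/32 sectors. */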
1711 if (capacity >= 0x200000) {
1712 heads = 255;
1713 sectors = 63;
1714 cylinders = sector_div(capacity, heads * sectors);
1715 } else {
1716 heads = 64;
1717 sectors = 32;
1718 cylinders = sector_div(capacity, heads * sectors);
1719 }
1720
1721 geom[0] = heads;
1722 geom[1] = sectors;
1723 geom[2] = cylinders;
1724
1725 return 0;
1726 } /* End twa_scsi_biosparam() */
1727
1728 /* This is the new scsi eh reset function */
1729 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1730 {
1731 TW_Device_Extension *tw_dev = NULL;
1732 int retval = FAILED;
1733
1734 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1735
1736 tw_dev->num_resets++;
1737
1738 sdev_printk(KERN_WARNING, SCpnt->device,
1739 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1740 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1741
1742 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1743 mutex_lock(&tw_dev->ioctl_lock);
1744
1745 /* Now reset the card and some of the device extension data */
1746 if (twa_reset_device_extension(tw_dev)) {
1747 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1748 goto out;
1749 }
1750
1751 retval = SUCCESS;
1752 out:
1753 mutex_unlock(&tw_dev->ioctl_lock);
1754 return retval;
1755 } /* End twa_scsi_eh_reset() */
1756
1757 /* This is the main scsi queue function to handle scsi opcodes */
1758 static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1759 {
1760 int request_id, retval;
1761 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1762
1763 /* If a reset is in progress (e.g. from a timed out ioctl), report busy */
1764 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1765 retval = SCSI_MLQUEUE_HOST_BUSY;
1766 goto out;
1767 }
1768
1769 /* Check if this firmware supports LUNs */
1770 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1771 SCpnt->result = (DID_BAD_TARGET << 16);
1772 done(SCpnt);
1773 retval = 0;
1774 goto out;
1775 }
1776
1777 /* Save done function into scsi_cmnd struct */
1778 SCpnt->scsi_done = done;
1779
1780 /* Get a free request id */
1781 twa_get_request_id(tw_dev, &request_id);
1782
1783 /* Save the scsi command for use by the ISR */
1784 tw_dev->srb[request_id] = SCpnt;
1785
1786 /* Initialize phase to zero */
1787 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1788
1789 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1790 switch (retval) {
1791 case SCSI_MLQUEUE_HOST_BUSY:
1792 twa_free_request_id(tw_dev, request_id);
1793 break;
1794 case 1:
1795 tw_dev->state[request_id] = TW_S_COMPLETED;
1796 twa_free_request_id(tw_dev, request_id);
1797 SCpnt->result = (DID_ERROR << 16);
1798 done(SCpnt);
1799 retval = 0;
1800 }
1801 out:
1802 return retval;
1803 } /* End twa_scsi_queue() */
1804
1805 /* This function hands SCSI CDBs to the firmware */
1806 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1807 {
1808 TW_Command_Full *full_command_packet;
1809 TW_Command_Apache *command_packet;
1810 u32 num_sectors = 0x0;
1811 int i, sg_count;
1812 struct scsi_cmnd *srb = NULL;
1813 struct scatterlist *sglist = NULL, *sg;
1814 int retval = 1;
1815
1816 if (tw_dev->srb[request_id]) {
1817 srb = tw_dev->srb[request_id];
1818 if (scsi_sglist(srb))
1819 sglist = scsi_sglist(srb);
1820 }
1821
1822 /* Initialize command packet */
1823 full_command_packet = tw_dev->command_packet_virt[request_id];
1824 full_command_packet->header.header_desc.size_header = 128;
1825 full_command_packet->header.status_block.error = 0;
1826 full_command_packet->header.status_block.severity__reserved = 0;
1827
1828 command_packet = &full_command_packet->command.newcommand;
1829 command_packet->status = 0;
1830 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1831
1832 /* We forced 16 byte cdb use earlier */
1833 if (!cdb)
1834 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1835 else
1836 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1837
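/* The low 4 bits of the LUN are packed together with the request id here; the remaining LUN bits (lun >> 4) go into sgl_entries__lunh below. */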
1838 if (srb) {
1839 command_packet->unit = srb->device->id;
1840 command_packet->request_id__lunl =
1841 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1842 } else {
1843 command_packet->request_id__lunl =
1844 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1845 command_packet->unit = 0;
1846 }
1847
1848 command_packet->sgl_offset = 16;
1849
1850 if (!sglistarg) {
1851 /* Map sglist from scsi layer to cmd packet */
1852
1853 if (scsi_sg_count(srb)) {
1854 if ((scsi_sg_count(srb) == 1) &&
1855 (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
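/* Single-entry requests smaller than TW_MIN_SGL_LENGTH are bounced through the preallocated per-request buffer instead of being DMA mapped; for reads the data is copied back in twa_scsiop_execute_scsi_complete(). */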
1856 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1857 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1858 scsi_sg_copy_to_buffer(srb,
1859 tw_dev->generic_buffer_virt[request_id],
1860 TW_SECTOR_SIZE);
1861 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1862 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1863 } else {
1864 sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
1865 if (sg_count == 0)
1866 goto out;
1867
1868 scsi_for_each_sg(srb, sg, sg_count, i) {
1869 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1870 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1871 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1872 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1873 goto out;
1874 }
1875 }
1876 }
1877 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1878 }
1879 } else {
1880 /* Internal cdb post */
1881 for (i = 0; i < use_sg; i++) {
1882 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1883 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1884 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1885 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1886 goto out;
1887 }
1888 }
1889 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1890 }
1891
1892 if (srb) {
1893 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1894 num_sectors = (u32)srb->cmnd[4];
1895
1896 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1897 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1898 }
1899
1900 /* Update sector statistic */
1901 tw_dev->sector_count = num_sectors;
1902 if (tw_dev->sector_count > tw_dev->max_sector_count)
1903 tw_dev->max_sector_count = tw_dev->sector_count;
1904
1905 /* Update SG statistics */
1906 if (srb) {
1907 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1908 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1909 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1910 }
1911
1912 /* Now post the command to the board */
1913 if (srb) {
1914 retval = twa_post_command_packet(tw_dev, request_id, 0);
1915 } else {
1916 twa_post_command_packet(tw_dev, request_id, 1);
1917 retval = 0;
1918 }
1919 out:
1920 return retval;
1921 } /* End twa_scsiop_execute_scsi() */
1922
1923 /* This function completes an execute scsi operation */
1924 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1925 {
1926 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1927
1928 if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1929 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1930 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1931 if (scsi_sg_count(cmd) == 1) {
1932 void *buf = tw_dev->generic_buffer_virt[request_id];
1933
1934 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1935 }
1936 }
1937 } /* End twa_scsiop_execute_scsi_complete() */
1938
1939 /* This function tells the controller to shut down */
1940 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1941 {
1942 /* Disable interrupts */
1943 TW_DISABLE_INTERRUPTS(tw_dev);
1944
1945 /* Free up the IRQ */
1946 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1947
1948 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1949
1950 /* Tell the card we are shutting down */
1951 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1952 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1953 } else {
1954 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1955 }
1956
1957 /* Clear all interrupts just before exit */
1958 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1959 } /* End __twa_shutdown() */
1960
1961 /* Wrapper for __twa_shutdown */
1962 static void twa_shutdown(struct pci_dev *pdev)
1963 {
1964 struct Scsi_Host *host = pci_get_drvdata(pdev);
1965 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1966
1967 __twa_shutdown(tw_dev);
1968 } /* End twa_shutdown() */
1969
1970 /* This function will look up the text for a message code */
1971 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1972 {
1973 int index;
1974
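/* Scan until the code matches or an entry with a NULL text pointer ends the table, then return that entry's text. */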
1975 for (index = 0; ((code != table[index].code) &&
1976 (table[index].text != NULL)); index++);
1977 return table[index].text;
1978 } /* End twa_string_lookup() */
1979
1980 /* This function will perform a pci-dma unmap */
1981 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1982 {
1983 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1984
1985 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1986 scsi_dma_unmap(cmd);
1987 } /* End twa_unmap_scsi_data() */
1988
1989 /* scsi_host_template initializer */
1990 static struct scsi_host_template driver_template = {
1991 .module = THIS_MODULE,
1992 .name = "3ware 9000 Storage Controller",
1993 .queuecommand = twa_scsi_queue,
1994 .eh_host_reset_handler = twa_scsi_eh_reset,
1995 .bios_param = twa_scsi_biosparam,
1996 .change_queue_depth = twa_change_queue_depth,
1997 .can_queue = TW_Q_LENGTH-2,
1998 .this_id = -1,
1999 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
2000 .max_sectors = TW_MAX_SECTORS,
2001 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2002 .use_clustering = ENABLE_CLUSTERING,
2003 .shost_attrs = twa_host_attrs,
2004 .emulated = 1
2005 };
2006
2007 /* This function will probe and initialize a card */
2008 static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2009 {
2010 struct Scsi_Host *host = NULL;
2011 TW_Device_Extension *tw_dev;
2012 unsigned long mem_addr, mem_len;
2013 int retval = -ENODEV;
2014
2015 retval = pci_enable_device(pdev);
2016 if (retval) {
2017 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2018 goto out_disable_device;
2019 }
2020
2021 pci_set_master(pdev);
2022 pci_try_set_mwi(pdev);
2023
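/* Prefer 64-bit DMA masks for streaming and consistent mappings; fall back to 32-bit masks if they cannot be set. */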
2024 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2025 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2026 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2027 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2028 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2029 retval = -ENODEV;
2030 goto out_disable_device;
2031 }
2032
2033 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2034 if (!host) {
2035 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2036 retval = -ENOMEM;
2037 goto out_disable_device;
2038 }
2039 tw_dev = (TW_Device_Extension *)host->hostdata;
2040
2041 /* Save values to device extension */
2042 tw_dev->host = host;
2043 tw_dev->tw_pci_dev = pdev;
2044
2045 if (twa_initialize_device_extension(tw_dev)) {
2046 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2047 goto out_free_device_extension;
2048 }
2049
2050 /* Request IO regions */
2051 retval = pci_request_regions(pdev, "3w-9xxx");
2052 if (retval) {
2053 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2054 goto out_free_device_extension;
2055 }
2056
2057 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2058 mem_addr = pci_resource_start(pdev, 1);
2059 mem_len = pci_resource_len(pdev, 1);
2060 } else {
2061 mem_addr = pci_resource_start(pdev, 2);
2062 mem_len = pci_resource_len(pdev, 2);
2063 }
2064
2065 /* Save base address */
2066 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2067 if (!tw_dev->base_addr) {
2068 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2069 goto out_release_mem_region;
2070 }
2071
2072 /* Disable interrupts on the card */
2073 TW_DISABLE_INTERRUPTS(tw_dev);
2074
2075 /* Initialize the card */
2076 if (twa_reset_sequence(tw_dev, 0))
2077 goto out_iounmap;
2078
2079 /* Set host specific parameters */
2080 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2081 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2082 host->max_id = TW_MAX_UNITS_9650SE;
2083 else
2084 host->max_id = TW_MAX_UNITS;
2085
2086 host->max_cmd_len = TW_MAX_CDB_LEN;
2087
2088 /* Channels aren't supported by the adapter; the LUN limit depends on the firmware SRL */
2089 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2090 host->max_channel = 0;
2091
2092 /* Register the card with the kernel SCSI layer */
2093 retval = scsi_add_host(host, &pdev->dev);
2094 if (retval) {
2095 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2096 goto out_iounmap;
2097 }
2098
2099 pci_set_drvdata(pdev, host);
2100
2101 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2102 host->host_no, mem_addr, pdev->irq);
2103 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2104 host->host_no,
2105 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2106 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2107 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2108 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2109 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2110 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2111
2112 /* Try to enable MSI */
2113 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2114 !pci_enable_msi(pdev))
2115 set_bit(TW_USING_MSI, &tw_dev->flags);
2116
2117 /* Now setup the interrupt handler */
2118 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2119 if (retval) {
2120 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2121 goto out_remove_host;
2122 }
2123
2124 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2125 twa_device_extension_count++;
2126
2127 /* Re-enable interrupts on the card */
2128 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2129
2130 /* Finally, scan the host */
2131 scsi_scan_host(host);
2132
2133 if (twa_major == -1) {
2134 if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2135 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2136 }
2137 return 0;
2138
2139 out_remove_host:
2140 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2141 pci_disable_msi(pdev);
2142 scsi_remove_host(host);
2143 out_iounmap:
2144 iounmap(tw_dev->base_addr);
2145 out_release_mem_region:
2146 pci_release_regions(pdev);
2147 out_free_device_extension:
2148 twa_free_device_extension(tw_dev);
2149 scsi_host_put(host);
2150 out_disable_device:
2151 pci_disable_device(pdev);
2152
2153 return retval;
2154 } /* End twa_probe() */
2155
2156 /* This function is called to remove a device */
2157 static void twa_remove(struct pci_dev *pdev)
2158 {
2159 struct Scsi_Host *host = pci_get_drvdata(pdev);
2160 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2161
2162 scsi_remove_host(tw_dev->host);
2163
2164 /* Unregister character device */
2165 if (twa_major >= 0) {
2166 unregister_chrdev(twa_major, "twa");
2167 twa_major = -1;
2168 }
2169
2170 /* Shutdown the card */
2171 __twa_shutdown(tw_dev);
2172
2173 /* Disable MSI if enabled */
2174 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2175 pci_disable_msi(pdev);
2176
2177 /* Free IO remapping */
2178 iounmap(tw_dev->base_addr);
2179
2180 /* Free up the mem region */
2181 pci_release_regions(pdev);
2182
2183 /* Free up device extension resources */
2184 twa_free_device_extension(tw_dev);
2185
2186 scsi_host_put(tw_dev->host);
2187 pci_disable_device(pdev);
2188 twa_device_extension_count--;
2189 } /* End twa_remove() */
2190
2191 #ifdef CONFIG_PM
2192 /* This function is called on PCI suspend */
2193 static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2194 {
2195 struct Scsi_Host *host = pci_get_drvdata(pdev);
2196 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2197
2198 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2199
2200 TW_DISABLE_INTERRUPTS(tw_dev);
2201 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2202
2203 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2204 pci_disable_msi(pdev);
2205
2206 /* Tell the card we are shutting down */
2207 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2208 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2209 } else {
2210 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2211 }
2212 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2213
2214 pci_save_state(pdev);
2215 pci_disable_device(pdev);
2216 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2217
2218 return 0;
2219 } /* End twa_suspend() */
2220
2221 /* This function is called on PCI resume */
2222 static int twa_resume(struct pci_dev *pdev)
2223 {
2224 int retval = 0;
2225 struct Scsi_Host *host = pci_get_drvdata(pdev);
2226 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2227
2228 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2229 pci_set_power_state(pdev, PCI_D0);
2230 pci_enable_wake(pdev, PCI_D0, 0);
2231 pci_restore_state(pdev);
2232
2233 retval = pci_enable_device(pdev);
2234 if (retval) {
2235 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2236 return retval;
2237 }
2238
2239 pci_set_master(pdev);
2240 pci_try_set_mwi(pdev);
2241
2242 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2243 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2244 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2245 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2246 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2247 retval = -ENODEV;
2248 goto out_disable_device;
2249 }
2250
2251 /* Initialize the card */
2252 if (twa_reset_sequence(tw_dev, 0)) {
2253 retval = -ENODEV;
2254 goto out_disable_device;
2255 }
2256
2257 /* Now setup the interrupt handler */
2258 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2259 if (retval) {
2260 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2261 retval = -ENODEV;
2262 goto out_disable_device;
2263 }
2264
2265 /* Re-enable MSI if it was in use before suspend */
2266 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2267 pci_enable_msi(pdev);
2268
2269 /* Re-enable interrupts on the card */
2270 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2271
2272 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2273 return 0;
2274
2275 out_disable_device:
2276 scsi_remove_host(host);
2277 pci_disable_device(pdev);
2278
2279 return retval;
2280 } /* End twa_resume() */
2281 #endif
2282
2283 /* PCI Devices supported by this driver */
2284 static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2285 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2286 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2287 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2288 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2289 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2290 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2291 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2292 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2293 { }
2294 };
2295 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2296
2297 /* pci_driver initializer */
2298 static struct pci_driver twa_driver = {
2299 .name = "3w-9xxx",
2300 .id_table = twa_pci_tbl,
2301 .probe = twa_probe,
2302 .remove = twa_remove,
2303 #ifdef CONFIG_PM
2304 .suspend = twa_suspend,
2305 .resume = twa_resume,
2306 #endif
2307 .shutdown = twa_shutdown
2308 };
2309
2310 /* This function is called on driver initialization */
2311 static int __init twa_init(void)
2312 {
2313 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2314
2315 return pci_register_driver(&twa_driver);
2316 } /* End twa_init() */
2317
2318 /* This function is called on driver exit */
2319 static void __exit twa_exit(void)
2320 {
2321 pci_unregister_driver(&twa_driver);
2322 } /* End twa_exit() */
2323
2324 module_init(twa_init);
2325 module_exit(twa_exit);
2326