drivers/scsi/3w-9xxx.c
1 /*
2 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3
4 Written By: Adam Radford <linuxraid@amcc.com>
5 Modifications By: Tom Couch <linuxraid@amcc.com>
6
7 Copyright (C) 2004-2006 Applied Micro Circuits Corporation.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 NO WARRANTY
19 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 solely responsible for determining the appropriateness of using and
24 distributing the Program and assumes all risks associated with its
25 exercise of rights under this Agreement, including but not limited to
26 the risks and costs of program errors, damage to or loss of data,
27 programs or equipment, and unavailability or interruption of operations.
28
29 DISCLAIMER OF LIABILITY
30 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37
38 You should have received a copy of the GNU General Public License
39 along with this program; if not, write to the Free Software
40 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
41
42 Bugs/Comments/Suggestions should be mailed to:
43 linuxraid@amcc.com
44
45    For more information, go to:
46 http://www.amcc.com
47
48 Note: This version of the driver does not contain a bundled firmware
49 image.
50
51 History
52 -------
53 2.26.02.000 - Driver cleanup for kernel submission.
54 2.26.02.001 - Replace schedule_timeout() calls with msleep().
55 2.26.02.002 - Add support for PAE mode.
56 Add lun support.
57 Fix twa_remove() to free irq handler/unregister_chrdev()
58 before shutting down card.
59 Change to new 'change_queue_depth' api.
60 Fix 'handled=1' ISR usage, remove bogus IRQ check.
61 Remove un-needed eh_abort handler.
62 Add support for embedded firmware error strings.
63 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
64 2.26.02.004 - Add support for 9550SX controllers.
65 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
66 2.26.02.006 - Fix 9550SX pchip reset timeout.
67 Add big endian support.
68 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
69 2.26.02.008 - Free irq handler in __twa_shutdown().
70 Serialize reset code.
71 Add support for 9650SE controllers.
72 */
73
74 #include <linux/module.h>
75 #include <linux/reboot.h>
76 #include <linux/spinlock.h>
77 #include <linux/interrupt.h>
78 #include <linux/moduleparam.h>
79 #include <linux/errno.h>
80 #include <linux/types.h>
81 #include <linux/delay.h>
82 #include <linux/pci.h>
83 #include <linux/time.h>
84 #include <linux/mutex.h>
85 #include <asm/io.h>
86 #include <asm/irq.h>
87 #include <asm/uaccess.h>
88 #include <scsi/scsi.h>
89 #include <scsi/scsi_host.h>
90 #include <scsi/scsi_tcq.h>
91 #include <scsi/scsi_cmnd.h>
92 #include "3w-9xxx.h"
93
94 /* Globals */
95 #define TW_DRIVER_VERSION "2.26.02.008"
96 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
97 static unsigned int twa_device_extension_count;
98 static int twa_major = -1;
99 extern struct timezone sys_tz;
100
101 /* Module parameters */
102 MODULE_AUTHOR ("AMCC");
103 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
104 MODULE_LICENSE("GPL");
105 MODULE_VERSION(TW_DRIVER_VERSION);
106
107 /* Function prototypes */
108 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
109 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
110 static char *twa_aen_severity_lookup(unsigned char severity_code);
111 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
112 static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
113 static int twa_chrdev_open(struct inode *inode, struct file *file);
114 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
115 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
116 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
117 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
118 u32 set_features, unsigned short current_fw_srl,
119 unsigned short current_fw_arch_id,
120 unsigned short current_fw_branch,
121 unsigned short current_fw_build,
122 unsigned short *fw_on_ctlr_srl,
123 unsigned short *fw_on_ctlr_arch_id,
124 unsigned short *fw_on_ctlr_branch,
125 unsigned short *fw_on_ctlr_build,
126 u32 *init_connect_result);
127 static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
128 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
129 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
130 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
131 static int twa_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
132 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
133 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
134 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
135 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
136 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
137
138 /* Functions */
139
140 /* Show some statistics about the card */
141 static ssize_t twa_show_stats(struct class_device *class_dev, char *buf)
142 {
143 struct Scsi_Host *host = class_to_shost(class_dev);
144 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
145 unsigned long flags = 0;
146 ssize_t len;
147
148 spin_lock_irqsave(tw_dev->host->host_lock, flags);
149 len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
150 "Current commands posted: %4d\n"
151 "Max commands posted: %4d\n"
152 "Current pending commands: %4d\n"
153 "Max pending commands: %4d\n"
154 "Last sgl length: %4d\n"
155 "Max sgl length: %4d\n"
156 "Last sector count: %4d\n"
157 "Max sector count: %4d\n"
158 "SCSI Host Resets: %4d\n"
159 "AEN's: %4d\n",
160 TW_DRIVER_VERSION,
161 tw_dev->posted_request_count,
162 tw_dev->max_posted_request_count,
163 tw_dev->pending_request_count,
164 tw_dev->max_pending_request_count,
165 tw_dev->sgl_entries,
166 tw_dev->max_sgl_entries,
167 tw_dev->sector_count,
168 tw_dev->max_sector_count,
169 tw_dev->num_resets,
170 tw_dev->aen_count);
171 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
172 return len;
173 } /* End twa_show_stats() */
174
175 /* This function will set a device's queue depth */
176 static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth)
177 {
178 if (queue_depth > TW_Q_LENGTH-2)
179 queue_depth = TW_Q_LENGTH-2;
180 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
181 return queue_depth;
182 } /* End twa_change_queue_depth() */
183
184 /* Create sysfs 'stats' entry */
185 static struct class_device_attribute twa_host_stats_attr = {
186 .attr = {
187 .name = "stats",
188 .mode = S_IRUGO,
189 },
190 .show = twa_show_stats
191 };
192
193 /* Host attributes initializer */
194 static struct class_device_attribute *twa_host_attrs[] = {
195 &twa_host_stats_attr,
196 NULL,
197 };
198
199 /* File operations struct for character device */
200 static const struct file_operations twa_fops = {
201 .owner = THIS_MODULE,
202 .ioctl = twa_chrdev_ioctl,
203 .open = twa_chrdev_open,
204 .release = NULL
205 };
206
207 /* This function will complete an aen request from the isr */
208 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
209 {
210 TW_Command_Full *full_command_packet;
211 TW_Command *command_packet;
212 TW_Command_Apache_Header *header;
213 unsigned short aen;
214 int retval = 1;
215
216 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
217 tw_dev->posted_request_count--;
218 aen = le16_to_cpu(header->status_block.error);
219 full_command_packet = tw_dev->command_packet_virt[request_id];
220 command_packet = &full_command_packet->command.oldcommand;
221
222 /* First check for internal completion of set param for time sync */
223 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
224 /* Keep reading the queue in case there are more aen's */
225 if (twa_aen_read_queue(tw_dev, request_id))
226 goto out2;
227 else {
228 retval = 0;
229 goto out;
230 }
231 }
232
233 switch (aen) {
234 case TW_AEN_QUEUE_EMPTY:
235 /* Quit reading the queue if this is the last one */
236 break;
237 case TW_AEN_SYNC_TIME_WITH_HOST:
238 twa_aen_sync_time(tw_dev, request_id);
239 retval = 0;
240 goto out;
241 default:
242 twa_aen_queue_event(tw_dev, header);
243
244 /* If there are more aen's, keep reading the queue */
245 if (twa_aen_read_queue(tw_dev, request_id))
246 goto out2;
247 else {
248 retval = 0;
249 goto out;
250 }
251 }
252 retval = 0;
253 out2:
254 tw_dev->state[request_id] = TW_S_COMPLETED;
255 twa_free_request_id(tw_dev, request_id);
256 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
257 out:
258 return retval;
259 } /* End twa_aen_complete() */
260
261 /* This function will drain aen queue */
262 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
263 {
264 int request_id = 0;
265 char cdb[TW_MAX_CDB_LEN];
266 TW_SG_Entry sglist[1];
267 int finished = 0, count = 0;
268 TW_Command_Full *full_command_packet;
269 TW_Command_Apache_Header *header;
270 unsigned short aen;
271 int first_reset = 0, queue = 0, retval = 1;
272
273 if (no_check_reset)
274 first_reset = 0;
275 else
276 first_reset = 1;
277
278 full_command_packet = tw_dev->command_packet_virt[request_id];
279 memset(full_command_packet, 0, sizeof(TW_Command_Full));
280
281 /* Initialize cdb */
282 memset(&cdb, 0, TW_MAX_CDB_LEN);
283 cdb[0] = REQUEST_SENSE; /* opcode */
284 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
285
286 /* Initialize sglist */
287 memset(&sglist, 0, sizeof(TW_SG_Entry));
288 sglist[0].length = TW_SECTOR_SIZE;
289 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
290
291 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
292 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
293 goto out;
294 }
295
296 /* Mark internal command */
297 tw_dev->srb[request_id] = NULL;
298
299 do {
300 /* Send command to the board */
301 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
302 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
303 goto out;
304 }
305
306 /* Now poll for completion */
307 if (twa_poll_response(tw_dev, request_id, 30)) {
308 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
309 tw_dev->posted_request_count--;
310 goto out;
311 }
312
313 tw_dev->posted_request_count--;
314 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
315 aen = le16_to_cpu(header->status_block.error);
316 queue = 0;
317 count++;
318
319 switch (aen) {
320 case TW_AEN_QUEUE_EMPTY:
321 if (first_reset != 1)
322 goto out;
323 else
324 finished = 1;
325 break;
326 case TW_AEN_SOFT_RESET:
327 if (first_reset == 0)
328 first_reset = 1;
329 else
330 queue = 1;
331 break;
332 case TW_AEN_SYNC_TIME_WITH_HOST:
333 break;
334 default:
335 queue = 1;
336 }
337
338                 /* Now queue the event info */
339 if (queue)
340 twa_aen_queue_event(tw_dev, header);
341 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
342
343 if (count == TW_MAX_AEN_DRAIN)
344 goto out;
345
346 retval = 0;
347 out:
348 tw_dev->state[request_id] = TW_S_INITIAL;
349 return retval;
350 } /* End twa_aen_drain_queue() */
351
352 /* This function will queue an event */
353 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
354 {
355 u32 local_time;
356 struct timeval time;
357 TW_Event *event;
358 unsigned short aen;
359 char host[16];
360 char *error_str;
361
362 tw_dev->aen_count++;
363
364 /* Fill out event info */
365 event = tw_dev->event_queue[tw_dev->error_index];
366
367 /* Check for clobber */
368 host[0] = '\0';
369 if (tw_dev->host) {
370 sprintf(host, " scsi%d:", tw_dev->host->host_no);
371 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
372 tw_dev->aen_clobber = 1;
373 }
374
375 aen = le16_to_cpu(header->status_block.error);
376 memset(event, 0, sizeof(TW_Event));
377
378 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
379 do_gettimeofday(&time);
380 local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
381 event->time_stamp_sec = local_time;
382 event->aen_code = aen;
383 event->retrieved = TW_AEN_NOT_RETRIEVED;
384 event->sequence_id = tw_dev->error_sequence_id;
385 tw_dev->error_sequence_id++;
386
387 /* Check for embedded error string */
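        /* err_specific_desc can hold two NUL-terminated strings back to back;
           error_str points at the second one and, when non-empty, is printed
           below in place of the AEN table lookup. */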
388 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
389
390 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
391 event->parameter_len = strlen(header->err_specific_desc);
392 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
393 if (event->severity != TW_AEN_SEVERITY_DEBUG)
394 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
395 host,
396 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
397 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
398 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
399 header->err_specific_desc);
400 else
401 tw_dev->aen_count--;
402
403 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
404 tw_dev->event_queue_wrapped = 1;
405 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
406 } /* End twa_aen_queue_event() */
407
408 /* This function will read the aen queue from the isr */
409 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
410 {
411 char cdb[TW_MAX_CDB_LEN];
412 TW_SG_Entry sglist[1];
413 TW_Command_Full *full_command_packet;
414 int retval = 1;
415
416 full_command_packet = tw_dev->command_packet_virt[request_id];
417 memset(full_command_packet, 0, sizeof(TW_Command_Full));
418
419 /* Initialize cdb */
420 memset(&cdb, 0, TW_MAX_CDB_LEN);
421 cdb[0] = REQUEST_SENSE; /* opcode */
422 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
423
424 /* Initialize sglist */
425 memset(&sglist, 0, sizeof(TW_SG_Entry));
426 sglist[0].length = TW_SECTOR_SIZE;
427 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
428
429 /* Mark internal command */
430 tw_dev->srb[request_id] = NULL;
431
432 /* Now post the command packet */
433 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
434 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
435 goto out;
436 }
437 retval = 0;
438 out:
439 return retval;
440 } /* End twa_aen_read_queue() */
441
442 /* This function will look up an AEN severity string */
443 static char *twa_aen_severity_lookup(unsigned char severity_code)
444 {
445 char *retval = NULL;
446
447 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
448 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
449 goto out;
450
451 retval = twa_aen_severity_table[severity_code];
452 out:
453 return retval;
454 } /* End twa_aen_severity_lookup() */
455
456 /* This function will sync firmware time with the host time */
457 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
458 {
459 u32 schedulertime;
460 struct timeval utc;
461 TW_Command_Full *full_command_packet;
462 TW_Command *command_packet;
463 TW_Param_Apache *param;
464 u32 local_time;
465
466 /* Fill out the command packet */
467 full_command_packet = tw_dev->command_packet_virt[request_id];
468 memset(full_command_packet, 0, sizeof(TW_Command_Full));
469 command_packet = &full_command_packet->command.oldcommand;
470 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
471 command_packet->request_id = request_id;
472 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
473 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
474 command_packet->size = TW_COMMAND_SIZE;
475 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
476
477 /* Setup the param */
478 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
479 memset(param, 0, TW_SECTOR_SIZE);
480 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
481 param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
482 param->parameter_size_bytes = cpu_to_le16(4);
483
484 /* Convert system time in UTC to local time seconds since last
485 Sunday 12:00AM */
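        /* 1 Jan 1970 fell on a Thursday: subtracting three days moves the
           reference point to a Sunday midnight, and the remainder modulo
           604800 (seconds per week) is then the number of seconds since the
           most recent Sunday 12:00AM. */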
486 do_gettimeofday(&utc);
487 local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
488 schedulertime = local_time - (3 * 86400);
489 schedulertime = cpu_to_le32(schedulertime % 604800);
490
491 memcpy(param->data, &schedulertime, sizeof(u32));
492
493 /* Mark internal command */
494 tw_dev->srb[request_id] = NULL;
495
496 /* Now post the command */
497 twa_post_command_packet(tw_dev, request_id, 1);
498 } /* End twa_aen_sync_time() */
499
500 /* This function will allocate memory and check if it is correctly aligned */
501 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
502 {
503 int i;
504 dma_addr_t dma_handle;
505 unsigned long *cpu_addr;
506 int retval = 1;
507
508 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
509 if (!cpu_addr) {
510 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
511 goto out;
512 }
513
514 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
515 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
516 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
517 goto out;
518 }
519
520 memset(cpu_addr, 0, size*TW_Q_LENGTH);
521
522 for (i = 0; i < TW_Q_LENGTH; i++) {
523 switch(which) {
524 case 0:
525 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
526 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
527 break;
528 case 1:
529 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
530 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
531 break;
532 }
533 }
534 retval = 0;
535 out:
536 return retval;
537 } /* End twa_allocate_memory() */
538
539 /* This function will check the status register for unexpected bits */
540 static int twa_check_bits(u32 status_reg_value)
541 {
542 int retval = 1;
543
544 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
545 goto out;
546 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
547 goto out;
548
549 retval = 0;
550 out:
551 return retval;
552 } /* End twa_check_bits() */
553
554 /* This function will check the srl and decide if we are compatible */
555 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
556 {
557 int retval = 1;
558 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
559 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
560 u32 init_connect_result = 0;
561
562 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
563 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
564 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
565 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
566 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
567 &fw_on_ctlr_build, &init_connect_result)) {
568 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
569 goto out;
570 }
571
572 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
573 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
574 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
575
576 /* Try base mode compatibility */
577 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
578 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
579 TW_EXTENDED_INIT_CONNECT,
580 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
581 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
582 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
583 &fw_on_ctlr_branch, &fw_on_ctlr_build,
584 &init_connect_result)) {
585 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
586 goto out;
587 }
588 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
589 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
590 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
591 } else {
592 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
593 }
594 goto out;
595 }
596 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
597 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
598 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
599 }
600
601 /* Load rest of compatibility struct */
602 strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
603 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
604 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
605 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
606 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
607 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
608 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
609 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
610 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
611 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
612
613 retval = 0;
614 out:
615 return retval;
616 } /* End twa_check_srl() */
617
618 /* This function handles ioctl for the character device */
619 static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
620 {
621 long timeout;
622 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
623 dma_addr_t dma_handle;
624 int request_id = 0;
625 unsigned int sequence_id = 0;
626 unsigned char event_index, start_index;
627 TW_Ioctl_Driver_Command driver_command;
628 TW_Ioctl_Buf_Apache *tw_ioctl;
629 TW_Lock *tw_lock;
630 TW_Command_Full *full_command_packet;
631 TW_Compatibility_Info *tw_compat_info;
632 TW_Event *event;
633 struct timeval current_time;
634 u32 current_time_ms;
635 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
636 int retval = TW_IOCTL_ERROR_OS_EFAULT;
637 void __user *argp = (void __user *)arg;
638
639 /* Only let one of these through at a time */
640 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
641 retval = TW_IOCTL_ERROR_OS_EINTR;
642 goto out;
643 }
644
645 /* First copy down the driver command */
646 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
647 goto out2;
648
649 /* Check data buffer size */
650 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
651 retval = TW_IOCTL_ERROR_OS_EINVAL;
652 goto out2;
653 }
654
655 /* Hardware can only do multiple of 512 byte transfers */
656 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
657
658 /* Now allocate ioctl buf memory */
659 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
660 if (!cpu_addr) {
661 retval = TW_IOCTL_ERROR_OS_ENOMEM;
662 goto out2;
663 }
664
665 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
666
667 /* Now copy down the entire ioctl */
668 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
669 goto out3;
670
671 /* See which ioctl we are doing */
672 switch (cmd) {
673 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
674 spin_lock_irqsave(tw_dev->host->host_lock, flags);
675 twa_get_request_id(tw_dev, &request_id);
676
677 /* Flag internal command */
678 tw_dev->srb[request_id] = NULL;
679
680 /* Flag chrdev ioctl */
681 tw_dev->chrdev_request_id = request_id;
682
683 full_command_packet = &tw_ioctl->firmware_command;
684
685 /* Load request id and sglist for both command types */
686 twa_load_sgl(full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
687
688 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
689
690 /* Now post the command packet to the controller */
691 twa_post_command_packet(tw_dev, request_id, 1);
692 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
693
694 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
695
696 /* Now wait for command to complete */
697 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
698
699 /* We timed out, and didn't get an interrupt */
700 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
701 /* Now we need to reset the board */
702 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
703 tw_dev->host->host_no, TW_DRIVER, 0xc,
704 cmd);
705 retval = TW_IOCTL_ERROR_OS_EIO;
706 twa_reset_device_extension(tw_dev, 1);
707 goto out3;
708 }
709
710 /* Now copy in the command packet response */
711 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
712
713 /* Now complete the io */
714 spin_lock_irqsave(tw_dev->host->host_lock, flags);
715 tw_dev->posted_request_count--;
716 tw_dev->state[request_id] = TW_S_COMPLETED;
717 twa_free_request_id(tw_dev, request_id);
718 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
719 break;
720 case TW_IOCTL_GET_COMPATIBILITY_INFO:
721 tw_ioctl->driver_command.status = 0;
722                 /* Copy compatibility struct into ioctl data buffer */
723 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
724 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
725 break;
726 case TW_IOCTL_GET_LAST_EVENT:
727 if (tw_dev->event_queue_wrapped) {
728 if (tw_dev->aen_clobber) {
729 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
730 tw_dev->aen_clobber = 0;
731 } else
732 tw_ioctl->driver_command.status = 0;
733 } else {
734 if (!tw_dev->error_index) {
735 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
736 break;
737 }
738 tw_ioctl->driver_command.status = 0;
739 }
740 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
741 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
742 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
743 break;
744 case TW_IOCTL_GET_FIRST_EVENT:
745 if (tw_dev->event_queue_wrapped) {
746 if (tw_dev->aen_clobber) {
747 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
748 tw_dev->aen_clobber = 0;
749 } else
750 tw_ioctl->driver_command.status = 0;
751 event_index = tw_dev->error_index;
752 } else {
753 if (!tw_dev->error_index) {
754 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
755 break;
756 }
757 tw_ioctl->driver_command.status = 0;
758 event_index = 0;
759 }
760 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
761 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
762 break;
763 case TW_IOCTL_GET_NEXT_EVENT:
764 event = (TW_Event *)tw_ioctl->data_buffer;
765 sequence_id = event->sequence_id;
766 tw_ioctl->driver_command.status = 0;
767
768 if (tw_dev->event_queue_wrapped) {
769 if (tw_dev->aen_clobber) {
770 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
771 tw_dev->aen_clobber = 0;
772 }
773 start_index = tw_dev->error_index;
774 } else {
775 if (!tw_dev->error_index) {
776 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
777 break;
778 }
779 start_index = 0;
780 }
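                /* Sequence ids increase by one per queued event, so the entry
                   just after the given sequence_id sits
                   (sequence_id - oldest sequence id + 1) slots past
                   start_index in the circular queue. */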
781 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
782
783 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
784 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
785 tw_dev->aen_clobber = 1;
786 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
787 break;
788 }
789 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
790 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
791 break;
792 case TW_IOCTL_GET_PREVIOUS_EVENT:
793 event = (TW_Event *)tw_ioctl->data_buffer;
794 sequence_id = event->sequence_id;
795 tw_ioctl->driver_command.status = 0;
796
797 if (tw_dev->event_queue_wrapped) {
798 if (tw_dev->aen_clobber) {
799 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
800 tw_dev->aen_clobber = 0;
801 }
802 start_index = tw_dev->error_index;
803 } else {
804 if (!tw_dev->error_index) {
805 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
806 break;
807 }
808 start_index = 0;
809 }
810 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
811
812 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
813 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
814 tw_dev->aen_clobber = 1;
815 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
816 break;
817 }
818 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
819 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
820 break;
821 case TW_IOCTL_GET_LOCK:
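        /* Cooperative, time-based ioctl lock: it is granted when forced, when
           currently free, or when the previous holder's window has expired;
           otherwise the caller is told how much time remains. */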
822 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
823 do_gettimeofday(&current_time);
824 current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
825
826 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
827 tw_dev->ioctl_sem_lock = 1;
828 tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
829 tw_ioctl->driver_command.status = 0;
830 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
831 } else {
832 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
833 tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
834 }
835 break;
836 case TW_IOCTL_RELEASE_LOCK:
837 if (tw_dev->ioctl_sem_lock == 1) {
838 tw_dev->ioctl_sem_lock = 0;
839 tw_ioctl->driver_command.status = 0;
840 } else {
841 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
842 }
843 break;
844 default:
845 retval = TW_IOCTL_ERROR_OS_ENOTTY;
846 goto out3;
847 }
848
849 /* Now copy the entire response to userspace */
850 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
851 retval = 0;
852 out3:
853 /* Now free ioctl buf memory */
854 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
855 out2:
856 mutex_unlock(&tw_dev->ioctl_lock);
857 out:
858 return retval;
859 } /* End twa_chrdev_ioctl() */
860
861 /* This function handles open for the character device */
862 static int twa_chrdev_open(struct inode *inode, struct file *file)
863 {
864 unsigned int minor_number;
865 int retval = TW_IOCTL_ERROR_OS_ENODEV;
866
867 minor_number = iminor(inode);
868 if (minor_number >= twa_device_extension_count)
869 goto out;
870 retval = 0;
871 out:
872 return retval;
873 } /* End twa_chrdev_open() */
874
875 /* This function will print readable messages from status register errors */
876 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
877 {
878 int retval = 1;
879
880 /* Check for various error conditions and handle them appropriately */
881 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
882 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
883 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
884 }
885
886 if (status_reg_value & TW_STATUS_PCI_ABORT) {
887 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
888 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
889 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
890 }
891
892 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
893 if ((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) || (!test_bit(TW_IN_RESET, &tw_dev->flags)))
894 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
895 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
896 }
897
898 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
899 if (tw_dev->reset_print == 0) {
900 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
901 tw_dev->reset_print = 1;
902 }
903 goto out;
904 }
905 retval = 0;
906 out:
907 return retval;
908 } /* End twa_decode_bits() */
909
910 /* This function will empty the response queue */
911 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
912 {
913 u32 status_reg_value, response_que_value;
914 int count = 0, retval = 1;
915
916 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
917
918 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
919 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
920 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
921 count++;
922 }
923 if (count == TW_MAX_RESPONSE_DRAIN)
924 goto out;
925
926 retval = 0;
927 out:
928 return retval;
929 } /* End twa_empty_response_queue() */
930
931 /* This function will clear the pchip/response queue on 9550SX */
932 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
933 {
934 u32 response_que_value = 0;
935 unsigned long before;
936 int retval = 1;
937
938 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) ||
939 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE)) {
940 before = jiffies;
941 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
942 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
943 msleep(1);
944 if (time_after(jiffies, before + HZ * 30))
945 goto out;
946 }
947 /* P-chip settle time */
948 msleep(500);
949 retval = 0;
950 } else
951 retval = 0;
952 out:
953 return retval;
954 } /* End twa_empty_response_queue_large() */
955
956 /* This function passes sense keys from firmware to scsi layer */
957 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
958 {
959 TW_Command_Full *full_command_packet;
960 unsigned short error;
961 int retval = 1;
962 char *error_str;
963
964 full_command_packet = tw_dev->command_packet_virt[request_id];
965
966 /* Check for embedded error string */
967 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
968
969 /* Don't print error for Logical unit not supported during rollcall */
970 error = le16_to_cpu(full_command_packet->header.status_block.error);
971 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
972 if (print_host)
973 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
974 tw_dev->host->host_no,
975 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
976 full_command_packet->header.status_block.error,
977 error_str[0] == '\0' ?
978 twa_string_lookup(twa_error_table,
979 full_command_packet->header.status_block.error) : error_str,
980 full_command_packet->header.err_specific_desc);
981 else
982 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
983 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
984 full_command_packet->header.status_block.error,
985 error_str[0] == '\0' ?
986 twa_string_lookup(twa_error_table,
987 full_command_packet->header.status_block.error) : error_str,
988 full_command_packet->header.err_specific_desc);
989 }
990
991 if (copy_sense) {
992 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
993 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
994 retval = TW_ISR_DONT_RESULT;
995 goto out;
996 }
997 retval = 0;
998 out:
999 return retval;
1000 } /* End twa_fill_sense() */
1001
1002 /* This function will free up device extension resources */
1003 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1004 {
1005 if (tw_dev->command_packet_virt[0])
1006 pci_free_consistent(tw_dev->tw_pci_dev,
1007 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1008 tw_dev->command_packet_virt[0],
1009 tw_dev->command_packet_phys[0]);
1010
1011 if (tw_dev->generic_buffer_virt[0])
1012 pci_free_consistent(tw_dev->tw_pci_dev,
1013 TW_SECTOR_SIZE*TW_Q_LENGTH,
1014 tw_dev->generic_buffer_virt[0],
1015 tw_dev->generic_buffer_phys[0]);
1016
1017 kfree(tw_dev->event_queue[0]);
1018 } /* End twa_free_device_extension() */
1019
1020 /* This function will free a request id */
1021 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1022 {
1023 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1024 tw_dev->state[request_id] = TW_S_FINISHED;
1025 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1026 } /* End twa_free_request_id() */
1027
1028 /* This function will get parameter table entries from the firmware */
1029 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1030 {
1031 TW_Command_Full *full_command_packet;
1032 TW_Command *command_packet;
1033 TW_Param_Apache *param;
1034 unsigned long param_value;
1035 void *retval = NULL;
1036
1037 /* Setup the command packet */
1038 full_command_packet = tw_dev->command_packet_virt[request_id];
1039 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1040 command_packet = &full_command_packet->command.oldcommand;
1041
1042 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1043 command_packet->size = TW_COMMAND_SIZE;
1044 command_packet->request_id = request_id;
1045 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1046
1047 /* Now setup the param */
1048 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1049 memset(param, 0, TW_SECTOR_SIZE);
1050 param->table_id = cpu_to_le16(table_id | 0x8000);
1051 param->parameter_id = cpu_to_le16(parameter_id);
1052 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1053 param_value = tw_dev->generic_buffer_phys[request_id];
1054
1055 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(param_value);
1056 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1057
1058 /* Post the command packet to the board */
1059 twa_post_command_packet(tw_dev, request_id, 1);
1060
1061 /* Poll for completion */
1062 if (twa_poll_response(tw_dev, request_id, 30))
1063 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1064 else
1065 retval = (void *)&(param->data[0]);
1066
1067 tw_dev->posted_request_count--;
1068 tw_dev->state[request_id] = TW_S_INITIAL;
1069
1070 return retval;
1071 } /* End twa_get_param() */
1072
1073 /* This function will assign an available request id */
1074 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1075 {
1076 *request_id = tw_dev->free_queue[tw_dev->free_head];
1077 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1078 tw_dev->state[*request_id] = TW_S_STARTED;
1079 } /* End twa_get_request_id() */
1080
1081 /* This function will send an initconnection command to controller */
1082 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1083 u32 set_features, unsigned short current_fw_srl,
1084 unsigned short current_fw_arch_id,
1085 unsigned short current_fw_branch,
1086 unsigned short current_fw_build,
1087 unsigned short *fw_on_ctlr_srl,
1088 unsigned short *fw_on_ctlr_arch_id,
1089 unsigned short *fw_on_ctlr_branch,
1090 unsigned short *fw_on_ctlr_build,
1091 u32 *init_connect_result)
1092 {
1093 TW_Command_Full *full_command_packet;
1094 TW_Initconnect *tw_initconnect;
1095 int request_id = 0, retval = 1;
1096
1097 /* Initialize InitConnection command packet */
1098 full_command_packet = tw_dev->command_packet_virt[request_id];
1099 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1100 full_command_packet->header.header_desc.size_header = 128;
1101
1102 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1103 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1104 tw_initconnect->request_id = request_id;
1105 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1106 tw_initconnect->features = set_features;
1107
1108 /* Turn on 64-bit sgl support if we need to */
1109 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1110
1111 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1112
1113 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1114 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1115 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1116 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1117 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1118 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1119 } else
1120 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1121
1122 /* Send command packet to the board */
1123 twa_post_command_packet(tw_dev, request_id, 1);
1124
1125 /* Poll for completion */
1126 if (twa_poll_response(tw_dev, request_id, 30)) {
1127 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1128 } else {
1129 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1130 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1131 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1132 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1133 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1134 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1135 }
1136 retval = 0;
1137 }
1138
1139 tw_dev->posted_request_count--;
1140 tw_dev->state[request_id] = TW_S_INITIAL;
1141
1142 return retval;
1143 } /* End twa_initconnection() */
1144
1145 /* This function will initialize the fields of a device extension */
1146 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1147 {
1148 int i, retval = 1;
1149
1150 /* Initialize command packet buffers */
1151 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1152 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1153 goto out;
1154 }
1155
1156 /* Initialize generic buffer */
1157 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1158 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1159 goto out;
1160 }
1161
1162 /* Allocate event info space */
1163 tw_dev->event_queue[0] = kmalloc(sizeof(TW_Event) * TW_Q_LENGTH, GFP_KERNEL);
1164 if (!tw_dev->event_queue[0]) {
1165 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1166 goto out;
1167 }
1168
1169 memset(tw_dev->event_queue[0], 0, sizeof(TW_Event) * TW_Q_LENGTH);
1170
1171 for (i = 0; i < TW_Q_LENGTH; i++) {
1172 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1173 tw_dev->free_queue[i] = i;
1174 tw_dev->state[i] = TW_S_INITIAL;
1175 }
1176
1177 tw_dev->pending_head = TW_Q_START;
1178 tw_dev->pending_tail = TW_Q_START;
1179 tw_dev->free_head = TW_Q_START;
1180 tw_dev->free_tail = TW_Q_START;
1181 tw_dev->error_sequence_id = 1;
1182 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1183
1184 mutex_init(&tw_dev->ioctl_lock);
1185 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1186
1187 retval = 0;
1188 out:
1189 return retval;
1190 } /* End twa_initialize_device_extension() */
1191
1192 /* This function is the interrupt service routine */
1193 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1194 {
1195 int request_id, error = 0;
1196 u32 status_reg_value;
1197 TW_Response_Queue response_que;
1198 TW_Command_Full *full_command_packet;
1199 TW_Command *command_packet;
1200 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1201 int handled = 0;
1202
1203 /* Get the per adapter lock */
1204 spin_lock(tw_dev->host->host_lock);
1205
1206 /* Read the registers */
1207 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1208
1209 /* Check if this is our interrupt, otherwise bail */
1210 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1211 goto twa_interrupt_bail;
1212
1213 handled = 1;
1214
1215 /* If we are resetting, bail */
1216 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1217 goto twa_interrupt_bail;
1218
1219 /* Check controller for errors */
1220 if (twa_check_bits(status_reg_value)) {
1221 if (twa_decode_bits(tw_dev, status_reg_value)) {
1222 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1223 goto twa_interrupt_bail;
1224 }
1225 }
1226
1227 /* Handle host interrupt */
1228 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1229 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1230
1231 /* Handle attention interrupt */
1232 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1233 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1234 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1235 twa_get_request_id(tw_dev, &request_id);
1236
1237 error = twa_aen_read_queue(tw_dev, request_id);
1238 if (error) {
1239 tw_dev->state[request_id] = TW_S_COMPLETED;
1240 twa_free_request_id(tw_dev, request_id);
1241 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1242 }
1243 }
1244 }
1245
1246 /* Handle command interrupt */
1247 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1248 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1249 /* Drain as many pending commands as we can */
1250 while (tw_dev->pending_request_count > 0) {
1251 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1252 if (tw_dev->state[request_id] != TW_S_PENDING) {
1253 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1254 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1255 goto twa_interrupt_bail;
1256 }
1257 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1258 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1259 tw_dev->pending_request_count--;
1260 } else {
1261 /* If we get here, we will continue re-posting on the next command interrupt */
1262 break;
1263 }
1264 }
1265 }
1266
1267 /* Handle response interrupt */
1268 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1269
1270 /* Drain the response queue from the board */
1271 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1272 /* Complete the response */
1273 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1274 request_id = TW_RESID_OUT(response_que.response_id);
1275 full_command_packet = tw_dev->command_packet_virt[request_id];
1276 error = 0;
1277 command_packet = &full_command_packet->command.oldcommand;
1278 /* Check for command packet errors */
1279 if (full_command_packet->command.newcommand.status != 0) {
1280 if (tw_dev->srb[request_id] != 0) {
1281 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1282 } else {
1283 /* Skip ioctl error prints */
1284 if (request_id != tw_dev->chrdev_request_id) {
1285 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1286 }
1287 }
1288 }
1289
1290 /* Check for correct state */
1291 if (tw_dev->state[request_id] != TW_S_POSTED) {
1292 if (tw_dev->srb[request_id] != 0) {
1293 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1294 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1295 goto twa_interrupt_bail;
1296 }
1297 }
1298
1299 /* Check for internal command completion */
1300 if (tw_dev->srb[request_id] == 0) {
1301 if (request_id != tw_dev->chrdev_request_id) {
1302 if (twa_aen_complete(tw_dev, request_id))
1303 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1304 } else {
1305 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1306 wake_up(&tw_dev->ioctl_wqueue);
1307 }
1308 } else {
1309 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1310                            /* If no error, the command was a success */
1311 if (error == 0) {
1312 tw_dev->srb[request_id]->result = (DID_OK << 16);
1313 }
1314
1315 /* If error, command failed */
1316 if (error == 1) {
1317 /* Ask for a host reset */
1318 tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1319 }
1320
1321 /* Report residual bytes for single sgl */
1322 if ((tw_dev->srb[request_id]->use_sg <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1323 if (full_command_packet->command.newcommand.sg_list[0].length < tw_dev->srb[request_id]->request_bufflen)
1324 tw_dev->srb[request_id]->resid = tw_dev->srb[request_id]->request_bufflen - full_command_packet->command.newcommand.sg_list[0].length;
1325 }
1326
1327 /* Now complete the io */
1328 tw_dev->state[request_id] = TW_S_COMPLETED;
1329 twa_free_request_id(tw_dev, request_id);
1330 tw_dev->posted_request_count--;
1331 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1332 twa_unmap_scsi_data(tw_dev, request_id);
1333 }
1334
1335 /* Check for valid status after each drain */
1336 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1337 if (twa_check_bits(status_reg_value)) {
1338 if (twa_decode_bits(tw_dev, status_reg_value)) {
1339 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1340 goto twa_interrupt_bail;
1341 }
1342 }
1343 }
1344 }
1345
1346 twa_interrupt_bail:
1347 spin_unlock(tw_dev->host->host_lock);
1348 return IRQ_RETVAL(handled);
1349 } /* End twa_interrupt() */
1350
1351 /* This function will load the request id and various sgls for ioctls */
1352 static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1353 {
1354 TW_Command *oldcommand;
1355 TW_Command_Apache *newcommand;
1356 TW_SG_Entry *sgl;
1357
1358 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1359 newcommand = &full_command_packet->command.newcommand;
1360 newcommand->request_id__lunl =
1361 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1362 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1363 newcommand->sg_list[0].length = cpu_to_le32(length);
1364 newcommand->sgl_entries__lunh =
1365 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1));
1366 } else {
1367 oldcommand = &full_command_packet->command.oldcommand;
1368 oldcommand->request_id = request_id;
1369
1370 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1371 /* Load the sg list */
1372 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1373 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1374 sgl->length = cpu_to_le32(length);
1375
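                        /* Assumption: the command size field counts 32-bit
                           words; on a 32-bit kernel with a 64-bit dma_addr_t
                           the SG address needs one extra word, hence the
                           increment below. */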
1376 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1377 oldcommand->size += 1;
1378 }
1379 }
1380 } /* End twa_load_sgl() */
1381
1382 /* This function will perform a pci-dma mapping for a scatter gather list */
1383 static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1384 {
1385 int use_sg;
1386 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1387 struct pci_dev *pdev = tw_dev->tw_pci_dev;
1388 int retval = 0;
1389
1390 if (cmd->use_sg == 0)
1391 goto out;
1392
1393 use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
1394
1395 if (use_sg == 0) {
1396 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1397 goto out;
1398 }
1399
1400 cmd->SCp.phase = TW_PHASE_SGLIST;
1401 cmd->SCp.have_data_in = use_sg;
1402 retval = use_sg;
1403 out:
1404 return retval;
1405 } /* End twa_map_scsi_sg_data() */
1406
1407 /* This function will perform a pci-dma map for a single buffer */
1408 static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int request_id)
1409 {
1410 dma_addr_t mapping;
1411 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1412 struct pci_dev *pdev = tw_dev->tw_pci_dev;
1413 dma_addr_t retval = 0;
1414
1415 if (cmd->request_bufflen == 0) {
1416 retval = 0;
1417 goto out;
1418 }
1419
1420 mapping = pci_map_single(pdev, cmd->request_buffer, cmd->request_bufflen, DMA_BIDIRECTIONAL);
1421
1422 if (mapping == 0) {
1423 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Failed to map page");
1424 goto out;
1425 }
1426
1427 cmd->SCp.phase = TW_PHASE_SINGLE;
1428 cmd->SCp.have_data_in = mapping;
1429 retval = mapping;
1430 out:
1431 return retval;
1432 } /* End twa_map_scsi_single_data() */
1433
1434 /* This function will poll for a response interrupt of a request */
1435 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1436 {
1437 int retval = 1, found = 0, response_request_id;
1438 TW_Response_Queue response_queue;
1439 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1440
1441 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1442 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1443 response_request_id = TW_RESID_OUT(response_queue.response_id);
1444 if (request_id != response_request_id) {
1445 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1446 goto out;
1447 }
1448 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1449 if (full_command_packet->command.newcommand.status != 0) {
1450 /* bad response */
1451 twa_fill_sense(tw_dev, request_id, 0, 0);
1452 goto out;
1453 }
1454 found = 1;
1455 } else {
1456 if (full_command_packet->command.oldcommand.status != 0) {
1457 /* bad response */
1458 twa_fill_sense(tw_dev, request_id, 0, 0);
1459 goto out;
1460 }
1461 found = 1;
1462 }
1463 }
1464
1465 if (found)
1466 retval = 0;
1467 out:
1468 return retval;
1469 } /* End twa_poll_response() */
1470
1471 /* This function will poll the status register for a flag */
1472 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1473 {
1474 u32 status_reg_value;
1475 unsigned long before;
1476 int retval = 1;
1477
1478 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1479 before = jiffies;
1480
1481 if (twa_check_bits(status_reg_value))
1482 twa_decode_bits(tw_dev, status_reg_value);
1483
1484 while ((status_reg_value & flag) != flag) {
1485 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1486
1487 if (twa_check_bits(status_reg_value))
1488 twa_decode_bits(tw_dev, status_reg_value);
1489
1490 if (time_after(jiffies, before + HZ * seconds))
1491 goto out;
1492
1493 msleep(50);
1494 }
1495 retval = 0;
1496 out:
1497 return retval;
1498 } /* End twa_poll_status() */
1499
1500 /* This function will poll the status register for disappearance of a flag */
1501 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1502 {
1503 u32 status_reg_value;
1504 unsigned long before;
1505 int retval = 1;
1506
1507 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1508 before = jiffies;
1509
1510 if (twa_check_bits(status_reg_value))
1511 twa_decode_bits(tw_dev, status_reg_value);
1512
1513 while ((status_reg_value & flag) != 0) {
1514 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1515 if (twa_check_bits(status_reg_value))
1516 twa_decode_bits(tw_dev, status_reg_value);
1517
1518 if (time_after(jiffies, before + HZ * seconds))
1519 goto out;
1520
1521 msleep(50);
1522 }
1523 retval = 0;
1524 out:
1525 return retval;
1526 } /* End twa_poll_status_gone() */
1527
1528 /* This function will attempt to post a command packet to the board */
1529 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1530 {
1531 u32 status_reg_value;
1532 dma_addr_t command_que_value;
1533 int retval = 1;
1534
1535 command_que_value = tw_dev->command_packet_phys[request_id];
1536
1537 /* For 9650SE write low 4 bytes first */
1538 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) {
1539 command_que_value += TW_COMMAND_OFFSET;
1540 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1541 }
1542
1543 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1544
1545 if (twa_check_bits(status_reg_value))
1546 twa_decode_bits(tw_dev, status_reg_value);
1547
1548 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1549
1550 /* Only pend internal driver commands */
1551 if (!internal) {
1552 retval = SCSI_MLQUEUE_HOST_BUSY;
1553 goto out;
1554 }
1555
1556 /* Couldn't post the command packet, so we do it later */
1557 if (tw_dev->state[request_id] != TW_S_PENDING) {
1558 tw_dev->state[request_id] = TW_S_PENDING;
1559 tw_dev->pending_request_count++;
1560 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1561 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1562 }
1563 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1564 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1565 }
1566 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1567 goto out;
1568 } else {
1569 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) {
1570 /* Now write upper 4 bytes */
1571 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1572 } else {
1573 if (sizeof(dma_addr_t) > 4) {
1574 command_que_value += TW_COMMAND_OFFSET;
1575 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1576 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1577 } else {
1578 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1579 }
1580 }
1581 tw_dev->state[request_id] = TW_S_POSTED;
1582 tw_dev->posted_request_count++;
1583 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1584 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1585 }
1586 }
1587 retval = 0;
1588 out:
1589 return retval;
1590 } /* End twa_post_command_packet() */
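/*
 * Editor's note: posting a command means writing the packet's bus address
 * into the controller's command queue register.  On configurations where
 * dma_addr_t is wider than 32 bits the address is written as two 32-bit
 * halves, low word first, roughly:
 *
 *	writel((u32)addr, reg);
 *	writel((u32)((u64)addr >> 32), reg + 0x4);
 *
 * (the 9650SE uses its "large" register and writes the low half before the
 * status check).  When the queue is full, or other requests are already
 * pending, host commands are returned to the midlayer with
 * SCSI_MLQUEUE_HOST_BUSY, while internal driver commands are parked on the
 * pending queue and posted later, once the command interrupt unmasked here
 * fires.
 */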
1591
1592 /* This function will reset a device extension */
1593 static int twa_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
1594 {
1595 int i = 0;
1596 int retval = 1;
1597 unsigned long flags = 0;
1598
1599 set_bit(TW_IN_RESET, &tw_dev->flags);
1600 TW_DISABLE_INTERRUPTS(tw_dev);
1601 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1602 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1603
1604 /* Abort all requests that are in progress */
1605 for (i = 0; i < TW_Q_LENGTH; i++) {
1606 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1607 (tw_dev->state[i] != TW_S_INITIAL) &&
1608 (tw_dev->state[i] != TW_S_COMPLETED)) {
1609 if (tw_dev->srb[i]) {
1610 tw_dev->srb[i]->result = (DID_RESET << 16);
1611 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
1612 twa_unmap_scsi_data(tw_dev, i);
1613 }
1614 }
1615 }
1616
1617 /* Reset queues and counts */
1618 for (i = 0; i < TW_Q_LENGTH; i++) {
1619 tw_dev->free_queue[i] = i;
1620 tw_dev->state[i] = TW_S_INITIAL;
1621 }
1622 tw_dev->free_head = TW_Q_START;
1623 tw_dev->free_tail = TW_Q_START;
1624 tw_dev->posted_request_count = 0;
1625 tw_dev->pending_request_count = 0;
1626 tw_dev->pending_head = TW_Q_START;
1627 tw_dev->pending_tail = TW_Q_START;
1628 tw_dev->reset_print = 0;
1629
1630 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1631
1632 if (twa_reset_sequence(tw_dev, 1))
1633 goto out;
1634
1635 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1636 clear_bit(TW_IN_RESET, &tw_dev->flags);
1637 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1638
1639 retval = 0;
1640 out:
1641 return retval;
1642 } /* End twa_reset_device_extension() */
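/*
 * Editor's note: the abort loop above completes every outstanding command
 * with DID_RESET in the host byte of scsi_cmnd->result (hence the "<< 16"
 * shift), under the host lock, before the per-request state and queue
 * indices are reinitialized and twa_reset_sequence() brings the hardware
 * back up.
 */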
1643
1644 /* This function will reset a controller */
1645 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1646 {
1647 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1648
1649 while (tries < TW_MAX_RESET_TRIES) {
1650 if (do_soft_reset) {
1651 TW_SOFT_RESET(tw_dev);
1652 /* Clear pchip/response queue on 9550SX */
1653 if (twa_empty_response_queue_large(tw_dev)) {
1654 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1655 do_soft_reset = 1;
1656 tries++;
1657 continue;
1658 }
1659 }
1660
1661 /* Make sure controller is in a good state */
1662 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1663 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1664 do_soft_reset = 1;
1665 tries++;
1666 continue;
1667 }
1668
1669 /* Empty response queue */
1670 if (twa_empty_response_queue(tw_dev)) {
1671 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1672 do_soft_reset = 1;
1673 tries++;
1674 continue;
1675 }
1676
1677 flashed = 0;
1678
1679 /* Check for compatibility/flash */
1680 if (twa_check_srl(tw_dev, &flashed)) {
1681 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1682 do_soft_reset = 1;
1683 tries++;
1684 continue;
1685 } else {
1686 if (flashed) {
1687 tries++;
1688 continue;
1689 }
1690 }
1691
1692 /* Drain the AEN queue */
1693 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1694 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1695 do_soft_reset = 1;
1696 tries++;
1697 continue;
1698 }
1699
1700 /* If we got here, controller is in a good state */
1701 retval = 0;
1702 goto out;
1703 }
1704 out:
1705 return retval;
1706 } /* End twa_reset_sequence() */
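/*
 * Editor's note: each step of the reset sequence that fails simply forces
 * a soft reset on the next pass and increments 'tries'; only after
 * TW_MAX_RESET_TRIES consecutive failures does the function give up and
 * leave retval at 1.
 */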
1707
1708 /* This function returns unit geometry in cylinders/heads/sectors */
1709 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1710 {
1711 int heads, sectors, cylinders;
1712 TW_Device_Extension *tw_dev;
1713
1714 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1715
1716 if (capacity >= 0x200000) {
1717 heads = 255;
1718 sectors = 63;
1719 cylinders = sector_div(capacity, heads * sectors);
1720 } else {
1721 heads = 64;
1722 sectors = 32;
1723 cylinders = sector_div(capacity, heads * sectors);
1724 }
1725
1726 geom[0] = heads;
1727 geom[1] = sectors;
1728 geom[2] = cylinders;
1729
1730 return 0;
1731 } /* End twa_scsi_biosparam() */
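/*
 * Editor's note: units of at least 0x200000 512-byte sectors (1 GiB) are
 * reported with the conventional 255-head / 63-sector translation, smaller
 * ones with 64 / 32.  As a worked example, a 1 TiB unit (2147483648
 * sectors) divided by 255 * 63 = 16065 comes to 133674 cylinders.  Note
 * that sector_div(n, base) divides n in place and returns the remainder,
 * not the quotient, so code reusing this pattern typically reads the
 * quotient back out of 'capacity' after the call.
 */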
1732
1733 /* This is the new scsi eh reset function */
1734 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1735 {
1736 TW_Device_Extension *tw_dev = NULL;
1737 int retval = FAILED;
1738
1739 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1740
1741 tw_dev->num_resets++;
1742
1743 sdev_printk(KERN_WARNING, SCpnt->device,
1744 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1745 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1746
1747 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1748 mutex_lock(&tw_dev->ioctl_lock);
1749
1750 /* Now reset the card and some of the device extension data */
1751 if (twa_reset_device_extension(tw_dev, 0)) {
1752 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1753 goto out;
1754 }
1755
1756 retval = SUCCESS;
1757 out:
1758 mutex_unlock(&tw_dev->ioctl_lock);
1759 return retval;
1760 } /* End twa_scsi_eh_reset() */
1761
1762 /* This is the main scsi queue function to handle scsi opcodes */
1763 static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1764 {
1765 int request_id, retval;
1766 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1767
1768 /* If we are resetting due to timed out ioctl, report as busy */
1769 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1770 retval = SCSI_MLQUEUE_HOST_BUSY;
1771 goto out;
1772 }
1773
1774 /* Check if this FW supports luns */
1775 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1776 SCpnt->result = (DID_BAD_TARGET << 16);
1777 done(SCpnt);
1778 retval = 0;
1779 goto out;
1780 }
1781
1782 /* Save done function into scsi_cmnd struct */
1783 SCpnt->scsi_done = done;
1784
1785 /* Get a free request id */
1786 twa_get_request_id(tw_dev, &request_id);
1787
1788 /* Save the scsi command for use by the ISR */
1789 tw_dev->srb[request_id] = SCpnt;
1790
1791 /* Initialize phase to zero */
1792 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1793
1794 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1795 switch (retval) {
1796 case SCSI_MLQUEUE_HOST_BUSY:
1797 twa_free_request_id(tw_dev, request_id);
1798 break;
1799 case 1:
1800 tw_dev->state[request_id] = TW_S_COMPLETED;
1801 twa_free_request_id(tw_dev, request_id);
1802 SCpnt->result = (DID_ERROR << 16);
1803 done(SCpnt);
1804 retval = 0;
1805 }
1806 out:
1807 return retval;
1808 } /* End twa_scsi_queue() */
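/*
 * Editor's note: the return value follows the queuecommand contract: 0
 * tells the midlayer the command was accepted (or already completed via
 * done()), while SCSI_MLQUEUE_HOST_BUSY asks it to hold the command and
 * retry later, as happens here while a reset is in progress or the
 * controller queue is full.
 */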
1809
1810 /* This function hands scsi cdbs to the firmware */
1811 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1812 {
1813 TW_Command_Full *full_command_packet;
1814 TW_Command_Apache *command_packet;
1815 u32 num_sectors = 0x0;
1816 int i, sg_count;
1817 struct scsi_cmnd *srb = NULL;
1818 struct scatterlist *sglist = NULL;
1819 dma_addr_t buffaddr = 0x0;
1820 int retval = 1;
1821
1822 if (tw_dev->srb[request_id]) {
1823 if (tw_dev->srb[request_id]->request_buffer) {
1824 sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
1825 }
1826 srb = tw_dev->srb[request_id];
1827 }
1828
1829 /* Initialize command packet */
1830 full_command_packet = tw_dev->command_packet_virt[request_id];
1831 full_command_packet->header.header_desc.size_header = 128;
1832 full_command_packet->header.status_block.error = 0;
1833 full_command_packet->header.status_block.severity__reserved = 0;
1834
1835 command_packet = &full_command_packet->command.newcommand;
1836 command_packet->status = 0;
1837 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1838
1839 /* We forced 16 byte cdb use earlier */
1840 if (!cdb)
1841 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1842 else
1843 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1844
1845 if (srb) {
1846 command_packet->unit = srb->device->id;
1847 command_packet->request_id__lunl =
1848 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1849 } else {
1850 command_packet->request_id__lunl =
1851 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1852 command_packet->unit = 0;
1853 }
1854
1855 command_packet->sgl_offset = 16;
1856
1857 if (!sglistarg) {
1858 /* Map sglist from scsi layer to cmd packet */
1859 if (tw_dev->srb[request_id]->use_sg == 0) {
1860 if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH) {
1861 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1862 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1863 if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)
1864 memcpy(tw_dev->generic_buffer_virt[request_id], tw_dev->srb[request_id]->request_buffer, tw_dev->srb[request_id]->request_bufflen);
1865 } else {
1866 buffaddr = twa_map_scsi_single_data(tw_dev, request_id);
1867 if (buffaddr == 0)
1868 goto out;
1869
1870 command_packet->sg_list[0].address = TW_CPU_TO_SGL(buffaddr);
1871 command_packet->sg_list[0].length = cpu_to_le32(tw_dev->srb[request_id]->request_bufflen);
1872 }
1873 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), 1));
1874
1875 if (command_packet->sg_list[0].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1876 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2d, "Found unaligned address during execute scsi");
1877 goto out;
1878 }
1879 }
1880
1881 if (tw_dev->srb[request_id]->use_sg > 0) {
1882 if ((tw_dev->srb[request_id]->use_sg == 1) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) {
1883 if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL) {
1884 struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
1885 char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1886 memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
1887 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1888 }
1889 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1890 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1891 } else {
1892 sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
1893 if (sg_count == 0)
1894 goto out;
1895
1896 for (i = 0; i < sg_count; i++) {
1897 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(&sglist[i]));
1898 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(&sglist[i]));
1899 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1900 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1901 goto out;
1902 }
1903 }
1904 }
1905 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), tw_dev->srb[request_id]->use_sg));
1906 }
1907 } else {
1908 /* Internal cdb post */
1909 for (i = 0; i < use_sg; i++) {
1910 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1911 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1912 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1913 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1914 goto out;
1915 }
1916 }
1917 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1918 }
1919
1920 if (srb) {
1921 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1922 num_sectors = (u32)srb->cmnd[4];
1923
1924 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1925 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1926 }
1927
1928 /* Update sector statistic */
1929 tw_dev->sector_count = num_sectors;
1930 if (tw_dev->sector_count > tw_dev->max_sector_count)
1931 tw_dev->max_sector_count = tw_dev->sector_count;
1932
1933 /* Update SG statistics */
1934 if (srb) {
1935 tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg;
1936 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1937 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1938 }
1939
1940 /* Now post the command to the board */
1941 if (srb) {
1942 retval = twa_post_command_packet(tw_dev, request_id, 0);
1943 } else {
1944 twa_post_command_packet(tw_dev, request_id, 1);
1945 retval = 0;
1946 }
1947 out:
1948 return retval;
1949 } /* End twa_scsiop_execute_scsi() */
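/*
 * Editor's note: transfers smaller than TW_MIN_SGL_LENGTH are bounced
 * through the request's preallocated generic_buffer, so the firmware is
 * always handed at least TW_MIN_SGL_LENGTH bytes; larger transfers are
 * DMA-mapped directly, and every resulting address is checked against
 * TW_ALIGNMENT_9000_SGL.  TW_REQ_LUN_IN() splits the LUN across the two
 * command fields: the low nibble travels with the request id and the
 * remaining bits with the SGL entry count.
 */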
1950
1951 /* This function completes an execute scsi operation */
1952 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1953 {
1954 if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH &&
1955 (tw_dev->srb[request_id]->sc_data_direction == DMA_FROM_DEVICE ||
1956 tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)) {
1957 if (tw_dev->srb[request_id]->use_sg == 0) {
1958 memcpy(tw_dev->srb[request_id]->request_buffer,
1959 tw_dev->generic_buffer_virt[request_id],
1960 tw_dev->srb[request_id]->request_bufflen);
1961 }
1962 if (tw_dev->srb[request_id]->use_sg == 1) {
1963 struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
1964 char *buf;
1965 unsigned long flags = 0;
1966 local_irq_save(flags);
1967 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1968 memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
1969 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1970 local_irq_restore(flags);
1971 }
1972 }
1973 } /* End twa_scsiop_execute_scsi_complete() */
1974
1975 /* This function tells the controller to shut down */
1976 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1977 {
1978 /* Disable interrupts */
1979 TW_DISABLE_INTERRUPTS(tw_dev);
1980
1981 /* Free up the IRQ */
1982 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1983
1984 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1985
1986 /* Tell the card we are shutting down */
1987 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1988 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1989 } else {
1990 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1991 }
1992
1993 /* Clear all interrupts just before exit */
1994 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1995 } /* End __twa_shutdown() */
1996
1997 /* Wrapper for __twa_shutdown */
1998 static void twa_shutdown(struct pci_dev *pdev)
1999 {
2000 struct Scsi_Host *host = pci_get_drvdata(pdev);
2001 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2002
2003 __twa_shutdown(tw_dev);
2004 } /* End twa_shutdown() */
2005
2006 /* This function will look up a string */
2007 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
2008 {
2009 int index;
2010
2011 for (index = 0; ((code != table[index].code) &&
2012 (table[index].text != (char *)0)); index++);
2013 return(table[index].text);
2014 } /* End twa_string_lookup() */
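/*
 * Editor's note: the lookup walks the table until the code matches or the
 * NULL-text terminator entry is reached, and returns whatever text is
 * stored at that position, so callers rely on every twa_message_type
 * table ending with such a terminator.
 */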
2015
2016 /* This function will perform a pci-dma unmap */
2017 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
2018 {
2019 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
2020 struct pci_dev *pdev = tw_dev->tw_pci_dev;
2021
2022 switch(cmd->SCp.phase) {
2023 case TW_PHASE_SINGLE:
2024 pci_unmap_single(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL);
2025 break;
2026 case TW_PHASE_SGLIST:
2027 pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
2028 break;
2029 }
2030 } /* End twa_unmap_scsi_data() */
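/*
 * Editor's note: cmd->SCp.phase records which kind of mapping was created
 * for this command (TW_PHASE_SINGLE from twa_map_scsi_single_data(),
 * TW_PHASE_SGLIST from the scatter-gather mapping path), and
 * cmd->SCp.have_data_in holds the single-buffer DMA handle, so the
 * matching unmap call can be issued here and from the reset path.
 */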
2031
2032 /* scsi_host_template initializer */
2033 static struct scsi_host_template driver_template = {
2034 .module = THIS_MODULE,
2035 .name = "3ware 9000 Storage Controller",
2036 .queuecommand = twa_scsi_queue,
2037 .eh_host_reset_handler = twa_scsi_eh_reset,
2038 .bios_param = twa_scsi_biosparam,
2039 .change_queue_depth = twa_change_queue_depth,
2040 .can_queue = TW_Q_LENGTH-2,
2041 .this_id = -1,
2042 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
2043 .max_sectors = TW_MAX_SECTORS,
2044 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2045 .use_clustering = ENABLE_CLUSTERING,
2046 .shost_attrs = twa_host_attrs,
2047 .emulated = 1
2048 };
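/*
 * Editor's note: can_queue is set to TW_Q_LENGTH - 2 rather than the full
 * queue depth, presumably so that a couple of request ids always remain
 * available for the driver's own internal and ioctl commands even when
 * the midlayer has the host fully loaded.
 */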
2049
2050 /* This function will probe and initialize a card */
2051 static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2052 {
2053 struct Scsi_Host *host = NULL;
2054 TW_Device_Extension *tw_dev;
2055 u32 mem_addr;
2056 int retval = -ENODEV;
2057
2058 retval = pci_enable_device(pdev);
2059 if (retval) {
2060 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2061 goto out_disable_device;
2062 }
2063
2064 pci_set_master(pdev);
2065
2066 retval = pci_set_dma_mask(pdev, sizeof(dma_addr_t) > 4 ? DMA_64BIT_MASK : DMA_32BIT_MASK);
2067 if (retval) {
2068 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2069 goto out_disable_device;
2070 }
2071
2072 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2073 if (!host) {
2074 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2075 retval = -ENOMEM;
2076 goto out_disable_device;
2077 }
2078 tw_dev = (TW_Device_Extension *)host->hostdata;
2079
2080 memset(tw_dev, 0, sizeof(TW_Device_Extension));
2081
2082 /* Save values to device extension */
2083 tw_dev->host = host;
2084 tw_dev->tw_pci_dev = pdev;
2085
2086 if (twa_initialize_device_extension(tw_dev)) {
2087 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2088 goto out_free_device_extension;
2089 }
2090
2091 /* Request IO regions */
2092 retval = pci_request_regions(pdev, "3w-9xxx");
2093 if (retval) {
2094 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2095 goto out_free_device_extension;
2096 }
2097
2098 if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
2099 mem_addr = pci_resource_start(pdev, 1);
2100 else
2101 mem_addr = pci_resource_start(pdev, 2);
2102
2103 /* Save base address */
2104 tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
2105 if (!tw_dev->base_addr) {
2106 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2107 goto out_release_mem_region;
2108 }
2109
2110 /* Disable interrupts on the card */
2111 TW_DISABLE_INTERRUPTS(tw_dev);
2112
2113 /* Initialize the card */
2114 if (twa_reset_sequence(tw_dev, 0))
2115 goto out_iounmap;
2116
2117 /* Set host specific parameters */
2118 if (pdev->device == PCI_DEVICE_ID_3WARE_9650SE)
2119 host->max_id = TW_MAX_UNITS_9650SE;
2120 else
2121 host->max_id = TW_MAX_UNITS;
2122
2123 host->max_cmd_len = TW_MAX_CDB_LEN;
2124
2125 /* Channels aren't supported by the adapter */
2126 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2127 host->max_channel = 0;
2128
2129 /* Register the card with the kernel SCSI layer */
2130 retval = scsi_add_host(host, &pdev->dev);
2131 if (retval) {
2132 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2133 goto out_iounmap;
2134 }
2135
2136 pci_set_drvdata(pdev, host);
2137
2138 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%x, IRQ: %d.\n",
2139 host->host_no, mem_addr, pdev->irq);
2140 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2141 host->host_no,
2142 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2143 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2144 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2145 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2146 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2147 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2148
2149 /* Now setup the interrupt handler */
2150 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2151 if (retval) {
2152 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2153 goto out_remove_host;
2154 }
2155
2156 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2157 twa_device_extension_count++;
2158
2159 /* Re-enable interrupts on the card */
2160 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2161
2162 /* Finally, scan the host */
2163 scsi_scan_host(host);
2164
2165 if (twa_major == -1) {
2166 if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2167 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2168 }
2169 return 0;
2170
2171 out_remove_host:
2172 scsi_remove_host(host);
2173 out_iounmap:
2174 iounmap(tw_dev->base_addr);
2175 out_release_mem_region:
2176 pci_release_regions(pdev);
2177 out_free_device_extension:
2178 twa_free_device_extension(tw_dev);
2179 scsi_host_put(host);
2180 out_disable_device:
2181 pci_disable_device(pdev);
2182
2183 return retval;
2184 } /* End twa_probe() */
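/*
 * Editor's note: the error labels above unwind in the reverse order of the
 * setup steps (remove host, iounmap, release regions, free the device
 * extension and host, disable the PCI device), which is the usual pattern
 * for a probe routine built around goto-based cleanup.
 */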
2185
2186 /* This function is called to remove a device */
2187 static void twa_remove(struct pci_dev *pdev)
2188 {
2189 struct Scsi_Host *host = pci_get_drvdata(pdev);
2190 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2191
2192 scsi_remove_host(tw_dev->host);
2193
2194 /* Unregister character device */
2195 if (twa_major >= 0) {
2196 unregister_chrdev(twa_major, "twa");
2197 twa_major = -1;
2198 }
2199
2200 /* Shutdown the card */
2201 __twa_shutdown(tw_dev);
2202
2203 /* Free IO remapping */
2204 iounmap(tw_dev->base_addr);
2205
2206 /* Free up the mem region */
2207 pci_release_regions(pdev);
2208
2209 /* Free up device extension resources */
2210 twa_free_device_extension(tw_dev);
2211
2212 scsi_host_put(tw_dev->host);
2213 pci_disable_device(pdev);
2214 twa_device_extension_count--;
2215 } /* End twa_remove() */
2216
2217 /* PCI Devices supported by this driver */
2218 static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2219 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2221 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2223 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2224 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2225 { }
2226 };
2227 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2228
2229 /* pci_driver initializer */
2230 static struct pci_driver twa_driver = {
2231 .name = "3w-9xxx",
2232 .id_table = twa_pci_tbl,
2233 .probe = twa_probe,
2234 .remove = twa_remove,
2235 .shutdown = twa_shutdown
2236 };
2237
2238 /* This function is called on driver initialization */
2239 static int __init twa_init(void)
2240 {
2241 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2242
2243 return pci_register_driver(&twa_driver);
2244 } /* End twa_init() */
2245
2246 /* This function is called on driver exit */
2247 static void __exit twa_exit(void)
2248 {
2249 pci_unregister_driver(&twa_driver);
2250 } /* End twa_exit() */
2251
2252 module_init(twa_init);
2253 module_exit(twa_exit);
2254