/***************************************************************************
                          dpti.c  -  description
                          -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

    July 30, 2001 First version being submitted
    for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"
/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1), (-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};




/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * Functions
 *============================================================================
 */

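/*
 * Small helpers for 64-bit DMA support: dpt_dma64() reports whether this
 * HBA was brought up in 64-bit DMA mode, and dma_high()/dma_low() split a
 * dma_addr_t into the two 32-bit halves used in 64-bit SG elements.
 */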
static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

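/*
 * Read the firmware's blink-LED code from the debug region, but only when
 * the flag byte says one is being reported (0xbc). Returns 0 when no
 * blink code is available.
 */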
static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);

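/*
 * Find all Adaptec I2O controllers on the PCI bus and walk them through
 * the I2O bring-up sequence: activate (INIT -> HOLD), build the system
 * table, bring online (HOLD -> OPERATIONAL), then read and parse each
 * IOP's logical configuration table (LCT) before registering the SCSI
 * hosts and the control device node.
 */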
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes cannot be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}

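/*
 * Send a standard SCSI INQUIRY to the adapter itself, wrapped in an
 * Adaptec/DPT private I2O SCSI_EXEC message, and use the response to
 * fill in the vendor/model/firmware string reported by adpt_info().
 */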
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;	// SINGLE SGE, 64 bit
	else
		reqlen = 14;	// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|6 /* cmd len*/;

	mptr = msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr += 4;
	lenptr = mptr++;	/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;	/* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return;
}

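/*
 * Per-device setup: enable tagged queuing when the device supports it,
 * leaving one slot free, otherwise fall back to untagged depth 1.
 */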
static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

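/*
 * queuecommand entry point: look up (or create) the per-device state,
 * then translate the SCSI command into an I2O message and post it to
 * the IOP via adpt_scsi_to_i2o().
 */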
static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need a way to restart the scsi-core's queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads = -1;
	int sectors = -1;
	int cylinders = -1;

	// *** First let's set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else 0x80000 or greater
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len = sprintf(buffer, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size, host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len += sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regard to offset.
	 * begin is always less than offset.  len is relative to begin; it
	 * is the number of bytes written past begin.
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}

/*
 * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

/*
 * Go from a u32 'context' to a struct scsi_cmnd * .
 * This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 * Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 * Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error_handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from the eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode = adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode = adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode = adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

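/*
 * Set up one controller: enable the PCI device, pick 32- or 64-bit DMA,
 * map its register BAR(s) (Raptor cards use a split-BAR layout with the
 * message unit behind BAR1), allocate and chain the adpt_hba structure,
 * and hook up the shared interrupt handler.
 */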
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 * See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >= 0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else {	// Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {	// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 * The original Adaptec 64 bit driver has this comment here:
	 * "x86_64 machines need more optimal mappings"
	 *
	 * I assume some HBAs report ridiculously large mappings
	 * and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys, hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size);
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq(pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1, p1 = p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0; i < pHba->top_scsi_channel; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match */
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d = d->next_lun; d; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}

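/*
 * Post a message to the IOP and sleep until the reply arrives or the
 * timeout expires.  Each request is tagged with a 15-bit id kept on
 * adpt_post_wait_queue; the interrupt path matches the reply back via
 * adpt_i2o_post_wait_complete() and wakes us.
 */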
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}

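/*
 * Claim a free message frame from the post port (polling for up to 30
 * seconds), copy the message into it, then write the frame offset back
 * to the post port to hand it to the IOP.
 */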
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies, timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}

static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could not find task %d in wait queue\n", context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"         %d\n", p1->id);
	}
	return;
}

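/*
 * Issue an I2O_CMD_ADAPTER_RESET to the IOP: quiesce it first (unless
 * this is the initial bring-up), post the reset message with a 4-byte
 * DMA status buffer, then poll that buffer and the post port until the
 * IOP signals that the reset has completed.
 */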
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies, timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status, 0, 4);

	msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0;
	msg[6] = dma_low(addr);
	msg[7] = dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies, timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because the controller may wake up and
			   corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for a message frame to become available,
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies, timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because the controller may
				   wake up and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}

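/*
 * Walk the logical configuration table: record hidden devices only to
 * track the maximum channel/id/lun, create i2o_device entries for the
 * visible ones, discover the bus adapter ports, and finally build the
 * per-channel adpt_device lookup table used by adpt_find_device().
 */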
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10];	// larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i = 0; i < max; i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8)&0xff;
			if(bus_no >= MAX_CHANNEL) {	// Something is wrong, skip it
				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d == NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt - but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28) >= 0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something is wrong, skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) >= 0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = (buf[2]>>8)&0xff;
				if(bus_no >= MAX_CHANNEL) {	// Something is wrong, skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
					     pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 * Each I2O controller has a chain of devices on it - these match
 * the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller = pHba;
	d->owner = NULL;
	d->next = pHba->devices;
	d->prev = NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev = d;
	}
	pHba->devices = d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
//		mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}

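/*
 * ioctl passthrough: copy a caller-built I2O message into the kernel,
 * replace any simple SG elements with bounce buffers (copying user data
 * in for write transfers), post the message synchronously, then copy
 * read buffers and the reply frame back out to user space.
 */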
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4;	// Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000;	// IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1)
		return -EBUSY;

	memset(sg_list, 0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name, sg_count);
			kfree (reply);
			return -EINVAL;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d  buffer number %d of %d\n",
						pHba->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p;	// sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p, (void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		// This state stops any new commands from entering the
		// controller while processing the ioctl
//		pHba->state |= DPTI_STATE_IOCTL;
//		We can't set this now - The scsi subsystem sets host_blocked and
//		the queue empties and stops.  We need a way to restart the queue
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
//		pHba->state &= ~DPTI_STATE_IOCTL;
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
	} while(rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy back the Scatter Gather buffers to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle the sg copy operation correctly
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(!(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus, sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg + sg_offset);
		kfree (reply);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}
	return rcode;
}
1982
1983 #if defined __ia64__
1984 static void adpt_ia64_info(sysInfo_S* si)
1985 {
1986 // This is all the info we need for now
1987 // We will add more info as our new
1988 // management utility requires it
1989 si->processorType = PROC_IA64;
1990 }
1991 #endif
1992
1993 #if defined __sparc__
1994 static void adpt_sparc_info(sysInfo_S* si)
1995 {
1996 // This is all the info we need for now
1997 // We will add more info as our new
1998 // management utility requires it
1999 si->processorType = PROC_ULTRASPARC;
2000 }
2001 #endif
2002 #if defined __alpha__
2003 static void adpt_alpha_info(sysInfo_S* si)
2004 {
2005 // This is all the info we need for now
2006 // We will add more info as our new
2007 // management utility requires it
2008 si->processorType = PROC_ALPHA;
2009 }
2010 #endif
2011
2012 #if defined __i386__
2013 static void adpt_i386_info(sysInfo_S* si)
2014 {
2015 // This is all the info we need for now
2016 // We will add more info as our new
2017 // management utility requires it
2018 switch (boot_cpu_data.x86) {
2019 case CPU_386:
2020 si->processorType = PROC_386;
2021 break;
2022 case CPU_486:
2023 si->processorType = PROC_486;
2024 break;
2025 case CPU_586:
2026 si->processorType = PROC_PENTIUM;
2027 break;
2028 default: // Just in case
2029 si->processorType = PROC_PENTIUM;
2030 break;
2031 }
2032 }
2033 #endif
2034
2035 /*
2036  * This routine returns information about the system. It does not affect
2037  * any logic, and if the info is wrong it doesn't matter.
2038 */
2039
2040 /* Get all the info we can not get from kernel services */
2041 static int adpt_system_info(void __user *buffer)
2042 {
2043 sysInfo_S si;
2044
2045 memset(&si, 0, sizeof(si));
2046
2047 si.osType = OS_LINUX;
2048 si.osMajorVersion = 0;
2049 si.osMinorVersion = 0;
2050 si.osRevision = 0;
2051 si.busType = SI_PCI_BUS;
2052 si.processorFamily = DPTI_sig.dsProcessorFamily;
2053
2054 #if defined __i386__
2055 adpt_i386_info(&si);
2056 #elif defined (__ia64__)
2057 adpt_ia64_info(&si);
2058 #elif defined(__sparc__)
2059 adpt_sparc_info(&si);
2060 #elif defined (__alpha__)
2061 adpt_alpha_info(&si);
2062 #else
2063 si.processorType = 0xff ;
2064 #endif
2065 if (copy_to_user(buffer, &si, sizeof(si))){
2066 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
2067 return -EFAULT;
2068 }
2069
2070 return 0;
2071 }
2072
2073 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
2074 {
2075 int minor;
2076 int error = 0;
2077 adpt_hba* pHba;
2078 ulong flags = 0;
2079 void __user *argp = (void __user *)arg;
2080
2081 minor = iminor(inode);
2082 if (minor >= DPTI_MAX_HBA){
2083 return -ENXIO;
2084 }
2085 mutex_lock(&adpt_configuration_lock);
2086 for (pHba = hba_chain; pHba; pHba = pHba->next) {
2087 if (pHba->unit == minor) {
2088 break; /* found adapter */
2089 }
2090 }
2091 mutex_unlock(&adpt_configuration_lock);
2092 if(pHba == NULL){
2093 return -ENXIO;
2094 }
2095
2096 while((volatile u32) pHba->state & DPTI_STATE_RESET )
2097 schedule_timeout_uninterruptible(2);
2098
2099 switch (cmd) {
2100 // TODO: handle 3 cases
2101 case DPT_SIGNATURE:
2102 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2103 return -EFAULT;
2104 }
2105 break;
2106 case I2OUSRCMD:
2107 return adpt_i2o_passthru(pHba, argp);
2108
2109 case DPT_CTRLINFO:{
2110 drvrHBAinfo_S HbaInfo;
2111
2112 #define FLG_OSD_PCI_VALID 0x0001
2113 #define FLG_OSD_DMA 0x0002
2114 #define FLG_OSD_I2O 0x0004
2115 memset(&HbaInfo, 0, sizeof(HbaInfo));
2116 HbaInfo.drvrHBAnum = pHba->unit;
2117 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2118 HbaInfo.blinkState = adpt_read_blink_led(pHba);
2119 HbaInfo.pciBusNum = pHba->pDev->bus->number;
2120 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2121 HbaInfo.Interrupt = pHba->pDev->irq;
2122 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2123 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2124 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2125 return -EFAULT;
2126 }
2127 break;
2128 }
2129 case DPT_SYSINFO:
2130 return adpt_system_info(argp);
2131 case DPT_BLINKLED:{
2132 u32 value;
2133 value = (u32)adpt_read_blink_led(pHba);
2134 if (copy_to_user(argp, &value, sizeof(value))) {
2135 return -EFAULT;
2136 }
2137 break;
2138 }
2139 case I2ORESETCMD:
2140 if(pHba->host)
2141 spin_lock_irqsave(pHba->host->host_lock, flags);
2142 adpt_hba_reset(pHba);
2143 if(pHba->host)
2144 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2145 break;
2146 case I2ORESCANCMD:
2147 adpt_rescan(pHba);
2148 break;
2149 default:
2150 return -EINVAL;
2151 }
2152
2153 return error;
2154 }
2155
2156 static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2157 {
2158 struct inode *inode;
2159 long ret;
2160
2161 inode = file->f_dentry->d_inode;
2162
2163 mutex_lock(&adpt_mutex);
2164 ret = adpt_ioctl(inode, file, cmd, arg);
2165 mutex_unlock(&adpt_mutex);
2166
2167 return ret;
2168 }
2169
2170 #ifdef CONFIG_COMPAT
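/*
 * 32-bit compat entry point. The commands listed below are forwarded
 * to adpt_ioctl() unchanged; anything else gets -ENOIOCTLCMD.
 */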
2171 static long compat_adpt_ioctl(struct file *file,
2172 unsigned int cmd, unsigned long arg)
2173 {
2174 struct inode *inode;
2175 long ret;
2176
2177 inode = file->f_dentry->d_inode;
2178
2179 mutex_lock(&adpt_mutex);
2180
2181 switch(cmd) {
2182 case DPT_SIGNATURE:
2183 case I2OUSRCMD:
2184 case DPT_CTRLINFO:
2185 case DPT_SYSINFO:
2186 case DPT_BLINKLED:
2187 case I2ORESETCMD:
2188 case I2ORESCANCMD:
2189 case (DPT_TARGET_BUSY & 0xFFFF):
2190 case DPT_TARGET_BUSY:
2191 ret = adpt_ioctl(inode, file, cmd, arg);
2192 break;
2193 default:
2194 ret = -ENOIOCTLCMD;
2195 }
2196
2197 mutex_unlock(&adpt_mutex);
2198
2199 return ret;
2200 }
2201 #endif
2202
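/*
 * Interrupt handler. Drains the outbound reply FIFO and dispatches each
 * frame by the context word at reply offset 8: bit 0x40000000 marks an
 * ioctl passthru reply, bit 0x80000000 a post-wait completion, and
 * anything else is treated as a normal SCSI command completion.
 */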
2203 static irqreturn_t adpt_isr(int irq, void *dev_id)
2204 {
2205 struct scsi_cmnd* cmd;
2206 adpt_hba* pHba = dev_id;
2207 u32 m;
2208 void __iomem *reply;
2209 u32 status=0;
2210 u32 context;
2211 ulong flags = 0;
2212 int handled = 0;
2213
2214 if (pHba == NULL){
2215 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2216 return IRQ_NONE;
2217 }
2218 if(pHba->host)
2219 spin_lock_irqsave(pHba->host->host_lock, flags);
2220
2221 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2222 m = readl(pHba->reply_port);
2223 if(m == EMPTY_QUEUE){
2224 // Try twice then give up
2225 rmb();
2226 m = readl(pHba->reply_port);
2227 if(m == EMPTY_QUEUE){
2228 // This really should not happen
2229 printk(KERN_ERR"dpti: Could not get reply frame\n");
2230 goto out;
2231 }
2232 }
2233 if (pHba->reply_pool_pa <= m &&
2234 m < pHba->reply_pool_pa +
2235 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2236 reply = (u8 *)pHba->reply_pool +
2237 (m - pHba->reply_pool_pa);
2238 } else {
2239 /* Ick, we should *never* be here */
2240 printk(KERN_ERR "dpti: reply frame not from pool\n");
2241 reply = (u8 *)bus_to_virt(m);
2242 }
2243
2244 if (readl(reply) & MSG_FAIL) {
2245 u32 old_m = readl(reply+28);
2246 void __iomem *msg;
2247 u32 old_context;
2248 PDEBUG("%s: Failed message\n",pHba->name);
2249 if(old_m >= 0x100000){
2250 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2251 writel(m,pHba->reply_port);
2252 continue;
2253 }
2254 // Transaction context is 0 in failed reply frame
2255 msg = pHba->msg_addr_virt + old_m;
2256 old_context = readl(msg+12);
2257 writel(old_context, reply+12);
2258 adpt_send_nop(pHba, old_m);
2259 }
2260 context = readl(reply+8);
2261 if(context & 0x40000000){ // IOCTL
2262 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2263 if( p != NULL) {
2264 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2265 }
2266 // All IOCTLs will also be post wait
2267 }
2268 if(context & 0x80000000){ // Post wait message
2269 status = readl(reply+16);
2270 if(status >> 24){
2271 status &= 0xffff; /* Get detail status */
2272 } else {
2273 status = I2O_POST_WAIT_OK;
2274 }
2275 if(!(context & 0x40000000)) {
2276 cmd = adpt_cmd_from_context(pHba,
2277 readl(reply+12));
2278 if(cmd != NULL) {
2279 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2280 }
2281 }
2282 adpt_i2o_post_wait_complete(context, status);
2283 } else { // SCSI message
2284 cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2285 if(cmd != NULL){
2286 scsi_dma_unmap(cmd);
2287 if(cmd->serial_number != 0) { // If not timedout
2288 adpt_i2o_to_scsi(reply, cmd);
2289 }
2290 }
2291 }
2292 writel(m, pHba->reply_port);
2293 wmb();
2294 rmb();
2295 }
2296 handled = 1;
2297 out: if(pHba->host)
2298 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2299 return IRQ_RETVAL(handled);
2300 }
2301
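/*
 * Build and post an I2O_CMD_SCSI_EXEC message for a SCSI command.
 * Frame layout as constructed below: msg[1] addresses the target TID,
 * msg[3] carries the command's transaction context (used as the queue
 * tag), msg[4]/msg[5] are the DPT private command block, msg[6] holds
 * the SCB flags and CDB length; a fixed 16-byte CDB block follows,
 * then the total data length and the scatter/gather list.
 */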
2302 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2303 {
2304 int i;
2305 u32 msg[MAX_MESSAGE_SIZE];
2306 u32* mptr;
2307 u32* lptr;
2308 u32 *lenptr;
2309 int direction;
2310 int scsidir;
2311 int nseg;
2312 u32 len;
2313 u32 reqlen;
2314 s32 rcode;
2315 dma_addr_t addr;
2316
2317 memset(msg, 0 , sizeof(msg));
2318 len = scsi_bufflen(cmd);
2319 direction = 0x00000000;
2320
2321 scsidir = 0x00000000; // DATA NO XFER
2322 if(len) {
2323 /*
2324 * Set SCBFlags to indicate if data is being transferred
2325 * in or out, or no data transfer
2326 * Note: Do not have to verify index is less than 0 since
2327 * cmd->cmnd[0] is an unsigned char
2328 */
2329 switch(cmd->sc_data_direction){
2330 case DMA_FROM_DEVICE:
2331 scsidir =0x40000000; // DATA IN (iop<--dev)
2332 break;
2333 case DMA_TO_DEVICE:
2334 direction=0x04000000; // SGL OUT
2335 scsidir =0x80000000; // DATA OUT (iop-->dev)
2336 break;
2337 case DMA_NONE:
2338 break;
2339 case DMA_BIDIRECTIONAL:
2340 scsidir =0x40000000; // DATA IN (iop<--dev)
2341 // Assume In - and continue;
2342 break;
2343 default:
2344 			printk(KERN_WARNING"%s: unsupported data direction for scsi opcode 0x%x.\n",
2345 pHba->name, cmd->cmnd[0]);
2346 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2347 cmd->scsi_done(cmd);
2348 return 0;
2349 }
2350 }
2351 // msg[0] is set later
2352 // I2O_CMD_SCSI_EXEC
2353 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2354 msg[2] = 0;
2355 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
2356 // Our cards use the transaction context as the tag for queueing
2357 // Adaptec/DPT Private stuff
2358 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2359 msg[5] = d->tid;
2360 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2361 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2362 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2363 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2364 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2365
2366 mptr=msg+7;
2367
2368 // Write SCSI command into the message - always 16 byte block
2369 memset(mptr, 0, 16);
2370 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2371 mptr+=4;
2372 lenptr=mptr++; /* Remember me - fill in when we know */
2373 if (dpt_dma64(pHba)) {
2374 reqlen = 16; // SINGLE SGE
2375 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2376 *mptr++ = 1 << PAGE_SHIFT;
2377 } else {
2378 reqlen = 14; // SINGLE SGE
2379 }
2380 /* Now fill in the SGList and command */
2381
2382 nseg = scsi_dma_map(cmd);
2383 BUG_ON(nseg < 0);
2384 if (nseg) {
2385 struct scatterlist *sg;
2386
2387 len = 0;
2388 scsi_for_each_sg(cmd, sg, nseg, i) {
2389 lptr = mptr;
2390 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2391 len+=sg_dma_len(sg);
2392 addr = sg_dma_address(sg);
2393 *mptr++ = dma_low(addr);
2394 if (dpt_dma64(pHba))
2395 *mptr++ = dma_high(addr);
2396 /* Make this an end of list */
2397 if (i == nseg - 1)
2398 *lptr = direction|0xD0000000|sg_dma_len(sg);
2399 }
2400 reqlen = mptr - msg;
2401 *lenptr = len;
2402
2403 if(cmd->underflow && len != cmd->underflow){
2404 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2405 len, cmd->underflow);
2406 }
2407 } else {
2408 *lenptr = len = 0;
2409 reqlen = 12;
2410 }
2411
2412 /* Stick the headers on */
2413 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2414
2415 	// Send it on its way
2416 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2417 if (rcode == 0) {
2418 return 0;
2419 }
2420 return rcode;
2421 }
2422
2423
2424 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2425 {
2426 struct Scsi_Host *host;
2427
2428 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2429 if (host == NULL) {
2430 		printk(KERN_WARNING "%s: scsi_host_alloc returned NULL\n", pHba->name);
2431 return -1;
2432 }
2433 host->hostdata[0] = (unsigned long)pHba;
2434 pHba->host = host;
2435
2436 host->irq = pHba->pDev->irq;
2437 /* no IO ports, so don't have to set host->io_port and
2438 * host->n_io_port
2439 */
2440 host->io_port = 0;
2441 host->n_io_port = 0;
2442 /* see comments in scsi_host.h */
2443 host->max_id = 16;
2444 host->max_lun = 256;
2445 host->max_channel = pHba->top_scsi_channel + 1;
2446 host->cmd_per_lun = 1;
2447 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2448 host->sg_tablesize = pHba->sg_tablesize;
2449 host->can_queue = pHba->post_fifo_size;
2450
2451 return 0;
2452 }
2453
2454
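/*
 * Translate an I2O reply frame into a SCSI midlayer result. Offsets
 * read below: +0 message flags, +16 detailed status (low byte = device
 * status, next byte = HBA status), +20 bytes transferred (used for the
 * residual), +28 start of sense data for check conditions.
 */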
2455 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2456 {
2457 adpt_hba* pHba;
2458 u32 hba_status;
2459 u32 dev_status;
2460 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2461 // I know this would look cleaner if I just read bytes
2462 // but the model I have been using for all the rest of the
2463 // io is in 4 byte words - so I keep that model
2464 u16 detailed_status = readl(reply+16) &0xffff;
2465 dev_status = (detailed_status & 0xff);
2466 hba_status = detailed_status >> 8;
2467
2468 // calculate resid for sg
2469 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2470
2471 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2472
2473 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2474
2475 if(!(reply_flags & MSG_FAIL)) {
2476 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2477 case I2O_SCSI_DSC_SUCCESS:
2478 cmd->result = (DID_OK << 16);
2479 // handle underflow
2480 if (readl(reply+20) < cmd->underflow) {
2481 cmd->result = (DID_ERROR <<16);
2482 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2483 }
2484 break;
2485 case I2O_SCSI_DSC_REQUEST_ABORTED:
2486 cmd->result = (DID_ABORT << 16);
2487 break;
2488 case I2O_SCSI_DSC_PATH_INVALID:
2489 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2490 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2491 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2492 case I2O_SCSI_DSC_NO_ADAPTER:
2493 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2494 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2495 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2496 cmd->result = (DID_TIME_OUT << 16);
2497 break;
2498 case I2O_SCSI_DSC_ADAPTER_BUSY:
2499 case I2O_SCSI_DSC_BUS_BUSY:
2500 cmd->result = (DID_BUS_BUSY << 16);
2501 break;
2502 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2503 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2504 cmd->result = (DID_RESET << 16);
2505 break;
2506 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2507 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2508 cmd->result = (DID_PARITY << 16);
2509 break;
2510 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2511 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2512 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2513 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2514 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2515 case I2O_SCSI_DSC_DATA_OVERRUN:
2516 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2517 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2518 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2519 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2520 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2521 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2522 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2523 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2524 case I2O_SCSI_DSC_INVALID_CDB:
2525 case I2O_SCSI_DSC_LUN_INVALID:
2526 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2527 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2528 case I2O_SCSI_DSC_NO_NEXUS:
2529 case I2O_SCSI_DSC_CDB_RECEIVED:
2530 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2531 case I2O_SCSI_DSC_QUEUE_FROZEN:
2532 case I2O_SCSI_DSC_REQUEST_INVALID:
2533 default:
2534 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2535 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2536 hba_status, dev_status, cmd->cmnd[0]);
2537 cmd->result = (DID_ERROR << 16);
2538 break;
2539 }
2540
2541 // copy over the request sense data if it was a check
2542 // condition status
2543 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2544 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2545 // Copy over the sense data
2546 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2547 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2548 cmd->sense_buffer[2] == DATA_PROTECT ){
2549 /* This is to handle an array failed */
2550 cmd->result = (DID_TIME_OUT << 16);
2551 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2552 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2553 hba_status, dev_status, cmd->cmnd[0]);
2554
2555 }
2556 }
2557 } else {
2558 		/* In this condition we could not talk to the tid;
2559 		 * the card rejected it. We should signal a retry
2560 		 * for a limited number of attempts.
2561 */
2562 cmd->result = (DID_TIME_OUT << 16);
2563 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2564 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2565 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2566 }
2567
2568 cmd->result |= (dev_status);
2569
2570 if(cmd->scsi_done != NULL){
2571 cmd->scsi_done(cmd);
2572 }
2573 return cmd->result;
2574 }
2575
2576
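/*
 * Re-read the LCT under the host lock and reconcile the driver's
 * device list against it (see adpt_i2o_reparse_lct below).
 */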
2577 static s32 adpt_rescan(adpt_hba* pHba)
2578 {
2579 s32 rcode;
2580 ulong flags = 0;
2581
2582 if(pHba->host)
2583 spin_lock_irqsave(pHba->host->host_lock, flags);
2584 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2585 goto out;
2586 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2587 goto out;
2588 rcode = 0;
2589 out: if(pHba->host)
2590 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2591 return rcode;
2592 }
2593
2594
2595 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2596 {
2597 int i;
2598 int max;
2599 int tid;
2600 struct i2o_device *d;
2601 i2o_lct *lct = pHba->lct;
2602 u8 bus_no = 0;
2603 s16 scsi_id;
2604 s16 scsi_lun;
2605 u32 buf[10]; // at least 8 u32's
2606 struct adpt_device* pDev = NULL;
2607 struct i2o_device* pI2o_dev = NULL;
2608
2609 if (lct == NULL) {
2610 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2611 return -1;
2612 }
2613
2614 max = lct->table_size;
2615 	max -= 3;	// drop the 3-u32 LCT header
2616 	max /= 9;	// each LCT entry is 9 u32s
2617
2618 // Mark each drive as unscanned
2619 for (d = pHba->devices; d; d = d->next) {
2620 pDev =(struct adpt_device*) d->owner;
2621 if(!pDev){
2622 continue;
2623 }
2624 pDev->state |= DPTI_DEV_UNSCANNED;
2625 }
2626
2627 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2628
2629 for(i=0;i<max;i++) {
2630 if( lct->lct_entry[i].user_tid != 0xfff){
2631 continue;
2632 }
2633
2634 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2635 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2636 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2637 tid = lct->lct_entry[i].tid;
2638 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2639 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2640 continue;
2641 }
2642 bus_no = buf[0]>>16;
2643 if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
2644 printk(KERN_WARNING
2645 "%s: Channel number %d out of range\n",
2646 pHba->name, bus_no);
2647 continue;
2648 }
2649
2650 scsi_id = buf[1];
2651 scsi_lun = (buf[2]>>8 )&0xff;
2652 pDev = pHba->channel[bus_no].device[scsi_id];
2653 			/* walk the LUN chain for this target */
2654 while(pDev) {
2655 if(pDev->scsi_lun == scsi_lun) {
2656 break;
2657 }
2658 pDev = pDev->next_lun;
2659 }
2660 if(!pDev ) { // Something new add it
2661 d = kmalloc(sizeof(struct i2o_device),
2662 GFP_ATOMIC);
2663 if(d==NULL)
2664 {
2665 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2666 return -ENOMEM;
2667 }
2668
2669 d->controller = pHba;
2670 d->next = NULL;
2671
2672 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2673
2674 d->flags = 0;
2675 adpt_i2o_report_hba_unit(pHba, d);
2676 adpt_i2o_install_device(pHba, d);
2677
2678 pDev = pHba->channel[bus_no].device[scsi_id];
2679 if( pDev == NULL){
2680 pDev =
2681 kzalloc(sizeof(struct adpt_device),
2682 GFP_ATOMIC);
2683 if(pDev == NULL) {
2684 return -ENOMEM;
2685 }
2686 pHba->channel[bus_no].device[scsi_id] = pDev;
2687 } else {
2688 while (pDev->next_lun) {
2689 pDev = pDev->next_lun;
2690 }
2691 pDev = pDev->next_lun =
2692 kzalloc(sizeof(struct adpt_device),
2693 GFP_ATOMIC);
2694 if(pDev == NULL) {
2695 return -ENOMEM;
2696 }
2697 }
2698 pDev->tid = d->lct_data.tid;
2699 pDev->scsi_channel = bus_no;
2700 pDev->scsi_id = scsi_id;
2701 pDev->scsi_lun = scsi_lun;
2702 pDev->pI2o_dev = d;
2703 d->owner = pDev;
2704 pDev->type = (buf[0])&0xff;
2705 pDev->flags = (buf[0]>>8)&0xff;
2706 				// Too late, the SCSI system has made up its mind, but what the hey ...
2707 if(scsi_id > pHba->top_scsi_id){
2708 pHba->top_scsi_id = scsi_id;
2709 }
2710 if(scsi_lun > pHba->top_scsi_lun){
2711 pHba->top_scsi_lun = scsi_lun;
2712 }
2713 continue;
2714 } // end of new i2o device
2715
2716 // We found an old device - check it
2717 while(pDev) {
2718 if(pDev->scsi_lun == scsi_lun) {
2719 if(!scsi_device_online(pDev->pScsi_dev)) {
2720 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2721 pHba->name,bus_no,scsi_id,scsi_lun);
2722 if (pDev->pScsi_dev) {
2723 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2724 }
2725 }
2726 d = pDev->pI2o_dev;
2727 if(d->lct_data.tid != tid) { // something changed
2728 pDev->tid = tid;
2729 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2730 if (pDev->pScsi_dev) {
2731 pDev->pScsi_dev->changed = TRUE;
2732 pDev->pScsi_dev->removable = TRUE;
2733 }
2734 }
2735 // Found it - mark it scanned
2736 pDev->state = DPTI_DEV_ONLINE;
2737 break;
2738 }
2739 pDev = pDev->next_lun;
2740 }
2741 }
2742 }
2743 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2744 pDev =(struct adpt_device*) pI2o_dev->owner;
2745 if(!pDev){
2746 continue;
2747 }
2748 // Drive offline drives that previously existed but could not be found
2749 // in the LCT table
2750 if (pDev->state & DPTI_DEV_UNSCANNED){
2751 pDev->state = DPTI_DEV_OFFLINE;
2752 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2753 if (pDev->pScsi_dev) {
2754 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2755 }
2756 }
2757 }
2758 return 0;
2759 }
2760
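/*
 * Complete every command still queued on the adapter's devices with a
 * QUEUE_FULL status so the midlayer will retry them; presumably used
 * when outstanding commands can no longer complete normally (e.g.
 * after a controller reset).
 */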
2761 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2762 {
2763 struct scsi_cmnd* cmd = NULL;
2764 struct scsi_device* d = NULL;
2765
2766 shost_for_each_device(d, pHba->host) {
2767 unsigned long flags;
2768 spin_lock_irqsave(&d->list_lock, flags);
2769 list_for_each_entry(cmd, &d->cmd_list, list) {
2770 if(cmd->serial_number == 0){
2771 continue;
2772 }
2773 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2774 cmd->scsi_done(cmd);
2775 }
2776 spin_unlock_irqrestore(&d->list_lock, flags);
2777 }
2778 }
2779
2780
2781 /*============================================================================
2782 * Routines from i2o subsystem
2783 *============================================================================
2784 */
2785
2786
2787
2788 /*
2789 * Bring an I2O controller into HOLD state. See the spec.
2790 */
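/*
 * Flow below: get a status block (resetting the IOP if that fails or
 * if it is stuck in a non-RESET state), initialize the outbound queue,
 * then fetch the HRT; the IOP is in HOLD state once the HRT read
 * succeeds.
 */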
2791 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2792 {
2793 int rcode;
2794
2795 if(pHba->initialized ) {
2796 if (adpt_i2o_status_get(pHba) < 0) {
2797 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2798 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2799 return rcode;
2800 }
2801 if (adpt_i2o_status_get(pHba) < 0) {
2802 printk(KERN_INFO "HBA not responding.\n");
2803 return -1;
2804 }
2805 }
2806
2807 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2808 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2809 return -1;
2810 }
2811
2812 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2813 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2814 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2815 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2816 adpt_i2o_reset_hba(pHba);
2817 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2818 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2819 return -1;
2820 }
2821 }
2822 } else {
2823 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2824 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2825 return rcode;
2826 }
2827
2828 }
2829
2830 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2831 return -1;
2832 }
2833
2834 /* In HOLD state */
2835
2836 if (adpt_i2o_hrt_get(pHba) < 0) {
2837 return -1;
2838 }
2839
2840 return 0;
2841 }
2842
2843 /*
2844 * Bring a controller online into OPERATIONAL state.
2845 */
2846
2847 static int adpt_i2o_online_hba(adpt_hba* pHba)
2848 {
2849 if (adpt_i2o_systab_send(pHba) < 0) {
2850 adpt_i2o_delete_hba(pHba);
2851 return -1;
2852 }
2853 /* In READY state */
2854
2855 if (adpt_i2o_enable_hba(pHba) < 0) {
2856 adpt_i2o_delete_hba(pHba);
2857 return -1;
2858 }
2859
2860 /* In OPERATIONAL state */
2861 return 0;
2862 }
2863
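/*
 * Return a claimed inbound message frame to the IOP by filling it with
 * a UTIL_NOP. Used when a frame has been pulled off the post FIFO but
 * cannot be used (e.g. a failed message or an allocation error).
 */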
2864 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2865 {
2866 u32 __iomem *msg;
2867 ulong timeout = jiffies + 5*HZ;
2868
2869 while(m == EMPTY_QUEUE){
2870 rmb();
2871 m = readl(pHba->post_port);
2872 if(m != EMPTY_QUEUE){
2873 break;
2874 }
2875 if(time_after(jiffies,timeout)){
2876 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2877 return 2;
2878 }
2879 schedule_timeout_uninterruptible(1);
2880 }
2881 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2882 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2883 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2884 writel( 0,&msg[2]);
2885 wmb();
2886
2887 writel(m, pHba->post_port);
2888 wmb();
2889 return 0;
2890 }
2891
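/*
 * Initialize the outbound (reply) queue. The handshake below is: claim
 * an inbound frame, post I2O_CMD_OUTBOUND_INIT with a 4-byte DMA status
 * buffer, poll that buffer until the IOP reports
 * I2O_EXEC_OUTBOUND_INIT_COMPLETE (0x04), then allocate the reply pool
 * and prime the reply FIFO with the physical address of each frame.
 */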
2892 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2893 {
2894 u8 *status;
2895 dma_addr_t addr;
2896 u32 __iomem *msg = NULL;
2897 int i;
2898 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2899 u32 m;
2900
2901 do {
2902 rmb();
2903 m = readl(pHba->post_port);
2904 if (m != EMPTY_QUEUE) {
2905 break;
2906 }
2907
2908 if(time_after(jiffies,timeout)){
2909 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2910 return -ETIMEDOUT;
2911 }
2912 schedule_timeout_uninterruptible(1);
2913 } while(m == EMPTY_QUEUE);
2914
2915 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2916
2917 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2918 if (!status) {
2919 adpt_send_nop(pHba, m);
2920 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2921 pHba->name);
2922 return -ENOMEM;
2923 }
2924 memset(status, 0, 4);
2925
2926 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2927 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2928 writel(0, &msg[2]);
2929 writel(0x0106, &msg[3]); /* Transaction context */
2930 writel(4096, &msg[4]); /* Host page frame size */
2931 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2932 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2933 writel((u32)addr, &msg[7]);
2934
2935 writel(m, pHba->post_port);
2936 wmb();
2937
2938 // Wait for the reply status to come back
2939 do {
2940 if (*status) {
2941 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2942 break;
2943 }
2944 }
2945 rmb();
2946 if(time_after(jiffies,timeout)){
2947 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2948 			/* We leak these 4 bytes of "status" here: we
2949 			   cannot free them because the controller may
2950 			   wake up and write to those bytes at any time */
2951 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2952 return -ETIMEDOUT;
2953 }
2954 schedule_timeout_uninterruptible(1);
2955 } while (1);
2956
2957 // If the command was successful, fill the fifo with our reply
2958 // message packets
2959 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2960 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2961 return -2;
2962 }
2963 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2964
2965 if(pHba->reply_pool != NULL) {
2966 dma_free_coherent(&pHba->pDev->dev,
2967 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2968 pHba->reply_pool, pHba->reply_pool_pa);
2969 }
2970
2971 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2972 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2973 &pHba->reply_pool_pa, GFP_KERNEL);
2974 if (!pHba->reply_pool) {
2975 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2976 return -ENOMEM;
2977 }
2978 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2979
2980 for(i = 0; i < pHba->reply_fifo_size; i++) {
2981 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2982 pHba->reply_port);
2983 wmb();
2984 }
2985 adpt_i2o_status_get(pHba);
2986 return 0;
2987 }
2988
2989
2990 /*
2991 * I2O System Table. Contains information about
2992 * all the IOPs in the system. Used to inform IOPs
2993 * about each other's existence.
2994 *
2995 * sys_tbl_ver is the CurrentChangeIndicator that is
2996 * used by IOPs to track changes.
2997 */
2998
2999
3000
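/*
 * Fetch the IOP status block (88 bytes, DMA-coherent). Completion is
 * detected by polling the last byte of the block, which the IOP writes
 * when the transfer is done; the FIFO depths and scatter/gather table
 * size are then derived from the returned frame counts and sizes.
 */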
3001 static s32 adpt_i2o_status_get(adpt_hba* pHba)
3002 {
3003 ulong timeout;
3004 u32 m;
3005 u32 __iomem *msg;
3006 u8 *status_block=NULL;
3007
3008 if(pHba->status_block == NULL) {
3009 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
3010 sizeof(i2o_status_block),
3011 &pHba->status_block_pa, GFP_KERNEL);
3012 if(pHba->status_block == NULL) {
3013 printk(KERN_ERR
3014 "dpti%d: Get Status Block failed; Out of memory. \n",
3015 pHba->unit);
3016 return -ENOMEM;
3017 }
3018 }
3019 memset(pHba->status_block, 0, sizeof(i2o_status_block));
3020 status_block = (u8*)(pHba->status_block);
3021 timeout = jiffies+TMOUT_GETSTATUS*HZ;
3022 do {
3023 rmb();
3024 m = readl(pHba->post_port);
3025 if (m != EMPTY_QUEUE) {
3026 break;
3027 }
3028 if(time_after(jiffies,timeout)){
3029 printk(KERN_ERR "%s: Timeout waiting for message !\n",
3030 pHba->name);
3031 return -ETIMEDOUT;
3032 }
3033 schedule_timeout_uninterruptible(1);
3034 } while(m==EMPTY_QUEUE);
3035
3036
3037 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
3038
3039 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
3040 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
3041 writel(1, &msg[2]);
3042 writel(0, &msg[3]);
3043 writel(0, &msg[4]);
3044 writel(0, &msg[5]);
3045 writel( dma_low(pHba->status_block_pa), &msg[6]);
3046 writel( dma_high(pHba->status_block_pa), &msg[7]);
3047 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
3048
3049 //post message
3050 writel(m, pHba->post_port);
3051 wmb();
3052
3053 while(status_block[87]!=0xff){
3054 if(time_after(jiffies,timeout)){
3055 printk(KERN_ERR"dpti%d: Get status timeout.\n",
3056 pHba->unit);
3057 return -ETIMEDOUT;
3058 }
3059 rmb();
3060 schedule_timeout_uninterruptible(1);
3061 }
3062
3063 // Set up our number of outbound and inbound messages
3064 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
3065 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
3066 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
3067 }
3068
3069 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
3070 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
3071 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
3072 }
3073
3074 // Calculate the Scatter Gather list size
3075 if (dpt_dma64(pHba)) {
3076 pHba->sg_tablesize
3077 = ((pHba->status_block->inbound_frame_size * 4
3078 - 14 * sizeof(u32))
3079 / (sizeof(struct sg_simple_element) + sizeof(u32)));
3080 } else {
3081 pHba->sg_tablesize
3082 = ((pHba->status_block->inbound_frame_size * 4
3083 - 12 * sizeof(u32))
3084 / sizeof(struct sg_simple_element));
3085 }
3086 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3087 pHba->sg_tablesize = SG_LIST_ELEMENTS;
3088 }
3089
3090
3091 #ifdef DEBUG
3092 printk("dpti%d: State = ",pHba->unit);
3093 switch(pHba->status_block->iop_state) {
3094 case 0x01:
3095 printk("INIT\n");
3096 break;
3097 case 0x02:
3098 printk("RESET\n");
3099 break;
3100 case 0x04:
3101 printk("HOLD\n");
3102 break;
3103 case 0x05:
3104 printk("READY\n");
3105 break;
3106 case 0x08:
3107 printk("OPERATIONAL\n");
3108 break;
3109 case 0x10:
3110 printk("FAILED\n");
3111 break;
3112 case 0x11:
3113 printk("FAULTED\n");
3114 break;
3115 default:
3116 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3117 }
3118 #endif
3119 return 0;
3120 }
3121
3122 /*
3123 * Get the IOP's Logical Configuration Table
3124 */
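/*
 * The buffer is sized from expected_lct_size first; if the returned
 * table_size (in u32s, hence the << 2) indicates the LCT is larger,
 * the buffer is freed and the request is retried at the bigger size.
 */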
3125 static int adpt_i2o_lct_get(adpt_hba* pHba)
3126 {
3127 u32 msg[8];
3128 int ret;
3129 u32 buf[16];
3130
3131 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3132 pHba->lct_size = pHba->status_block->expected_lct_size;
3133 }
3134 do {
3135 if (pHba->lct == NULL) {
3136 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3137 pHba->lct_size, &pHba->lct_pa,
3138 GFP_ATOMIC);
3139 if(pHba->lct == NULL) {
3140 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3141 pHba->name);
3142 return -ENOMEM;
3143 }
3144 }
3145 memset(pHba->lct, 0, pHba->lct_size);
3146
3147 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3148 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3149 msg[2] = 0;
3150 msg[3] = 0;
3151 msg[4] = 0xFFFFFFFF; /* All devices */
3152 msg[5] = 0x00000000; /* Report now */
3153 msg[6] = 0xD0000000|pHba->lct_size;
3154 msg[7] = (u32)pHba->lct_pa;
3155
3156 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3157 			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3158 pHba->name, ret);
3159 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3160 return ret;
3161 }
3162
3163 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3164 pHba->lct_size = pHba->lct->table_size << 2;
3165 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3166 pHba->lct, pHba->lct_pa);
3167 pHba->lct = NULL;
3168 }
3169 } while (pHba->lct == NULL);
3170
3171 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3172
3173
3174 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3175 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3176 pHba->FwDebugBufferSize = buf[1];
3177 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3178 pHba->FwDebugBufferSize);
3179 if (pHba->FwDebugBuffer_P) {
3180 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3181 FW_DEBUG_FLAGS_OFFSET;
3182 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3183 FW_DEBUG_BLED_OFFSET;
3184 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3185 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3186 FW_DEBUG_STR_LENGTH_OFFSET;
3187 pHba->FwDebugBuffer_P += buf[2];
3188 pHba->FwDebugFlags = 0;
3189 }
3190 }
3191
3192 return 0;
3193 }
3194
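/*
 * (Re)build the I2O system table from the adapter chain: a header plus
 * one i2o_sys_tbl_entry per HBA, each refreshed from a new status
 * block. IOP IDs are unit + 2, and the inbound doorbell is advertised
 * at base_addr_phys + 0x40.
 */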
3195 static int adpt_i2o_build_sys_table(void)
3196 {
3197 adpt_hba* pHba = hba_chain;
3198 int count = 0;
3199
3200 if (sys_tbl)
3201 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3202 sys_tbl, sys_tbl_pa);
3203
3204 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3205 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3206
3207 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3208 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3209 if (!sys_tbl) {
3210 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3211 return -ENOMEM;
3212 }
3213 memset(sys_tbl, 0, sys_tbl_len);
3214
3215 sys_tbl->num_entries = hba_count;
3216 sys_tbl->version = I2OVERSION;
3217 sys_tbl->change_ind = sys_tbl_ind++;
3218
3219 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3220 u64 addr;
3221 // Get updated Status Block so we have the latest information
3222 if (adpt_i2o_status_get(pHba)) {
3223 sys_tbl->num_entries--;
3224 continue; // try next one
3225 }
3226
3227 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3228 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3229 sys_tbl->iops[count].seg_num = 0;
3230 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3231 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3232 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3233 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3234 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3235 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3236 addr = pHba->base_addr_phys + 0x40;
3237 sys_tbl->iops[count].inbound_low = dma_low(addr);
3238 sys_tbl->iops[count].inbound_high = dma_high(addr);
3239
3240 count++;
3241 }
3242
3243 #ifdef DEBUG
3244 {
3245 u32 *table = (u32*)sys_tbl;
3246 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3247 for(count = 0; count < (sys_tbl_len >>2); count++) {
3248 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3249 count, table[count]);
3250 }
3251 }
3252 #endif
3253
3254 return 0;
3255 }
3256
3257
3258 /*
3259 * Dump the information block associated with a given unit (TID)
3260 */
3261
3262 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3263 {
3264 char buf[64];
3265 int unit = d->lct_data.tid;
3266
3267 printk(KERN_INFO "TID %3.3d ", unit);
3268
3269 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3270 {
3271 buf[16]=0;
3272 printk(" Vendor: %-12.12s", buf);
3273 }
3274 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3275 {
3276 buf[16]=0;
3277 printk(" Device: %-12.12s", buf);
3278 }
3279 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3280 {
3281 buf[8]=0;
3282 printk(" Rev: %-12.12s\n", buf);
3283 }
3284 #ifdef DEBUG
3285 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3286 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3287 printk(KERN_INFO "\tFlags: ");
3288
3289 if(d->lct_data.device_flags&(1<<0))
3290 printk("C"); // ConfigDialog requested
3291 if(d->lct_data.device_flags&(1<<1))
3292 printk("U"); // Multi-user capable
3293 if(!(d->lct_data.device_flags&(1<<4)))
3294 printk("P"); // Peer service enabled!
3295 if(!(d->lct_data.device_flags&(1<<5)))
3296 printk("M"); // Mgmt service enabled!
3297 printk("\n");
3298 #endif
3299 }
3300
3301 #ifdef DEBUG
3302 /*
3303 * Do i2o class name lookup
3304 */
3305 static const char *adpt_i2o_get_class_name(int class)
3306 {
3307 int idx = 16;
3308 static char *i2o_class_name[] = {
3309 "Executive",
3310 "Device Driver Module",
3311 "Block Device",
3312 "Tape Device",
3313 "LAN Interface",
3314 "WAN Interface",
3315 "Fibre Channel Port",
3316 "Fibre Channel Device",
3317 "SCSI Device",
3318 "ATE Port",
3319 "ATE Device",
3320 "Floppy Controller",
3321 "Floppy Device",
3322 "Secondary Bus Port",
3323 "Peer Transport Agent",
3324 "Peer Transport",
3325 "Unknown"
3326 };
3327
3328 switch(class&0xFFF) {
3329 case I2O_CLASS_EXECUTIVE:
3330 idx = 0; break;
3331 case I2O_CLASS_DDM:
3332 idx = 1; break;
3333 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3334 idx = 2; break;
3335 case I2O_CLASS_SEQUENTIAL_STORAGE:
3336 idx = 3; break;
3337 case I2O_CLASS_LAN:
3338 idx = 4; break;
3339 case I2O_CLASS_WAN:
3340 idx = 5; break;
3341 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3342 idx = 6; break;
3343 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3344 idx = 7; break;
3345 case I2O_CLASS_SCSI_PERIPHERAL:
3346 idx = 8; break;
3347 case I2O_CLASS_ATE_PORT:
3348 idx = 9; break;
3349 case I2O_CLASS_ATE_PERIPHERAL:
3350 idx = 10; break;
3351 case I2O_CLASS_FLOPPY_CONTROLLER:
3352 idx = 11; break;
3353 case I2O_CLASS_FLOPPY_DEVICE:
3354 idx = 12; break;
3355 case I2O_CLASS_BUS_ADAPTER_PORT:
3356 idx = 13; break;
3357 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3358 idx = 14; break;
3359 case I2O_CLASS_PEER_TRANSPORT:
3360 idx = 15; break;
3361 }
3362 return i2o_class_name[idx];
3363 }
3364 #endif
3365
3366
3367 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3368 {
3369 u32 msg[6];
3370 int ret, size = sizeof(i2o_hrt);
3371
3372 do {
3373 if (pHba->hrt == NULL) {
3374 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3375 size, &pHba->hrt_pa, GFP_KERNEL);
3376 if (pHba->hrt == NULL) {
3377 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3378 return -ENOMEM;
3379 }
3380 }
3381
3382 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3383 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3384 msg[2]= 0;
3385 msg[3]= 0;
3386 msg[4]= (0xD0000000 | size); /* Simple transaction */
3387 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3388
3389 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3390 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3391 return ret;
3392 }
3393
3394 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3395 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3396 dma_free_coherent(&pHba->pDev->dev, size,
3397 pHba->hrt, pHba->hrt_pa);
3398 size = newsize;
3399 pHba->hrt = NULL;
3400 }
3401 } while(pHba->hrt == NULL);
3402 return 0;
3403 }
3404
3405 /*
3406 * Query one scalar group value or a whole scalar group.
3407 */
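/*
 * The operation block built below is six u16s:
 *   { OperationCount = 1, Reserved, I2O_PARAMS_FIELD_GET,
 *     GroupNumber, FieldCount = 1, FieldIndex }
 * Passing field == -1 sets FieldCount to -1, which requests the whole
 * group. The result block carries an 8-byte header that is cut off
 * before the payload is copied back to the caller.
 */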
3408 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3409 int group, int field, void *buf, int buflen)
3410 {
3411 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3412 u8 *opblk_va;
3413 dma_addr_t opblk_pa;
3414 u8 *resblk_va;
3415 dma_addr_t resblk_pa;
3416
3417 int size;
3418
3419 /* 8 bytes for header */
3420 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3421 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3422 if (resblk_va == NULL) {
3423 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3424 return -ENOMEM;
3425 }
3426
3427 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3428 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3429 if (opblk_va == NULL) {
3430 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3431 resblk_va, resblk_pa);
3432 		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3433 pHba->name);
3434 return -ENOMEM;
3435 }
3436 if (field == -1) /* whole group */
3437 opblk[4] = -1;
3438
3439 memcpy(opblk_va, opblk, sizeof(opblk));
3440 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3441 opblk_va, opblk_pa, sizeof(opblk),
3442 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3443 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3444 if (size == -ETIME) {
3445 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3446 resblk_va, resblk_pa);
3447 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3448 return -ETIME;
3449 } else if (size == -EINTR) {
3450 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3451 resblk_va, resblk_pa);
3452 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3453 return -EINTR;
3454 }
3455
3456 memcpy(buf, resblk_va+8, buflen); /* cut off header */
3457
3458 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3459 resblk_va, resblk_pa);
3460 if (size < 0)
3461 return size;
3462
3463 return buflen;
3464 }
3465
3466
3467 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3468 *
3469 * This function can be used for all UtilParamsGet/Set operations.
3470 * The OperationBlock is given in opblk-buffer,
3471 * and results are returned in resblk-buffer.
3472 * Note that the minimum sized resblk is 8 bytes and contains
3473 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3474 */
3475 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3476 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3477 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3478 {
3479 u32 msg[9];
3480 u32 *res = (u32 *)resblk_va;
3481 int wait_status;
3482
3483 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3484 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3485 msg[2] = 0;
3486 msg[3] = 0;
3487 msg[4] = 0;
3488 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3489 msg[6] = (u32)opblk_pa;
3490 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3491 msg[8] = (u32)resblk_pa;
3492
3493 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3494 		printk(KERN_WARNING "adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3495 return wait_status; /* -DetailedStatus */
3496 }
3497
3498 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3499 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3500 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3501 pHba->name,
3502 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3503 : "PARAMS_GET",
3504 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3505 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3506 }
3507
3508 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3509 }
3510
3511
3512 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3513 {
3514 u32 msg[4];
3515 int ret;
3516
3517 adpt_i2o_status_get(pHba);
3518
3519 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3520
3521 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3522 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3523 return 0;
3524 }
3525
3526 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3527 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3528 msg[2] = 0;
3529 msg[3] = 0;
3530
3531 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3532 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3533 pHba->unit, -ret);
3534 } else {
3535 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3536 }
3537
3538 adpt_i2o_status_get(pHba);
3539 return ret;
3540 }
3541
3542
3543 /*
3544 * Enable IOP. Allows the IOP to resume external operations.
3545 */
3546 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3547 {
3548 u32 msg[4];
3549 int ret;
3550
3551 adpt_i2o_status_get(pHba);
3552 if(!pHba->status_block){
3553 return -ENOMEM;
3554 }
3555 /* Enable only allowed on READY state */
3556 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3557 return 0;
3558
3559 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3560 return -EINVAL;
3561
3562 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3563 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3564 msg[2]= 0;
3565 msg[3]= 0;
3566
3567 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3568 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3569 pHba->name, ret);
3570 } else {
3571 PDEBUG("%s: Enabled.\n", pHba->name);
3572 }
3573
3574 adpt_i2o_status_get(pHba);
3575 return ret;
3576 }
3577
3578
3579 static int adpt_i2o_systab_send(adpt_hba* pHba)
3580 {
3581 u32 msg[12];
3582 int ret;
3583
3584 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3585 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3586 msg[2] = 0;
3587 msg[3] = 0;
3588 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3589 msg[5] = 0; /* Segment 0 */
3590
3591 /*
3592 * Provide three SGL-elements:
3593 * System table (SysTab), Private memory space declaration and
3594 * Private i/o space declaration
3595 */
3596 msg[6] = 0x54000000 | sys_tbl_len;
3597 msg[7] = (u32)sys_tbl_pa;
3598 msg[8] = 0x54000000 | 0;
3599 msg[9] = 0;
3600 msg[10] = 0xD4000000 | 0;
3601 msg[11] = 0;
3602
3603 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3604 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3605 pHba->name, ret);
3606 }
3607 #ifdef DEBUG
3608 else {
3609 PINFO("%s: SysTab set.\n", pHba->name);
3610 }
3611 #endif
3612
3613 return ret;
3614 }
3615
3616
3617 /*============================================================================
3618 *
3619 *============================================================================
3620 */
3621
3622
3623 #ifdef UARTDELAY
3624
3625 static void adpt_delay(int millisec)
3626 {
3627 int i;
3628 for (i = 0; i < millisec; i++) {
3629 udelay(1000); /* delay for one millisecond */
3630 }
3631 }
3632
3633 #endif
3634
3635 static struct scsi_host_template driver_template = {
3636 .module = THIS_MODULE,
3637 .name = "dpt_i2o",
3638 .proc_name = "dpt_i2o",
3639 .proc_info = adpt_proc_info,
3640 .info = adpt_info,
3641 .queuecommand = adpt_queue,
3642 .eh_abort_handler = adpt_abort,
3643 .eh_device_reset_handler = adpt_device_reset,
3644 .eh_bus_reset_handler = adpt_bus_reset,
3645 .eh_host_reset_handler = adpt_reset,
3646 .bios_param = adpt_bios_param,
3647 .slave_configure = adpt_slave_configure,
3648 .can_queue = MAX_TO_IOP_MESSAGES,
3649 .this_id = 7,
3650 .cmd_per_lun = 1,
3651 .use_clustering = ENABLE_CLUSTERING,
3652 };
3653
3654 static int __init adpt_init(void)
3655 {
3656 int error;
3657 adpt_hba *pHba, *next;
3658
3659 	printk(KERN_INFO "Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3660
3661 error = adpt_detect(&driver_template);
3662 if (error < 0)
3663 return error;
3664 if (hba_chain == NULL)
3665 return -ENODEV;
3666
3667 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3668 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3669 if (error)
3670 goto fail;
3671 scsi_scan_host(pHba->host);
3672 }
3673 return 0;
3674 fail:
3675 for (pHba = hba_chain; pHba; pHba = next) {
3676 next = pHba->next;
3677 scsi_remove_host(pHba->host);
3678 }
3679 return error;
3680 }
3681
3682 static void __exit adpt_exit(void)
3683 {
3684 adpt_hba *pHba, *next;
3685
3686 for (pHba = hba_chain; pHba; pHba = pHba->next)
3687 scsi_remove_host(pHba->host);
3688 for (pHba = hba_chain; pHba; pHba = next) {
3689 next = pHba->next;
3690 adpt_release(pHba->host);
3691 }
3692 }
3693
3694 module_init(adpt_init);
3695 module_exit(adpt_exit);
3696
3697 MODULE_LICENSE("GPL");