/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};



/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.ioctl		= adpt_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * Functions
 *============================================================================
 */

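/*
 * Note on the DMA helpers below: dpt_dma64() reports whether 64-bit SGEs
 * may be used at all (dma_addr_t must be wider than 32 bits and the HBA
 * must have been brought up with a 64-bit DMA mask, pHba->dma64);
 * dma_high()/dma_low() split a dma_addr_t into the two 32-bit halves
 * carried by I2O 64-bit scatter-gather elements.
 */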
static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);

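/*
 * adpt_detect() walks all DPT/Adaptec I2O PCI functions and brings each
 * IOP through the I2O state machine: activate (get status, init the
 * outbound queue, read the HRT), build and post the system table, bring
 * the IOP online, then read and parse the LCT to discover devices.  Any
 * IOP that fails to come online forces a system-table rebuild without it.
 */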
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}

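/*
 * adpt_inquiry() sends a standard 36-byte SCSI INQUIRY to the adapter
 * itself via the DPT private I2O_CMD_SCSI_EXEC message and uses the
 * response to fill in pHba->detail (the vendor/model/firmware string
 * reported by adpt_info() and /proc).
 */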
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;	// SINGLE SGE, 64 bit
	else
		reqlen = 14;	// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

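	/*
	 * SGE flag word: 0x10000000 marks a simple address element (see the
	 * I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT check in adpt_i2o_passthru());
	 * per the I2O spec, 0x80000000 is "last element" and 0x40000000 "end
	 * of buffer", so 0xD0000000 below is a single, final, simple SGE and
	 * the low 24 bits carry the byte count.  In the 64-bit case the
	 * leading 0x7C element enables 64-bit addressing and the address is
	 * split with dma_low()/dma_high().
	 */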
	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

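/*
 * adpt_queue() is the queuecommand entry point: it resolves (and caches in
 * scsi_device->hostdata) the adpt_device behind each scsi_device on first
 * use, defers commands while the HBA is resetting or busy with an ioctl,
 * and otherwise hands the command to adpt_scsi_to_i2o() for conversion
 * into an I2O message.
 */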
static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need a way to restart the scsi-core's queues, or should I block
	 * calling scsi_done on the outstanding cmds instead;
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

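/*
 * adpt_bios_param() reports a synthetic BIOS-style CHS geometry: heads and
 * sectors are picked from capacity thresholds, and cylinders then fall out
 * as capacity / (heads * sectors) via sector_div().
 */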
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First let's set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len  = sprintf(buffer    , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len += sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}

/*
 *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

/*
 *	Go from a u32 'context' to a struct scsi_cmnd * .
 *	This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 *	Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error_handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
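/*
 * A full HBA reset replays the bring-up sequence from adpt_detect():
 * activate, rebuild and post the system table, bring the IOP online,
 * re-read and re-parse the LCT, then complete any SCBs that were
 * outstanding across the reset as failed (adpt_fail_posted_scbs()).
 */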
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

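/*
 * adpt_install_hba() does the per-controller PCI bring-up: enable the
 * device, claim its regions, pick a 64- or 32-bit streaming DMA mask
 * (coherent/message memory always stays below 4GB), map BAR0 (plus BAR1
 * on split-BAR Raptor cards), allocate the adpt_hba, chain it onto
 * hba_chain and register the shared interrupt handler.
 */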
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


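/*
 * Synchronous message posting: adpt_i2o_post_wait() tags the message with
 * a unique 15-bit id, chains a wait entry onto adpt_post_wait_queue, posts
 * the frame and sleeps.  The interrupt path calls
 * adpt_i2o_post_wait_complete(), which matches the id, records the status
 * and wakes the sleeper; -ETIMEDOUT means the IOP never answered.
 */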
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_KERNEL);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}


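/*
 * adpt_i2o_post_this() is the raw post primitive: poll the inbound FIFO
 * (post_port) until the IOP hands back a message-frame offset, copy the
 * request into that frame with memcpy_toio(), then write the offset back
 * to post_port to queue it.
 */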
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding. If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"         %d\n",p1->id);
	}
	return;
}

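/*
 * adpt_i2o_reset_hba() issues I2O_CMD_ADAPTER_RESET with a 4-byte DMA
 * status buffer and polls that buffer: 0x01 means the reset is still in
 * progress, 0x02 a reset reject.  On timeout the status buffer is
 * deliberately leaked, since the controller may still write to it later.
 */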
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicating that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


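/*
 * adpt_i2o_parse_lct() walks the logical configuration table: the header
 * is 3 u32s and each entry 9 u32s, hence max = (table_size - 3) / 9.
 * Entries with user_tid != 0xfff are hidden devices (arrays, hot spares,
 * unconfigured JBODs) and only bump the top channel/id/lun bounds, while
 * visible entries get an i2o_device plus a per-LUN adpt_device chain.
 */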
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = (buf[2]>>8 )&0xff;
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	lock_kernel();
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		unlock_kernel();
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		unlock_kernel();
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	unlock_kernel();

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}

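/*
 * adpt_i2o_passthru() services the management-application ioctl: it pulls
 * the caller's I2O message and reply sizes out of user space, swaps each
 * simple SGE for a kernel bounce buffer (copying user data in or out
 * according to the direction flag), posts the message synchronously, then
 * copies the reply and any inbound SG data back to the caller.
 */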
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		kfree(reply);	// don't leak the reply buffer on failure
		return -EBUSY;
	}

	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			kfree (reply);
			return -EINVAL;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		// This state stops any new commands from entering the
		// controller while processing the ioctl
//		pHba->state |= DPTI_STATE_IOCTL;
//		We can't set this now - The scsi subsystem sets host_blocked and
//		the queue empties and stops.  We need a way to restart the queue
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
//		pHba->state &= ~DPTI_STATE_IOCTL;
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
	} while(rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy the scatter/gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// reacquire the original message so the copy-back uses the
		// user's SG addresses (we overwrote them with DMA addresses)
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		kfree (reply);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}
	return rcode;
}

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif

#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__
static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:  // Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

/*
 * This routine returns information about the system.  It does not affect
 * any driver logic, so if some of the info is wrong it does no harm.
 */

/* Get all the info we cannot get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}

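/*
 * Main ioctl dispatcher for the management character device.  A
 * management tool would use it roughly like the hypothetical
 * user-space sketch below (the request codes come from the DPT ioctl
 * header; the device node name is conventional, e.g. /dev/dpti0):
 *
 *	int fd = open("/dev/dpti0", O_RDWR);
 *	u32 led;
 *	if (fd >= 0 && ioctl(fd, DPT_BLINKLED, &led) == 0)
 *		printf("blink LED code 0x%x\n", led);
 */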
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
	      ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD:
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}

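/*
 * 32-bit compat wrapper: the private DPT ioctls use flat structures,
 * so 32-bit management tools on a 64-bit kernel can be routed straight
 * into adpt_ioctl() for the recognized request codes.
 */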
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file->f_dentry->d_inode;

	lock_kernel();

	switch(cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	unlock_kernel();

	return ret;
}
#endif

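/*
 * Interrupt handler.  Drains the outbound reply FIFO: each MFA read
 * from the reply port is translated back into a virtual address in
 * the reply pool, then dispatched on its transaction context -
 * 0x40000000 marks an ioctl reply, 0x80000000 a post-wait completion,
 * anything else is a normal SCSI command completion.
 */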
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x40000000){ // IOCTL
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status >> 24){
				status &= 0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				cmd = adpt_cmd_from_context(pHba,
							readl(reply+12));
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				if(cmd->serial_number != 0) { // If not timedout
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}

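/*
 * Build and post an I2O_CMD_SCSI_EXEC message for a SCSI command:
 * header, DPT private fields, the 16-byte CDB block, then a simple
 * scatter/gather list (with an extra 64-bit enable element when the
 * adapter supports 64-bit DMA).
 */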
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note:  Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		reqlen = 16;		// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;		// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on its way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}

static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
	if (host == NULL) {
		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
		return -1;
	}
	host->hostdata[0] = (unsigned long)pHba;
	pHba->host = host;

	host->irq = pHba->pDev->irq;
	/* no IO ports, so don't have to set host->io_port and
	 * host->n_io_port
	 */
	host->io_port = 0;
	host->n_io_port = 0;
				/* see comments in scsi_host.h */
	host->max_id = 16;
	host->max_lun = 256;
	host->max_channel = pHba->top_scsi_channel + 1;
	host->cmd_per_lun = 1;
	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
	host->sg_tablesize = pHba->sg_tablesize;
	host->can_queue = pHba->post_fifo_size;

	return 0;
}

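/*
 * Translate an I2O reply frame into a SCSI mid-layer result.  The
 * detailed status word at offset 16 carries the device status in its
 * low byte and the HBA status in the next byte; offset 20 holds the
 * transfer count used for the residual calculation.
 */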
static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	// calculate resid for sg
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
				hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			// Copy over the sense data
			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);
			}
		}
	} else {
		/* In this condition we could not talk to the tid.
		 * The card rejected it.  We should signal a retry
		 * for a limited number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}


static s32 adpt_rescan(adpt_hba* pHba)
{
	s32 rcode;
	ulong flags = 0;

	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);
	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
		goto out;
	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
		goto out;
	rcode = 0;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return rcode;
}

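/*
 * Reparse the LCT after a rescan: mark every known device unscanned,
 * walk the new LCT matching entries to existing (channel,id,lun)
 * devices (adding any new ones), then offline whatever was not seen.
 */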

static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
					continue;
				}
				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up its mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}

static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
	struct scsi_cmnd* cmd = NULL;
	struct scsi_device* d = NULL;

	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if(cmd->serial_number == 0){
				continue;
			}
			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
			cmd->scsi_done(cmd);
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
}


/*============================================================================
 *  Routines from i2o subsystem
 *============================================================================
 */



/*
 *	Bring an I2O controller into HOLD state. See the spec.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}

/*
 *	Bring a controller online into OPERATIONAL state.
 */

static int adpt_i2o_online_hba(adpt_hba* pHba)
{
	if (adpt_i2o_systab_send(pHba) < 0) {
		adpt_i2o_delete_hba(pHba);
		return -1;
	}
	/* In READY state */

	if (adpt_i2o_enable_hba(pHba) < 0) {
		adpt_i2o_delete_hba(pHba);
		return -1;
	}

	/* In OPERATIONAL state  */
	return 0;
}

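/*
 * Return a message frame to the IOP by filling it with a UTIL_NOP
 * request; used to hand back an MFA we pulled from the post FIFO but
 * cannot otherwise consume (e.g. on allocation failure or a failed
 * reply frame).
 */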
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	wmb();

	writel(m, pHba->post_port);
	wmb();
	return 0;
}

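/*
 * Initialize the outbound (reply) queue: post an OUTBOUND_INIT message,
 * poll a 4-byte DMA status word until the IOP reports completion, then
 * allocate the reply pool and prime the reply FIFO with one MFA per
 * reply frame.
 */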
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we
			   cannot free these because controller may
			   awake and corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}


/*
 * I2O System Table.  Contains information about
 * all the IOPs in the system.  Used to inform IOPs
 * about each other's existence.
 *
 * sys_tbl_ver is the CurrentChangeIndicator that is
 * used by IOPs to track changes.
 */



static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory.\n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message!\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);


	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 14 * sizeof(u32))
		     / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 12 * sizeof(u32))
		     / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}

/*
 * Get the IOP's Logical Configuration Table
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
					pHba->lct_size, &pHba->lct_pa,
					GFP_KERNEL);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = (u32)pHba->lct_pa;

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
				pHba->name, ret);
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			int newsize = pHba->lct->table_size << 2;
			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
					pHba->lct, pHba->lct_pa);
			pHba->lct_size = newsize;
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);


	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
						pHba->FwDebugBufferSize);
		if (pHba->FwDebugBuffer_P) {
			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
							FW_DEBUG_FLAGS_OFFSET;
			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
							FW_DEBUG_BLED_OFFSET;
			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_STR_LENGTH_OFFSET;
			pHba->FwDebugBuffer_P += buf[2];
			pHba->FwDebugFlags = 0;
		}
	}

	return 0;
}

static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}


/*
 *	Dump the information block associated with a given unit (TID)
 */

static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if(d->lct_data.device_flags&(1<<0))
		printk("C");	// ConfigDialog requested
	if(d->lct_data.device_flags&(1<<1))
		printk("U");	// Multi-user capable
	if(!(d->lct_data.device_flags&(1<<4)))
		printk("P");	// Peer service enabled!
	if(!(d->lct_data.device_flags&(1<<5)))
		printk("M");	// Mgmt service enabled!
	printk("\n");
#endif
}

#ifdef DEBUG
/*
 *	Do i2o class name lookup
 */
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch(class&0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif


static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);	/* Simple transaction */
		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}

/*
 *	Query one scalar group value or a whole scalar group.
 */
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;

	int size;

	/* 8 bytes for header */
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	if (field == -1)  		/* whole group */
			opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
							resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
							resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va+8, buflen);  /* cut off header */

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
						resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}

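/*
 * Example use (taken from adpt_i2o_report_hba_unit() above): reading
 * the 16-byte vendor string, field 3 of scalar group 0xF100, for a
 * given TID:
 *
 *	char buf[64];
 *	if (adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, buf, 16) >= 0)
 *		buf[16] = 0;	// NUL-terminate for printing
 */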

/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;	/* -DetailedStatus */
	}

	if (res[1]&0x00FF0000) {	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}


static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */

	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
				pHba->unit, -ret);
	} else {
		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


/*
 * Enable IOP. Allows the IOP to resume external operations.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}
	/* Enable only allowed on READY state */
	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]= 0;
	msg[3]= 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}


/*============================================================================
 *
 *============================================================================
 */


#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}

#endif

static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.proc_info		= adpt_proc_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};

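/*
 * Module init: adpt_detect() probes the PCI bus and builds the
 * hba_chain list; each discovered controller then gets its Scsi_Host
 * registered and scanned.  On any scsi_add_host() failure the hosts
 * registered so far are torn down again.
 */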
static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

static void __exit adpt_exit(void)
{
	adpt_hba *pHba, *next;

	for (pHba = hba_chain; pHba; pHba = pHba->next)
		scsi_remove_host(pHba->host);
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba->host);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");