/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */
#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};




/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.ioctl		= adpt_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);

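/*
 * Controller bring-up below follows the I2O IOP state machine: each
 * Adaptec IOP found on the PCI bus is activated (get status, init
 * outbound queue, get HRT), taken from INIT to HOLD by pushing a
 * freshly built system table, and brought ONLINE to the OPERATIONAL
 * state before its LCT is read and parsed into the per-channel device
 * tables used by the SCSI layer.
 */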
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba* pHba;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
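	/*
	 * Single simple SGL element: 0xD0000000|direction|len carries the
	 * 0x10000000 simple-element flag (the same encoding checked in
	 * adpt_i2o_passthru) plus the end-of-list marker bits, OR'd with
	 * the direction flag and byte count.  In dma64 mode the element is
	 * preceded by the 0x7C SG-attributes header that enables 64-bit
	 * addressing, and the bus address is split via dma_low()/dma_high().
	 */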
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need a way to restart the scsi-core's queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

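/*
 * Fake BIOS disk geometry for partitioning tools.  The heads/sectors
 * values are chosen by capacity band (in 512-byte sectors):
 *
 *	capacity < 0x2000	 18 heads,  2 sectors	(floppy)
 *	< 0x20000		 64 heads, 32 sectors
 *	< 0x40000		 65 heads, 63 sectors
 *	< 0x80000		128 heads, 63 sectors
 *	otherwise		255 heads, 63 sectors
 *
 * with cylinders = capacity / (heads * sectors); CD-ROMs are hardwired
 * to 252/63/1111.
 */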
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First let's set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


541{
542 adpt_hba* pHba;
543
544 pHba = (adpt_hba *) host->hostdata[0];
545 return (char *) (pHba->detail);
546}
547
548static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
549 int length, int inout)
550{
551 struct adpt_device* d;
552 int id;
553 int chan;
554 int len = 0;
555 int begin = 0;
556 int pos = 0;
557 adpt_hba* pHba;
558 int unit;
559
560 *start = buffer;
561 if (inout == TRUE) {
562 /*
563 * The user has done a write and wants us to take the
564 * data in the buffer and do something with it.
565 * proc_scsiwrite calls us with inout = 1
566 *
567 * Read data from buffer (writing to us) - NOT SUPPORTED
568 */
569 return -EINVAL;
570 }
571
572 /*
573 * inout = 0 means the user has done a read and wants information
574 * returned, so we write information about the cards into the buffer
575 * proc_scsiread() calls us with inout = 0
576 */
577
578 // Find HBA (host bus adapter) we are looking for
0b950672 579 mutex_lock(&adpt_configuration_lock);
1da177e4
LT
580 for (pHba = hba_chain; pHba; pHba = pHba->next) {
581 if (pHba->host == host) {
582 break; /* found adapter */
583 }
584 }
0b950672 585 mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
586 if (pHba == NULL) {
587 return 0;
588 }
589 host = pHba->host;
590
591 len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
592 len += sprintf(buffer+len, "%s\n", pHba->detail);
593 len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
594 pHba->host->host_no, pHba->name, host->irq);
595 len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
596 host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
597
598 pos = begin + len;
599
600 /* CHECKPOINT */
601 if(pos > offset + length) {
602 goto stop_output;
603 }
604 if(pos <= offset) {
605 /*
606 * If we haven't even written to where we last left
607 * off (the last time we were called), reset the
608 * beginning pointer.
609 */
610 len = 0;
611 begin = pos;
612 }
613 len += sprintf(buffer+len, "Devices:\n");
614 for(chan = 0; chan < MAX_CHANNEL; chan++) {
615 for(id = 0; id < MAX_ID; id++) {
616 d = pHba->channel[chan].device[id];
617 while(d){
618 len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
619 len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
620 pos = begin + len;
621
622
623 /* CHECKPOINT */
624 if(pos > offset + length) {
625 goto stop_output;
626 }
627 if(pos <= offset) {
628 len = 0;
629 begin = pos;
630 }
631
632 unit = d->pI2o_dev->lct_data.tid;
633 len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
634 unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
635 scsi_device_online(d->pScsi_dev)? "online":"offline");
636 pos = begin + len;
637
638 /* CHECKPOINT */
639 if(pos > offset + length) {
640 goto stop_output;
641 }
642 if(pos <= offset) {
643 len = 0;
644 begin = pos;
645 }
646
647 d = d->next_lun;
648 }
649 }
650 }
651
652 /*
653 * begin is where we last checked our position with regards to offset
654 * begin is always less than offset. len is relative to begin. It
655 * is the number of bytes written past begin
656 *
657 */
658stop_output:
659 /* stop the output and calculate the correct length */
660 *(buffer + len) = '\0';
661
662 *start = buffer + (offset - begin); /* Start of wanted data */
663 len -= (offset - begin);
664 if(len > length) {
665 len = length;
666 } else if(len < 0){
667 len = 0;
668 **start = '\0';
669 }
670 return len;
671}
672
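/*
 * I2O transaction contexts are 32 bits wide, so on 64-bit kernels a
 * kernel pointer cannot ride in the message directly.  Commands are
 * mapped to contexts via cmd->serial_number, and ioctl reply buffers
 * via an index into pHba->ioctl_reply_context[]; the helpers below
 * hide the two cases.
 */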
/*
 * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

/*
 * Go from a u32 'context' to a struct scsi_cmnd * .
 * This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 * Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 * Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

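/*
 * Re-runs the same activate -> build system table -> online -> LCT
 * sequence used at detection time, then fails any SCBs that were
 * posted before the reset so the midlayer can retry them.
 */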
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);
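
	/*
	 * dma64 is only switched on when dma_get_required_mask() reports
	 * memory above 4GB, presumably because the wider 64-bit SG
	 * elements would otherwise just enlarge every message frame for
	 * no benefit (cf. adpt_inquiry: reqlen 17 vs 14).
	 */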

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_32BIT_MASK);

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	// Destroy the sysfs node before pHba (and its unit number) is freed
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	pci_dev_put(pHba->pDev);
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


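/*
 * Synchronous message posting: the caller's wait-queue entry is chained
 * on adpt_post_wait_queue under adpt_post_wait_lock, a 15-bit id is
 * stamped into msg[2] (matching the 0x7fff context mask applied in
 * adpt_i2o_post_wait_complete()), and the caller sleeps until the ISR
 * path posts a status for that id or the timeout expires.
 */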
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}


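/*
 * Inbound message frames are claimed by reading an MFA (message frame
 * address offset) from the post port, filled in via memcpy_toio() at
 * msg_addr_virt + MFA, and handed back to the IOP by writing the same
 * MFA to the post port; EMPTY_QUEUE means no frame is free yet.
 */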
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicating that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


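/*
 * Walk the logical configuration table.  The entry count is derived
 * from lct->table_size, the arithmetic implying a 3-dword LCT header
 * and 9-dword entries.  Entries whose user_tid is claimed (!= 0xfff)
 * belong to devices hidden behind arrays/hotspares; they only update
 * the top_scsi_* bounds so the upper layers still scan those addresses.
 */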
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = (buf[2]>>8 )&0xff;
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	lock_kernel();
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		unlock_kernel();
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		unlock_kernel();
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	unlock_kernel();

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}


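/*
 * I2O passthrough ioctl: the user hands us a raw I2O message.  Any
 * simple SG elements in it are bounced through dma_alloc_coherent()
 * buffers (copying user data in for write directions), their bus
 * addresses are patched into the frame, and the message is posted
 * synchronously.  Afterwards, read-direction buffers and the reply
 * frame are copied back out to user space.
 */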
1789static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1790{
1791 u32 msg[MAX_MESSAGE_SIZE];
1792 u32* reply = NULL;
1793 u32 size = 0;
1794 u32 reply_size = 0;
1795 u32 __user *user_msg = arg;
1796 u32 __user * user_reply = NULL;
1797 void *sg_list[pHba->sg_tablesize];
1798 u32 sg_offset = 0;
1799 u32 sg_count = 0;
1800 int sg_index = 0;
1801 u32 i = 0;
1802 u32 rcode = 0;
1803 void *p = NULL;
67af2b06 1804 dma_addr_t addr;
1da177e4
LT
1805 ulong flags = 0;
1806
1807 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1808 // get user msg size in u32s
1809 if(get_user(size, &user_msg[0])){
1810 return -EFAULT;
1811 }
1812 size = size>>16;
1813
1814 user_reply = &user_msg[size];
1815 if(size > MAX_MESSAGE_SIZE){
1816 return -EFAULT;
1817 }
1818 size *= 4; // Convert to bytes
1819
1820 /* Copy in the user's I2O command */
1821 if(copy_from_user(msg, user_msg, size)) {
1822 return -EFAULT;
1823 }
1824 get_user(reply_size, &user_reply[0]);
1825 reply_size = reply_size>>16;
1826 if(reply_size > REPLY_FRAME_SIZE){
1827 reply_size = REPLY_FRAME_SIZE;
1828 }
1829 reply_size *= 4;
ab552204 1830 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1da177e4
LT
1831 if(reply == NULL) {
1832 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1833 return -ENOMEM;
1834 }
1da177e4
LT
1835 sg_offset = (msg[0]>>4)&0xf;
1836 msg[2] = 0x40000000; // IOCTL context
62ac5aed
MS
1837 msg[3] = adpt_ioctl_to_context(pHba, reply);
1838 if (msg[3] == (u32)-1)
1839 return -EBUSY;
1840
1da177e4
LT
1841 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1842 if(sg_offset) {
62ac5aed 1843 // TODO add 64 bit API
1da177e4
LT
1844 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1845 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1846 if (sg_count > pHba->sg_tablesize){
1847 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1848 kfree (reply);
1849 return -EINVAL;
1850 }
1851
1852 for(i = 0; i < sg_count; i++) {
1853 int sg_size;
1854
1855 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1856 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1857 rcode = -EINVAL;
1858 goto cleanup;
1859 }
1860 sg_size = sg[i].flag_count & 0xffffff;
1861 /* Allocate memory for the transfer */
67af2b06 1862 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1da177e4
LT
1863 if(!p) {
1864 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1865 pHba->name,sg_size,i,sg_count);
1866 rcode = -ENOMEM;
1867 goto cleanup;
1868 }
1869 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1870 /* Copy in the user's SG buffer if necessary */
1871 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
62ac5aed
MS
1872 // sg_simple_element API is 32 bit
1873 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1874 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1875 rcode = -EFAULT;
1876 goto cleanup;
1877 }
1878 }
1879 /* sg_simple_element API is 32 bit, but addr < 4GB */
1880 sg[i].addr_bus = addr;
1881 }
1882 }
1883
1884 do {
1885 if(pHba->host)
1886 spin_lock_irqsave(pHba->host->host_lock, flags);
 1887		// This state stops any new commands from entering the
1888 // controller while processing the ioctl
1889// pHba->state |= DPTI_STATE_IOCTL;
1890// We can't set this now - The scsi subsystem sets host_blocked and
1891// the queue empties and stops. We need a way to restart the queue
1892 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1893 if (rcode != 0)
1894 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1895 rcode, reply);
1896// pHba->state &= ~DPTI_STATE_IOCTL;
1897 if(pHba->host)
1898 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1899 } while(rcode == -ETIMEDOUT);
1900
1901 if(rcode){
1902 goto cleanup;
1903 }
1904
1905 if(sg_offset) {
 1906		/* Copy the Scatter Gather buffers back to user space */
1907 u32 j;
 1908		// TODO add 64 bit API
1909 struct sg_simple_element* sg;
1910 int sg_size;
1911
1912 // re-acquire the original message to handle correctly the sg copy operation
1913 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1914 // get user msg size in u32s
1915 if(get_user(size, &user_msg[0])){
1916 rcode = -EFAULT;
1917 goto cleanup;
1918 }
1919 size = size>>16;
1920 size *= 4;
1921 /* Copy in the user's I2O command */
1922 if (copy_from_user (msg, user_msg, size)) {
1923 rcode = -EFAULT;
1924 goto cleanup;
1925 }
1926 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1927
 1928		// TODO add 64 bit API
1929 sg = (struct sg_simple_element*)(msg + sg_offset);
1930 for (j = 0; j < sg_count; j++) {
1931 /* Copy out the SG list to user's buffer if necessary */
1932 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1933 sg_size = sg[j].flag_count & 0xffffff;
1934 // sg_simple_element API is 32 bit
1935 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1936 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1937 rcode = -EFAULT;
1938 goto cleanup;
1939 }
1940 }
1941 }
1942 }
1943
1944 /* Copy back the reply to user space */
1945 if (reply_size) {
1946 // we wrote our own values for context - now restore the user supplied ones
1947 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1948 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1949 rcode = -EFAULT;
1950 }
1951 if(copy_to_user(user_reply, reply, reply_size)) {
1952 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1953 rcode = -EFAULT;
1954 }
1955 }
1956
1957
1958cleanup:
1959 if (rcode != -ETIME && rcode != -EINTR) {
1960 struct sg_simple_element *sg =
1961 (struct sg_simple_element*) (msg +sg_offset);
 1962		kfree (reply);
1963 while(sg_index) {
1964 if(sg_list[--sg_index]) {
1965 dma_free_coherent(&pHba->pDev->dev,
1966 sg[sg_index].flag_count & 0xffffff,
1967 sg_list[sg_index],
1968 sg[sg_index].addr_bus);
1969 }
1970 }
1971 }
1972 return rcode;
1973}
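/*
 * Illustrative user-space sketch (an assumption, not compiled into the
 * driver): adpt_i2o_passthru() above expects the I2OUSRCMD buffer to
 * carry the message size, in 32-bit words, in the upper 16 bits of the
 * first dword, with the caller's reply frame following the message in
 * the same buffer and declaring its own size the same way.  The UTIL_NOP
 * framing, the "fd" descriptor and header visibility in user space are
 * all assumptions for illustration only.
 */
#if 0
	u32 buf[3 + REPLY_FRAME_SIZE];

	memset(buf, 0, sizeof(buf));
	buf[0] = (3 << 16) | SGL_OFFSET_0;	/* 3-word message, no SG list */
	buf[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID;
	buf[2] = 0;				/* context; the driver rewrites it */
	buf[3] = REPLY_FRAME_SIZE << 16;	/* reply frame size, in words */
	if (ioctl(fd, I2OUSRCMD, buf) < 0)
		perror("I2OUSRCMD");
#endif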
1974
1975#if defined __ia64__
1976static void adpt_ia64_info(sysInfo_S* si)
1977{
1978 // This is all the info we need for now
1979 // We will add more info as our new
 1980	// management utility requires it
1981 si->processorType = PROC_IA64;
1982}
1983#endif
1984
1985#if defined __sparc__
1986static void adpt_sparc_info(sysInfo_S* si)
1987{
1988 // This is all the info we need for now
1989 // We will add more info as our new
 1990	// management utility requires it
1991 si->processorType = PROC_ULTRASPARC;
1992}
1993#endif
1994#if defined __alpha__
1995static void adpt_alpha_info(sysInfo_S* si)
1996{
1997 // This is all the info we need for now
1998 // We will add more info as our new
 1999	// management utility requires it
2000 si->processorType = PROC_ALPHA;
2001}
2002#endif
2003
2004#if defined __i386__
2005static void adpt_i386_info(sysInfo_S* si)
2006{
2007 // This is all the info we need for now
2008 // We will add more info as our new
 2009	// management utility requires it
2010 switch (boot_cpu_data.x86) {
2011 case CPU_386:
2012 si->processorType = PROC_386;
2013 break;
2014 case CPU_486:
2015 si->processorType = PROC_486;
2016 break;
2017 case CPU_586:
2018 si->processorType = PROC_PENTIUM;
2019 break;
2020 default: // Just in case
2021 si->processorType = PROC_PENTIUM;
2022 break;
2023 }
2024}
2025#endif
2026
2027/*
 2028 * This routine returns information about the system. This does not affect
2029 * any logic and if the info is wrong - it doesn't matter.
2030 */
 2031
2032/* Get all the info we can not get from kernel services */
2033static int adpt_system_info(void __user *buffer)
2034{
2035 sysInfo_S si;
2036
2037 memset(&si, 0, sizeof(si));
2038
2039 si.osType = OS_LINUX;
2040 si.osMajorVersion = 0;
2041 si.osMinorVersion = 0;
2042 si.osRevision = 0;
2043 si.busType = SI_PCI_BUS;
2044 si.processorFamily = DPTI_sig.dsProcessorFamily;
2045
2046#if defined __i386__
2047 adpt_i386_info(&si);
2048#elif defined (__ia64__)
2049 adpt_ia64_info(&si);
2050#elif defined(__sparc__)
2051 adpt_sparc_info(&si);
2052#elif defined (__alpha__)
2053 adpt_alpha_info(&si);
2054#else
2055 si.processorType = 0xff ;
 2056#endif
2057 if (copy_to_user(buffer, &si, sizeof(si))){
2058 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
2059 return -EFAULT;
2060 }
 2061
2062 return 0;
2063}
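/*
 * Illustrative user-space sketch (an assumption, not compiled into the
 * driver): the management tools fetch the block that adpt_system_info()
 * fills in above via the DPT_SYSINFO ioctl.  The device node name and
 * error handling are assumptions for illustration only.
 */
#if 0
	sysInfo_S si;
	int fd = open("/dev/dpti0", O_RDWR);

	if (fd >= 0 && ioctl(fd, DPT_SYSINFO, &si) == 0)
		printf("osType=%d busType=%d\n", si.osType, si.busType);
#endif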
2064
2065static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
2066 ulong arg)
2067{
2068 int minor;
2069 int error = 0;
2070 adpt_hba* pHba;
2071 ulong flags = 0;
2072 void __user *argp = (void __user *)arg;
2073
2074 minor = iminor(inode);
2075 if (minor >= DPTI_MAX_HBA){
2076 return -ENXIO;
2077 }
 2078	mutex_lock(&adpt_configuration_lock);
2079 for (pHba = hba_chain; pHba; pHba = pHba->next) {
2080 if (pHba->unit == minor) {
2081 break; /* found adapter */
2082 }
2083 }
 2084	mutex_unlock(&adpt_configuration_lock);
2085 if(pHba == NULL){
2086 return -ENXIO;
2087 }
2088
2089 while((volatile u32) pHba->state & DPTI_STATE_RESET )
2090 schedule_timeout_uninterruptible(2);
2091
2092 switch (cmd) {
2093 // TODO: handle 3 cases
2094 case DPT_SIGNATURE:
2095 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2096 return -EFAULT;
2097 }
2098 break;
2099 case I2OUSRCMD:
2100 return adpt_i2o_passthru(pHba, argp);
2101
2102 case DPT_CTRLINFO:{
2103 drvrHBAinfo_S HbaInfo;
2104
2105#define FLG_OSD_PCI_VALID 0x0001
2106#define FLG_OSD_DMA 0x0002
2107#define FLG_OSD_I2O 0x0004
2108 memset(&HbaInfo, 0, sizeof(HbaInfo));
2109 HbaInfo.drvrHBAnum = pHba->unit;
2110 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2111 HbaInfo.blinkState = adpt_read_blink_led(pHba);
2112 HbaInfo.pciBusNum = pHba->pDev->bus->number;
2113 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2114 HbaInfo.Interrupt = pHba->pDev->irq;
2115 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2116 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2117 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2118 return -EFAULT;
2119 }
2120 break;
2121 }
2122 case DPT_SYSINFO:
2123 return adpt_system_info(argp);
2124 case DPT_BLINKLED:{
2125 u32 value;
2126 value = (u32)adpt_read_blink_led(pHba);
2127 if (copy_to_user(argp, &value, sizeof(value))) {
2128 return -EFAULT;
2129 }
2130 break;
2131 }
2132 case I2ORESETCMD:
2133 if(pHba->host)
2134 spin_lock_irqsave(pHba->host->host_lock, flags);
2135 adpt_hba_reset(pHba);
2136 if(pHba->host)
2137 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2138 break;
2139 case I2ORESCANCMD:
2140 adpt_rescan(pHba);
2141 break;
2142 default:
2143 return -EINVAL;
2144 }
2145
2146 return error;
2147}
2148
2149#ifdef CONFIG_COMPAT
2150static long compat_adpt_ioctl(struct file *file,
2151 unsigned int cmd, unsigned long arg)
2152{
2153 struct inode *inode;
2154 long ret;
2155
2156 inode = file->f_dentry->d_inode;
2157
2158 lock_kernel();
2159
2160 switch(cmd) {
2161 case DPT_SIGNATURE:
2162 case I2OUSRCMD:
2163 case DPT_CTRLINFO:
2164 case DPT_SYSINFO:
2165 case DPT_BLINKLED:
2166 case I2ORESETCMD:
2167 case I2ORESCANCMD:
2168 case (DPT_TARGET_BUSY & 0xFFFF):
2169 case DPT_TARGET_BUSY:
2170 ret = adpt_ioctl(inode, file, cmd, arg);
2171 break;
2172 default:
2173 ret = -ENOIOCTLCMD;
2174 }
2175
2176 unlock_kernel();
2177
2178 return ret;
2179}
2180#endif
 2181
 2182static irqreturn_t adpt_isr(int irq, void *dev_id)
2183{
2184 struct scsi_cmnd* cmd;
2185 adpt_hba* pHba = dev_id;
2186 u32 m;
 2187	void __iomem *reply;
2188 u32 status=0;
2189 u32 context;
2190 ulong flags = 0;
2191 int handled = 0;
2192
2193 if (pHba == NULL){
2194 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2195 return IRQ_NONE;
2196 }
2197 if(pHba->host)
2198 spin_lock_irqsave(pHba->host->host_lock, flags);
2199
2200 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2201 m = readl(pHba->reply_port);
2202 if(m == EMPTY_QUEUE){
2203 // Try twice then give up
2204 rmb();
2205 m = readl(pHba->reply_port);
2206 if(m == EMPTY_QUEUE){
2207 // This really should not happen
2208 printk(KERN_ERR"dpti: Could not get reply frame\n");
2209 goto out;
2210 }
2211 }
2212 if (pHba->reply_pool_pa <= m &&
2213 m < pHba->reply_pool_pa +
2214 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2215 reply = (u8 *)pHba->reply_pool +
2216 (m - pHba->reply_pool_pa);
2217 } else {
2218 /* Ick, we should *never* be here */
2219 printk(KERN_ERR "dpti: reply frame not from pool\n");
2220 reply = (u8 *)bus_to_virt(m);
2221 }
2222
2223 if (readl(reply) & MSG_FAIL) {
2224 u32 old_m = readl(reply+28);
 2225			void __iomem *msg;
2226 u32 old_context;
2227 PDEBUG("%s: Failed message\n",pHba->name);
2228 if(old_m >= 0x100000){
2229 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2230 writel(m,pHba->reply_port);
2231 continue;
2232 }
2233 // Transaction context is 0 in failed reply frame
 2234			msg = pHba->msg_addr_virt + old_m;
2235 old_context = readl(msg+12);
2236 writel(old_context, reply+12);
2237 adpt_send_nop(pHba, old_m);
2238 }
2239 context = readl(reply+8);
2240 if(context & 0x40000000){ // IOCTL
 2241			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2242 if( p != NULL) {
2243 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2244 }
2245 // All IOCTLs will also be post wait
2246 }
2247 if(context & 0x80000000){ // Post wait message
2248 status = readl(reply+16);
2249 if(status >> 24){
2250 status &= 0xffff; /* Get detail status */
2251 } else {
2252 status = I2O_POST_WAIT_OK;
2253 }
2254 if(!(context & 0x40000000)) {
2255 cmd = adpt_cmd_from_context(pHba,
2256 readl(reply+12));
2257 if(cmd != NULL) {
2258 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2259 }
2260 }
2261 adpt_i2o_post_wait_complete(context, status);
2262 } else { // SCSI message
 2263			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
 2264			if(cmd != NULL){
 2265				scsi_dma_unmap(cmd);
2266 if(cmd->serial_number != 0) { // If not timedout
2267 adpt_i2o_to_scsi(reply, cmd);
2268 }
2269 }
2270 }
2271 writel(m, pHba->reply_port);
2272 wmb();
2273 rmb();
2274 }
2275 handled = 1;
2276out: if(pHba->host)
2277 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2278 return IRQ_RETVAL(handled);
2279}
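/*
 * Minimal sketch of the reply-context bit layout adpt_isr() decodes
 * above: bit 30 (0x40000000) marks an ioctl reply whose frame is copied
 * back for the passthru path, and bit 31 (0x80000000) marks a post-wait
 * reply; ioctls are posted as post-wait, so both bits can be set on one
 * reply.  The helper names are hypothetical, not part of the driver.
 */
#if 0
static inline int dpti_ctx_is_ioctl(u32 context)
{
	return (context & 0x40000000) != 0;
}

static inline int dpti_ctx_is_post_wait(u32 context)
{
	return (context & 0x80000000) != 0;
}
#endif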
2280
2281static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2282{
2283 int i;
2284 u32 msg[MAX_MESSAGE_SIZE];
2285 u32* mptr;
 2286	u32* lptr;
2287 u32 *lenptr;
2288 int direction;
2289 int scsidir;
 2290	int nseg;
2291 u32 len;
2292 u32 reqlen;
2293 s32 rcode;
 2294	dma_addr_t addr;
2295
2296 memset(msg, 0 , sizeof(msg));
 2297	len = scsi_bufflen(cmd);
2298 direction = 0x00000000;
2299
2300 scsidir = 0x00000000; // DATA NO XFER
2301 if(len) {
2302 /*
2303 * Set SCBFlags to indicate if data is being transferred
2304 * in or out, or no data transfer
2305 * Note: Do not have to verify index is less than 0 since
2306 * cmd->cmnd[0] is an unsigned char
2307 */
2308 switch(cmd->sc_data_direction){
2309 case DMA_FROM_DEVICE:
2310 scsidir =0x40000000; // DATA IN (iop<--dev)
2311 break;
2312 case DMA_TO_DEVICE:
2313 direction=0x04000000; // SGL OUT
2314 scsidir =0x80000000; // DATA OUT (iop-->dev)
2315 break;
2316 case DMA_NONE:
2317 break;
2318 case DMA_BIDIRECTIONAL:
2319 scsidir =0x40000000; // DATA IN (iop<--dev)
2320 // Assume In - and continue;
2321 break;
2322 default:
2323 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2324 pHba->name, cmd->cmnd[0]);
2325 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2326 cmd->scsi_done(cmd);
2327 return 0;
2328 }
2329 }
2330 // msg[0] is set later
2331 // I2O_CMD_SCSI_EXEC
2332 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2333 msg[2] = 0;
 2334	msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
2335 // Our cards use the transaction context as the tag for queueing
2336 // Adaptec/DPT Private stuff
2337 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2338 msg[5] = d->tid;
2339 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2340 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2341 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2342 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2343 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2344
2345 mptr=msg+7;
2346
2347 // Write SCSI command into the message - always 16 byte block
2348 memset(mptr, 0, 16);
2349 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2350 mptr+=4;
2351 lenptr=mptr++; /* Remember me - fill in when we know */
2352 if (dpt_dma64(pHba)) {
2353 reqlen = 16; // SINGLE SGE
2354 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2355 *mptr++ = 1 << PAGE_SHIFT;
2356 } else {
2357 reqlen = 14; // SINGLE SGE
2358 }
 2359	/* Now fill in the SGList and command */
 2360
2361 nseg = scsi_dma_map(cmd);
2362 BUG_ON(nseg < 0);
2363 if (nseg) {
2364 struct scatterlist *sg;
2365
2366 len = 0;
 2367		scsi_for_each_sg(cmd, sg, nseg, i) {
 2368			lptr = mptr;
2369 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2370 len+=sg_dma_len(sg);
2371 addr = sg_dma_address(sg);
2372 *mptr++ = dma_low(addr);
2373 if (dpt_dma64(pHba))
2374 *mptr++ = dma_high(addr);
2375 /* Make this an end of list */
2376 if (i == nseg - 1)
 2377				*lptr = direction|0xD0000000|sg_dma_len(sg);
 2378		}
2379 reqlen = mptr - msg;
2380 *lenptr = len;
2381
2382 if(cmd->underflow && len != cmd->underflow){
2383 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2384 len, cmd->underflow);
2385 }
2386 } else {
2387 *lenptr = len = 0;
2388 reqlen = 12;
2389 }
2390
2391 /* Stick the headers on */
2392 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2393
 2394	// Send it on its way
2395 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2396 if (rcode == 0) {
2397 return 0;
2398 }
2399 return rcode;
2400}
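/*
 * Minimal sketch of the SG flag_count encoding adpt_scsi_to_i2o() emits
 * above: the low 24 bits carry the byte count, 0x10000000 marks a simple
 * element, 0x04000000 selects the write (SGL OUT) direction, and the
 * final element is rewritten with 0xD0000000 (simple | end-of-buffer |
 * last element).  The helper name is hypothetical.
 */
#if 0
static inline u32 dpti_sg_flag_count(u32 len, int is_write, int is_last)
{
	u32 v = 0x10000000 | (len & 0xffffff);	/* simple element + count */

	if (is_write)
		v |= 0x04000000;	/* SGL OUT */
	if (is_last)
		v |= 0xC0000000;	/* end-of-buffer | last, giving 0xD0000000 */
	return v;
}
#endif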
2401
2402
 2403static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
 2404{
 2405	struct Scsi_Host *host;
 2406
 2407	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
 2408	if (host == NULL) {
 2409		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2410 return -1;
2411 }
2412 host->hostdata[0] = (unsigned long)pHba;
2413 pHba->host = host;
2414
2415 host->irq = pHba->pDev->irq;
2416 /* no IO ports, so don't have to set host->io_port and
2417 * host->n_io_port
2418 */
2419 host->io_port = 0;
2420 host->n_io_port = 0;
2421 /* see comments in scsi_host.h */
2422 host->max_id = 16;
2423 host->max_lun = 256;
2424 host->max_channel = pHba->top_scsi_channel + 1;
2425 host->cmd_per_lun = 1;
 2426	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2427 host->sg_tablesize = pHba->sg_tablesize;
2428 host->can_queue = pHba->post_fifo_size;
2429
2430 return 0;
2431}
2432
2433
 2434static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2435{
2436 adpt_hba* pHba;
2437 u32 hba_status;
2438 u32 dev_status;
2439 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2440 // I know this would look cleaner if I just read bytes
2441 // but the model I have been using for all the rest of the
2442 // io is in 4 byte words - so I keep that model
2443 u16 detailed_status = readl(reply+16) &0xffff;
2444 dev_status = (detailed_status & 0xff);
2445 hba_status = detailed_status >> 8;
2446
2447 // calculate resid for sg
 2448	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2449
2450 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2451
2452 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2453
2454 if(!(reply_flags & MSG_FAIL)) {
2455 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2456 case I2O_SCSI_DSC_SUCCESS:
2457 cmd->result = (DID_OK << 16);
2458 // handle underflow
 2459			if (readl(reply+20) < cmd->underflow) {
2460 cmd->result = (DID_ERROR <<16);
2461 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2462 }
2463 break;
2464 case I2O_SCSI_DSC_REQUEST_ABORTED:
2465 cmd->result = (DID_ABORT << 16);
2466 break;
2467 case I2O_SCSI_DSC_PATH_INVALID:
2468 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2469 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2470 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2471 case I2O_SCSI_DSC_NO_ADAPTER:
2472 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2473 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2474 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2475 cmd->result = (DID_TIME_OUT << 16);
2476 break;
2477 case I2O_SCSI_DSC_ADAPTER_BUSY:
2478 case I2O_SCSI_DSC_BUS_BUSY:
2479 cmd->result = (DID_BUS_BUSY << 16);
2480 break;
2481 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2482 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2483 cmd->result = (DID_RESET << 16);
2484 break;
2485 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2486 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2487 cmd->result = (DID_PARITY << 16);
2488 break;
2489 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2490 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2491 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2492 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2493 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2494 case I2O_SCSI_DSC_DATA_OVERRUN:
2495 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2496 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2497 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2498 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2499 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2500 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2501 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2502 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2503 case I2O_SCSI_DSC_INVALID_CDB:
2504 case I2O_SCSI_DSC_LUN_INVALID:
2505 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2506 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2507 case I2O_SCSI_DSC_NO_NEXUS:
2508 case I2O_SCSI_DSC_CDB_RECEIVED:
2509 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2510 case I2O_SCSI_DSC_QUEUE_FROZEN:
2511 case I2O_SCSI_DSC_REQUEST_INVALID:
2512 default:
2513 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2514 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2515 hba_status, dev_status, cmd->cmnd[0]);
2516 cmd->result = (DID_ERROR << 16);
2517 break;
2518 }
2519
2520 // copy over the request sense data if it was a check
2521 // condition status
 2522	if (dev_status == SAM_STAT_CHECK_CONDITION) {
 2523		u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
 2524		// Copy over the sense data
 2525		memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2526 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2527 cmd->sense_buffer[2] == DATA_PROTECT ){
2528 /* This is to handle an array failed */
2529 cmd->result = (DID_TIME_OUT << 16);
2530 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2531 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2532 hba_status, dev_status, cmd->cmnd[0]);
2533
2534 }
2535 }
2536 } else {
 2537		/* In this condition we could not talk to the tid
 2538		 * the card rejected it. We should signal a retry
 2539		 * for a limited number of retries.
2540 */
2541 cmd->result = (DID_TIME_OUT << 16);
2542 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2543 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2544 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2545 }
2546
2547 cmd->result |= (dev_status);
2548
2549 if(cmd->scsi_done != NULL){
2550 cmd->scsi_done(cmd);
2551 }
2552 return cmd->result;
2553}
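/*
 * Sketch of the status decomposition adpt_i2o_to_scsi() performs above
 * (illustration only): dword 4 of the reply, at byte offset 16, holds
 * the detailed status; its low byte is the SCSI device status (a SAM
 * status such as CHECK CONDITION) and the next byte is the adapter
 * (HBA) status.
 */
#if 0
	u16 detailed = readl(reply + 16) & 0xffff;
	u8 dev = detailed & 0xff;	/* e.g. SAM_STAT_CHECK_CONDITION */
	u8 hba = detailed >> 8;		/* adapter-detected error class */
#endif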
2554
2555
2556static s32 adpt_rescan(adpt_hba* pHba)
2557{
2558 s32 rcode;
2559 ulong flags = 0;
2560
2561 if(pHba->host)
2562 spin_lock_irqsave(pHba->host->host_lock, flags);
2563 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2564 goto out;
2565 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2566 goto out;
2567 rcode = 0;
2568out: if(pHba->host)
2569 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2570 return rcode;
2571}
2572
2573
2574static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2575{
2576 int i;
2577 int max;
2578 int tid;
2579 struct i2o_device *d;
2580 i2o_lct *lct = pHba->lct;
2581 u8 bus_no = 0;
2582 s16 scsi_id;
2583 s16 scsi_lun;
2584 u32 buf[10]; // at least 8 u32's
2585 struct adpt_device* pDev = NULL;
2586 struct i2o_device* pI2o_dev = NULL;
2587
2588 if (lct == NULL) {
2589 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2590 return -1;
2591 }
2592
2593 max = lct->table_size;
2594 max -= 3;
2595 max /= 9;
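	/*
	 * The LCT header is 3 dwords and each entry is 9 dwords, so with
	 * table_size counted in dwords a table_size of 30 yields
	 * (30 - 3) / 9 = 3 entries.
	 */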
2596
2597 // Mark each drive as unscanned
2598 for (d = pHba->devices; d; d = d->next) {
2599 pDev =(struct adpt_device*) d->owner;
2600 if(!pDev){
2601 continue;
2602 }
2603 pDev->state |= DPTI_DEV_UNSCANNED;
2604 }
2605
2606 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2607
2608 for(i=0;i<max;i++) {
2609 if( lct->lct_entry[i].user_tid != 0xfff){
2610 continue;
2611 }
2612
2613 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2614 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2615 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2616 tid = lct->lct_entry[i].tid;
2617 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2618 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2619 continue;
2620 }
2621 bus_no = buf[0]>>16;
2622 scsi_id = buf[1];
2623 scsi_lun = (buf[2]>>8 )&0xff;
2624 pDev = pHba->channel[bus_no].device[scsi_id];
2625 /* da lun */
2626 while(pDev) {
2627 if(pDev->scsi_lun == scsi_lun) {
2628 break;
2629 }
2630 pDev = pDev->next_lun;
2631 }
2632 if(!pDev ) { // Something new add it
 2633				d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
2634 if(d==NULL)
2635 {
2636 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2637 return -ENOMEM;
2638 }
2639
 2640				d->controller = pHba;
2641 d->next = NULL;
2642
2643 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2644
2645 d->flags = 0;
2646 adpt_i2o_report_hba_unit(pHba, d);
2647 adpt_i2o_install_device(pHba, d);
2648
2649 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
2650 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2651 continue;
2652 }
2653 pDev = pHba->channel[bus_no].device[scsi_id];
2654 if( pDev == NULL){
 2655					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
2656 if(pDev == NULL) {
2657 return -ENOMEM;
2658 }
2659 pHba->channel[bus_no].device[scsi_id] = pDev;
2660 } else {
2661 while (pDev->next_lun) {
2662 pDev = pDev->next_lun;
2663 }
 2664					pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
2665 if(pDev == NULL) {
2666 return -ENOMEM;
2667 }
2668 }
2669 pDev->tid = d->lct_data.tid;
2670 pDev->scsi_channel = bus_no;
2671 pDev->scsi_id = scsi_id;
2672 pDev->scsi_lun = scsi_lun;
2673 pDev->pI2o_dev = d;
2674 d->owner = pDev;
2675 pDev->type = (buf[0])&0xff;
2676 pDev->flags = (buf[0]>>8)&0xff;
 2677				// Too late, SCSI system has made up its mind, but what the hey ...
2678 if(scsi_id > pHba->top_scsi_id){
2679 pHba->top_scsi_id = scsi_id;
2680 }
2681 if(scsi_lun > pHba->top_scsi_lun){
2682 pHba->top_scsi_lun = scsi_lun;
2683 }
2684 continue;
2685 } // end of new i2o device
2686
2687 // We found an old device - check it
2688 while(pDev) {
2689 if(pDev->scsi_lun == scsi_lun) {
2690 if(!scsi_device_online(pDev->pScsi_dev)) {
2691 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2692 pHba->name,bus_no,scsi_id,scsi_lun);
2693 if (pDev->pScsi_dev) {
2694 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2695 }
2696 }
2697 d = pDev->pI2o_dev;
2698 if(d->lct_data.tid != tid) { // something changed
2699 pDev->tid = tid;
2700 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2701 if (pDev->pScsi_dev) {
2702 pDev->pScsi_dev->changed = TRUE;
2703 pDev->pScsi_dev->removable = TRUE;
2704 }
2705 }
2706 // Found it - mark it scanned
2707 pDev->state = DPTI_DEV_ONLINE;
2708 break;
2709 }
2710 pDev = pDev->next_lun;
2711 }
2712 }
2713 }
2714 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2715 pDev =(struct adpt_device*) pI2o_dev->owner;
2716 if(!pDev){
2717 continue;
2718 }
2719 // Drive offline drives that previously existed but could not be found
2720 // in the LCT table
2721 if (pDev->state & DPTI_DEV_UNSCANNED){
2722 pDev->state = DPTI_DEV_OFFLINE;
2723 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2724 if (pDev->pScsi_dev) {
2725 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2726 }
2727 }
2728 }
2729 return 0;
2730}
2731
2732static void adpt_fail_posted_scbs(adpt_hba* pHba)
2733{
2734 struct scsi_cmnd* cmd = NULL;
2735 struct scsi_device* d = NULL;
2736
2737 shost_for_each_device(d, pHba->host) {
2738 unsigned long flags;
2739 spin_lock_irqsave(&d->list_lock, flags);
2740 list_for_each_entry(cmd, &d->cmd_list, list) {
2741 if(cmd->serial_number == 0){
2742 continue;
2743 }
2744 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2745 cmd->scsi_done(cmd);
2746 }
2747 spin_unlock_irqrestore(&d->list_lock, flags);
2748 }
2749}
2750
2751
2752/*============================================================================
2753 * Routines from i2o subsystem
2754 *============================================================================
2755 */
2756
2757
2758
2759/*
2760 * Bring an I2O controller into HOLD state. See the spec.
2761 */
2762static int adpt_i2o_activate_hba(adpt_hba* pHba)
2763{
2764 int rcode;
2765
2766 if(pHba->initialized ) {
2767 if (adpt_i2o_status_get(pHba) < 0) {
2768 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2769 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2770 return rcode;
2771 }
2772 if (adpt_i2o_status_get(pHba) < 0) {
2773 printk(KERN_INFO "HBA not responding.\n");
2774 return -1;
2775 }
2776 }
2777
2778 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2779 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2780 return -1;
2781 }
2782
2783 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2784 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2785 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2786 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2787 adpt_i2o_reset_hba(pHba);
2788 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2789 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2790 return -1;
2791 }
2792 }
2793 } else {
2794 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2795 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2796 return rcode;
2797 }
2798
2799 }
2800
2801 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2802 return -1;
2803 }
2804
2805 /* In HOLD state */
2806
2807 if (adpt_i2o_hrt_get(pHba) < 0) {
2808 return -1;
2809 }
2810
2811 return 0;
2812}
2813
2814/*
2815 * Bring a controller online into OPERATIONAL state.
2816 */
2817
2818static int adpt_i2o_online_hba(adpt_hba* pHba)
2819{
2820 if (adpt_i2o_systab_send(pHba) < 0) {
2821 adpt_i2o_delete_hba(pHba);
2822 return -1;
2823 }
2824 /* In READY state */
2825
2826 if (adpt_i2o_enable_hba(pHba) < 0) {
2827 adpt_i2o_delete_hba(pHba);
2828 return -1;
2829 }
2830
2831 /* In OPERATIONAL state */
2832 return 0;
2833}
2834
2835static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2836{
2837 u32 __iomem *msg;
2838 ulong timeout = jiffies + 5*HZ;
2839
2840 while(m == EMPTY_QUEUE){
2841 rmb();
2842 m = readl(pHba->post_port);
2843 if(m != EMPTY_QUEUE){
2844 break;
2845 }
2846 if(time_after(jiffies,timeout)){
2847 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2848 return 2;
2849 }
 2850		schedule_timeout_uninterruptible(1);
2851 }
2852 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2853 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2854 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2855 writel( 0,&msg[2]);
2856 wmb();
2857
2858 writel(m, pHba->post_port);
2859 wmb();
2860 return 0;
2861}
2862
2863static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2864{
2865 u8 *status;
 2866	dma_addr_t addr;
2867 u32 __iomem *msg = NULL;
2868 int i;
2869 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2870 u32 m;
2871
2872 do {
2873 rmb();
2874 m = readl(pHba->post_port);
2875 if (m != EMPTY_QUEUE) {
2876 break;
2877 }
2878
2879 if(time_after(jiffies,timeout)){
2880 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2881 return -ETIMEDOUT;
2882 }
 2883		schedule_timeout_uninterruptible(1);
2884 } while(m == EMPTY_QUEUE);
2885
2886 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2887
 2888	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
 2889	if (!status) {
2890 adpt_send_nop(pHba, m);
2891 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2892 pHba->name);
2893 return -ENOMEM;
2894 }
 2895	memset(status, 0, 4);
2896
2897 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2898 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2899 writel(0, &msg[2]);
2900 writel(0x0106, &msg[3]); /* Transaction context */
2901 writel(4096, &msg[4]); /* Host page frame size */
2902 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2903 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
 2904	writel((u32)addr, &msg[7]);
2905
2906 writel(m, pHba->post_port);
2907 wmb();
2908
2909 // Wait for the reply status to come back
2910 do {
2911 if (*status) {
2912 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2913 break;
2914 }
2915 }
2916 rmb();
2917 if(time_after(jiffies,timeout)){
2918 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2919 /* We lose 4 bytes of "status" here, but we
2920 cannot free these because controller may
2921 awake and corrupt those bytes at any time */
2922 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2923 return -ETIMEDOUT;
2924 }
 2925		schedule_timeout_uninterruptible(1);
2926 } while (1);
2927
2928 // If the command was successful, fill the fifo with our reply
2929 // message packets
2930 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
 2931		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2932 return -2;
2933 }
 2934	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
 2935
2936 if(pHba->reply_pool != NULL) {
2937 dma_free_coherent(&pHba->pDev->dev,
2938 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2939 pHba->reply_pool, pHba->reply_pool_pa);
2940 }
 2941
2942 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2943 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2944 &pHba->reply_pool_pa, GFP_KERNEL);
2945 if (!pHba->reply_pool) {
2946 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2947 return -ENOMEM;
 2948	}
 2949	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
 2950
 2951	for(i = 0; i < pHba->reply_fifo_size; i++) {
2952 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2953 pHba->reply_port);
 2954		wmb();
2955 }
2956 adpt_i2o_status_get(pHba);
2957 return 0;
2958}
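/*
 * Sizing note for adpt_i2o_init_outbound_q() above: the reply pool holds
 * reply_fifo_size frames of REPLY_FRAME_SIZE dwords each, i.e.
 * reply_fifo_size * REPLY_FRAME_SIZE * 4 bytes of coherent memory, and
 * the bus address of every frame is primed into the reply FIFO with one
 * writel() per frame before the IOP starts using them.
 */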
2959
2960
2961/*
2962 * I2O System Table. Contains information about
2963 * all the IOPs in the system. Used to inform IOPs
2964 * about each other's existence.
2965 *
2966 * sys_tbl_ver is the CurrentChangeIndicator that is
2967 * used by IOPs to track changes.
2968 */
2969
2970
2971
2972static s32 adpt_i2o_status_get(adpt_hba* pHba)
2973{
2974 ulong timeout;
2975 u32 m;
2976 u32 __iomem *msg;
2977 u8 *status_block=NULL;
2978
2979 if(pHba->status_block == NULL) {
2980 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2981 sizeof(i2o_status_block),
2982 &pHba->status_block_pa, GFP_KERNEL);
2983 if(pHba->status_block == NULL) {
2984 printk(KERN_ERR
2985 "dpti%d: Get Status Block failed; Out of memory. \n",
2986 pHba->unit);
2987 return -ENOMEM;
2988 }
2989 }
2990 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2991 status_block = (u8*)(pHba->status_block);
2992 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2993 do {
2994 rmb();
2995 m = readl(pHba->post_port);
2996 if (m != EMPTY_QUEUE) {
2997 break;
2998 }
2999 if(time_after(jiffies,timeout)){
3000 printk(KERN_ERR "%s: Timeout waiting for message !\n",
3001 pHba->name);
3002 return -ETIMEDOUT;
3003 }
 3004		schedule_timeout_uninterruptible(1);
3005 } while(m==EMPTY_QUEUE);
3006
3007
3008 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
3009
3010 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
3011 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
3012 writel(1, &msg[2]);
3013 writel(0, &msg[3]);
3014 writel(0, &msg[4]);
3015 writel(0, &msg[5]);
3016 writel( dma_low(pHba->status_block_pa), &msg[6]);
3017 writel( dma_high(pHba->status_block_pa), &msg[7]);
3018 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
3019
3020 //post message
3021 writel(m, pHba->post_port);
3022 wmb();
3023
3024 while(status_block[87]!=0xff){
3025 if(time_after(jiffies,timeout)){
3026 printk(KERN_ERR"dpti%d: Get status timeout.\n",
3027 pHba->unit);
3028 return -ETIMEDOUT;
3029 }
3030 rmb();
 3031		schedule_timeout_uninterruptible(1);
3032 }
3033
3034 // Set up our number of outbound and inbound messages
3035 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
3036 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
3037 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
3038 }
3039
3040 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
3041 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
3042 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
3043 }
3044
3045 // Calculate the Scatter Gather list size
3046 if (dpt_dma64(pHba)) {
3047 pHba->sg_tablesize
3048 = ((pHba->status_block->inbound_frame_size * 4
3049 - 14 * sizeof(u32))
3050 / (sizeof(struct sg_simple_element) + sizeof(u32)));
3051 } else {
3052 pHba->sg_tablesize
3053 = ((pHba->status_block->inbound_frame_size * 4
3054 - 12 * sizeof(u32))
3055 / sizeof(struct sg_simple_element));
3056 }
3057 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3058 pHba->sg_tablesize = SG_LIST_ELEMENTS;
3059 }
3060
3061
3062#ifdef DEBUG
3063 printk("dpti%d: State = ",pHba->unit);
3064 switch(pHba->status_block->iop_state) {
3065 case 0x01:
3066 printk("INIT\n");
3067 break;
3068 case 0x02:
3069 printk("RESET\n");
3070 break;
3071 case 0x04:
3072 printk("HOLD\n");
3073 break;
3074 case 0x05:
3075 printk("READY\n");
3076 break;
3077 case 0x08:
3078 printk("OPERATIONAL\n");
3079 break;
3080 case 0x10:
3081 printk("FAILED\n");
3082 break;
3083 case 0x11:
3084 printk("FAULTED\n");
3085 break;
3086 default:
3087 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3088 }
3089#endif
3090 return 0;
3091}
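/*
 * Worked example for the sg_tablesize arithmetic above (illustration
 * only): a 64-bit capable IOP reporting inbound_frame_size = 32 dwords
 * leaves 32*4 - 14*4 = 72 bytes of SG payload; each element costs
 * sizeof(struct sg_simple_element) (8 bytes) plus one extra dword for
 * the high half of the address, so 72 / 12 = 6 elements fit.  The
 * 32-bit case reserves 12 header dwords and divides by 8: 80 / 8 = 10.
 */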
3092
3093/*
3094 * Get the IOP's Logical Configuration Table
3095 */
3096static int adpt_i2o_lct_get(adpt_hba* pHba)
3097{
3098 u32 msg[8];
3099 int ret;
3100 u32 buf[16];
3101
3102 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3103 pHba->lct_size = pHba->status_block->expected_lct_size;
3104 }
3105 do {
3106 if (pHba->lct == NULL) {
3107 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3108 pHba->lct_size, &pHba->lct_pa,
3109 GFP_KERNEL);
3110 if(pHba->lct == NULL) {
3111 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3112 pHba->name);
3113 return -ENOMEM;
3114 }
3115 }
3116 memset(pHba->lct, 0, pHba->lct_size);
3117
3118 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3119 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3120 msg[2] = 0;
3121 msg[3] = 0;
3122 msg[4] = 0xFFFFFFFF; /* All devices */
3123 msg[5] = 0x00000000; /* Report now */
3124 msg[6] = 0xD0000000|pHba->lct_size;
 3125		msg[7] = (u32)pHba->lct_pa;
3126
3127 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
 3128			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3129 pHba->name, ret);
3130 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3131 return ret;
3132 }
3133
3134 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3135 pHba->lct_size = pHba->lct->table_size << 2;
3136 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3137 pHba->lct, pHba->lct_pa);
3138 pHba->lct = NULL;
3139 }
3140 } while (pHba->lct == NULL);
3141
3142 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3143
3144
3145 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3146 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3147 pHba->FwDebugBufferSize = buf[1];
3148 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3149 pHba->FwDebugBufferSize);
3150 if (pHba->FwDebugBuffer_P) {
3151 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3152 FW_DEBUG_FLAGS_OFFSET;
3153 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3154 FW_DEBUG_BLED_OFFSET;
3155 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3156 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3157 FW_DEBUG_STR_LENGTH_OFFSET;
3158 pHba->FwDebugBuffer_P += buf[2];
3159 pHba->FwDebugFlags = 0;
3160 }
3161 }
3162
3163 return 0;
3164}
3165
3166static int adpt_i2o_build_sys_table(void)
3167{
 3168	adpt_hba* pHba = hba_chain;
3169 int count = 0;
3170
3171 if (sys_tbl)
3172 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3173 sys_tbl, sys_tbl_pa);
3174
3175 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3176 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3177
3178 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3179 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
 3180	if (!sys_tbl) {
3181 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3182 return -ENOMEM;
3183 }
 3184	memset(sys_tbl, 0, sys_tbl_len);
3185
3186 sys_tbl->num_entries = hba_count;
3187 sys_tbl->version = I2OVERSION;
3188 sys_tbl->change_ind = sys_tbl_ind++;
3189
3190 for(pHba = hba_chain; pHba; pHba = pHba->next) {
 3191		u64 addr;
3192 // Get updated Status Block so we have the latest information
3193 if (adpt_i2o_status_get(pHba)) {
3194 sys_tbl->num_entries--;
3195 continue; // try next one
3196 }
3197
3198 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3199 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3200 sys_tbl->iops[count].seg_num = 0;
3201 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3202 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3203 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3204 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3205 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3206 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3207 addr = pHba->base_addr_phys + 0x40;
3208 sys_tbl->iops[count].inbound_low = dma_low(addr);
3209 sys_tbl->iops[count].inbound_high = dma_high(addr);
3210
3211 count++;
3212 }
3213
3214#ifdef DEBUG
3215{
3216 u32 *table = (u32*)sys_tbl;
3217 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3218 for(count = 0; count < (sys_tbl_len >>2); count++) {
3219 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3220 count, table[count]);
3221 }
3222}
3223#endif
3224
3225 return 0;
3226}
3227
3228
3229/*
3230 * Dump the information block associated with a given unit (TID)
3231 */
3232
3233static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3234{
3235 char buf[64];
3236 int unit = d->lct_data.tid;
3237
3238 printk(KERN_INFO "TID %3.3d ", unit);
3239
3240 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3241 {
3242 buf[16]=0;
3243 printk(" Vendor: %-12.12s", buf);
3244 }
3245 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3246 {
3247 buf[16]=0;
3248 printk(" Device: %-12.12s", buf);
3249 }
3250 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3251 {
3252 buf[8]=0;
3253 printk(" Rev: %-12.12s\n", buf);
3254 }
3255#ifdef DEBUG
3256 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3257 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3258 printk(KERN_INFO "\tFlags: ");
3259
3260 if(d->lct_data.device_flags&(1<<0))
3261 printk("C"); // ConfigDialog requested
3262 if(d->lct_data.device_flags&(1<<1))
3263 printk("U"); // Multi-user capable
3264 if(!(d->lct_data.device_flags&(1<<4)))
3265 printk("P"); // Peer service enabled!
3266 if(!(d->lct_data.device_flags&(1<<5)))
3267 printk("M"); // Mgmt service enabled!
3268 printk("\n");
3269#endif
3270}
3271
3272#ifdef DEBUG
3273/*
3274 * Do i2o class name lookup
3275 */
3276static const char *adpt_i2o_get_class_name(int class)
3277{
3278 int idx = 16;
3279 static char *i2o_class_name[] = {
3280 "Executive",
3281 "Device Driver Module",
3282 "Block Device",
3283 "Tape Device",
3284 "LAN Interface",
3285 "WAN Interface",
3286 "Fibre Channel Port",
3287 "Fibre Channel Device",
3288 "SCSI Device",
3289 "ATE Port",
3290 "ATE Device",
3291 "Floppy Controller",
3292 "Floppy Device",
3293 "Secondary Bus Port",
3294 "Peer Transport Agent",
3295 "Peer Transport",
3296 "Unknown"
3297 };
3298
3299 switch(class&0xFFF) {
3300 case I2O_CLASS_EXECUTIVE:
3301 idx = 0; break;
3302 case I2O_CLASS_DDM:
3303 idx = 1; break;
3304 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3305 idx = 2; break;
3306 case I2O_CLASS_SEQUENTIAL_STORAGE:
3307 idx = 3; break;
3308 case I2O_CLASS_LAN:
3309 idx = 4; break;
3310 case I2O_CLASS_WAN:
3311 idx = 5; break;
3312 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3313 idx = 6; break;
3314 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3315 idx = 7; break;
3316 case I2O_CLASS_SCSI_PERIPHERAL:
3317 idx = 8; break;
3318 case I2O_CLASS_ATE_PORT:
3319 idx = 9; break;
3320 case I2O_CLASS_ATE_PERIPHERAL:
3321 idx = 10; break;
3322 case I2O_CLASS_FLOPPY_CONTROLLER:
3323 idx = 11; break;
3324 case I2O_CLASS_FLOPPY_DEVICE:
3325 idx = 12; break;
3326 case I2O_CLASS_BUS_ADAPTER_PORT:
3327 idx = 13; break;
3328 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3329 idx = 14; break;
3330 case I2O_CLASS_PEER_TRANSPORT:
3331 idx = 15; break;
3332 }
3333 return i2o_class_name[idx];
3334}
3335#endif
3336
3337
3338static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3339{
3340 u32 msg[6];
3341 int ret, size = sizeof(i2o_hrt);
3342
3343 do {
3344 if (pHba->hrt == NULL) {
3345 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3346 size, &pHba->hrt_pa, GFP_KERNEL);
3347 if (pHba->hrt == NULL) {
3348 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3349 return -ENOMEM;
3350 }
3351 }
3352
3353 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3354 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3355 msg[2]= 0;
3356 msg[3]= 0;
3357 msg[4]= (0xD0000000 | size); /* Simple transaction */
 3358		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3359
3360 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3361 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3362 return ret;
3363 }
3364
3365 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3366 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3367 dma_free_coherent(&pHba->pDev->dev, size,
3368 pHba->hrt, pHba->hrt_pa);
3369 size = newsize;
3370 pHba->hrt = NULL;
3371 }
3372 } while(pHba->hrt == NULL);
3373 return 0;
3374}
3375
3376/*
3377 * Query one scalar group value or a whole scalar group.
3378 */
3379static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3380 int group, int field, void *buf, int buflen)
3381{
3382 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3383 u8 *opblk_va;
3384 dma_addr_t opblk_pa;
3385 u8 *resblk_va;
3386 dma_addr_t resblk_pa;
3387
3388 int size;
3389
3390 /* 8 bytes for header */
3391 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3392 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3393 if (resblk_va == NULL) {
3394 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3395 return -ENOMEM;
3396 }
3397
3398 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3399 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3400 if (opblk_va == NULL) {
3401 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3402 resblk_va, resblk_pa);
 3403		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3404 pHba->name);
3405 return -ENOMEM;
3406 }
3407 if (field == -1) /* whole group */
3408 opblk[4] = -1;
3409
 3410	memcpy(opblk_va, opblk, sizeof(opblk));
 3411	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3412 opblk_va, opblk_pa, sizeof(opblk),
3413 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3414 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
 3415	if (size == -ETIME) {
3416 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3417 resblk_va, resblk_pa);
3418 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3419 return -ETIME;
3420 } else if (size == -EINTR) {
3421 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3422 resblk_va, resblk_pa);
3423 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3424 return -EINTR;
3425 }
3426
 3427	memcpy(buf, resblk_va+8, buflen); /* cut off header */
 3428
3429 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3430 resblk_va, resblk_pa);
3431 if (size < 0)
3432 return size;
3433
3434 return buflen;
3435}
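/*
 * Minimal usage sketch for adpt_i2o_query_scalar() (illustration only):
 * group 0xF100, field 3 is the vendor string that
 * adpt_i2o_report_hba_unit() reads elsewhere in this file; passing
 * field = -1 fetches the whole scalar group.  The buffer and tid below
 * are assumptions.
 */
#if 0
	char vendor[17];

	if (adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, vendor, 16) >= 0) {
		vendor[16] = '\0';
		printk(KERN_INFO "Vendor: %s\n", vendor);
	}
#endif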
3436
3437
3438/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3439 *
3440 * This function can be used for all UtilParamsGet/Set operations.
3441 * The OperationBlock is given in opblk-buffer,
3442 * and results are returned in resblk-buffer.
3443 * Note that the minimum sized resblk is 8 bytes and contains
3444 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3445 */
3446static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3447 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3448 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3449{
3450 u32 msg[9];
 3451	u32 *res = (u32 *)resblk_va;
3452 int wait_status;
3453
3454 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3455 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3456 msg[2] = 0;
3457 msg[3] = 0;
3458 msg[4] = 0;
3459 msg[5] = 0x54000000 | oplen; /* OperationBlock */
 3460	msg[6] = (u32)opblk_pa;
 3461	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
 3462	msg[8] = (u32)resblk_pa;
3463
3464 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
 3465		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3466 return wait_status; /* -DetailedStatus */
3467 }
3468
3469 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3470 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3471 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3472 pHba->name,
3473 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3474 : "PARAMS_GET",
3475 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3476 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3477 }
3478
3479 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3480}
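/*
 * Return-value sketch for adpt_i2o_issue_params() above (illustration
 * only): success returns 4 + (BlockSize << 2), the bytes used in the
 * result block; a parameter-level failure returns -BlockStatus; and a
 * transport failure returns the post-wait status (e.g. -ETIME or
 * -EINTR), which is why adpt_i2o_query_scalar() checks those two values
 * explicitly before reading the result block.
 */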
3481
3482
3483static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3484{
3485 u32 msg[4];
3486 int ret;
3487
3488 adpt_i2o_status_get(pHba);
3489
3490 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3491
3492 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3493 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3494 return 0;
3495 }
3496
3497 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3498 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3499 msg[2] = 0;
3500 msg[3] = 0;
3501
3502 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3503 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3504 pHba->unit, -ret);
3505 } else {
3506 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3507 }
3508
3509 adpt_i2o_status_get(pHba);
3510 return ret;
3511}
3512
3513
3514/*
3515 * Enable IOP. Allows the IOP to resume external operations.
3516 */
3517static int adpt_i2o_enable_hba(adpt_hba* pHba)
3518{
3519 u32 msg[4];
3520 int ret;
3521
3522 adpt_i2o_status_get(pHba);
3523 if(!pHba->status_block){
3524 return -ENOMEM;
3525 }
3526 /* Enable only allowed on READY state */
3527 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3528 return 0;
3529
3530 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3531 return -EINVAL;
3532
3533 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3534 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3535 msg[2]= 0;
3536 msg[3]= 0;
3537
3538 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3539 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3540 pHba->name, ret);
3541 } else {
3542 PDEBUG("%s: Enabled.\n", pHba->name);
3543 }
3544
3545 adpt_i2o_status_get(pHba);
3546 return ret;
3547}
3548
3549
3550static int adpt_i2o_systab_send(adpt_hba* pHba)
3551{
3552 u32 msg[12];
3553 int ret;
3554
3555 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3556 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3557 msg[2] = 0;
3558 msg[3] = 0;
3559 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3560 msg[5] = 0; /* Segment 0 */
3561
3562 /*
3563 * Provide three SGL-elements:
3564 * System table (SysTab), Private memory space declaration and
3565 * Private i/o space declaration
3566 */
3567 msg[6] = 0x54000000 | sys_tbl_len;
 3568	msg[7] = (u32)sys_tbl_pa;
3569 msg[8] = 0x54000000 | 0;
3570 msg[9] = 0;
3571 msg[10] = 0xD4000000 | 0;
3572 msg[11] = 0;
3573
3574 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3575 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3576 pHba->name, ret);
3577 }
3578#ifdef DEBUG
3579 else {
3580 PINFO("%s: SysTab set.\n", pHba->name);
3581 }
3582#endif
3583
3584 return ret;
3585 }
3586
3587
3588/*============================================================================
3589 *
3590 *============================================================================
3591 */
3592
3593
3594#ifdef UARTDELAY
3595
 3596static void adpt_delay(int millisec)
3597{
3598 int i;
3599 for (i = 0; i < millisec; i++) {
3600 udelay(1000); /* delay for one millisecond */
3601 }
3602}
3603
3604#endif
3605
 3606static struct scsi_host_template driver_template = {
 3607	.module			= THIS_MODULE,
3608 .name = "dpt_i2o",
3609 .proc_name = "dpt_i2o",
3610 .proc_info = adpt_proc_info,
3611 .info = adpt_info,
3612 .queuecommand = adpt_queue,
3613 .eh_abort_handler = adpt_abort,
3614 .eh_device_reset_handler = adpt_device_reset,
3615 .eh_bus_reset_handler = adpt_bus_reset,
3616 .eh_host_reset_handler = adpt_reset,
3617 .bios_param = adpt_bios_param,
3618 .slave_configure = adpt_slave_configure,
3619 .can_queue = MAX_TO_IOP_MESSAGES,
3620 .this_id = 7,
3621 .cmd_per_lun = 1,
3622 .use_clustering = ENABLE_CLUSTERING,
3623};
3624
3625static int __init adpt_init(void)
3626{
3627 int error;
3628 adpt_hba *pHba, *next;
3629
3630 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3631
3632 error = adpt_detect(&driver_template);
3633 if (error < 0)
3634 return error;
3635 if (hba_chain == NULL)
3636 return -ENODEV;
3637
3638 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3639 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3640 if (error)
3641 goto fail;
3642 scsi_scan_host(pHba->host);
3643 }
3644 return 0;
3645fail:
3646 for (pHba = hba_chain; pHba; pHba = next) {
3647 next = pHba->next;
3648 scsi_remove_host(pHba->host);
3649 }
3650 return error;
3651}
3652
3653static void __exit adpt_exit(void)
3654{
3655 adpt_hba *pHba, *next;
3656
3657 for (pHba = hba_chain; pHba; pHba = pHba->next)
3658 scsi_remove_host(pHba->host);
3659 for (pHba = hba_chain; pHba; pHba = next) {
3660 next = pHba->next;
3661 adpt_release(pHba->host);
3662 }
3663}
3664
3665module_init(adpt_init);
3666module_exit(adpt_exit);
3667
 3668MODULE_LICENSE("GPL");