1 /* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
4 *
5 * A lot of this driver was directly stolen from Erik H. Moe's PCI
6 * Qlogic ISP driver. Mucho kudos to him for this code.
7 *
8 * An even bigger kudos to John Grana at Performance Technologies
9 * for providing me with the hardware to write this driver, you rule
10 * John you really do.
11 *
12 * May, 2, 1997: Added support for QLGC,isp --jj
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/delay.h>
17 #include <linux/types.h>
18 #include <linux/string.h>
19 #include <linux/slab.h>
20 #include <linux/blkdev.h>
21 #include <linux/proc_fs.h>
22 #include <linux/stat.h>
23 #include <linux/init.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/module.h>
27
28 #include <asm/byteorder.h>
29
30 #include "qlogicpti.h"
31
32 #include <asm/sbus.h>
33 #include <asm/dma.h>
34 #include <asm/system.h>
35 #include <asm/ptrace.h>
36 #include <asm/pgtable.h>
37 #include <asm/oplib.h>
38 #include <asm/io.h>
39 #include <asm/irq.h>
40
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_eh.h>
45 #include <scsi/scsi_request.h>
46 #include <scsi/scsi_tcq.h>
47 #include <scsi/scsi_host.h>
48
49
50
51 #define MAX_TARGETS 16
52 #define MAX_LUNS 8 /* 32 for 1.31 F/W */
53
54 #define DEFAULT_LOOP_COUNT 10000
55
56 #include "qlogicpti_asm.c"
57
58 static struct qlogicpti *qptichain = NULL;
59 static DEFINE_SPINLOCK(qptichain_lock);
60 static int qptis_running = 0;
61
62 #define PACKB(a, b) (((a)<<4)|(b))
63
64 static const u_char mbox_param[] = {
65 PACKB(1, 1), /* MBOX_NO_OP */
66 PACKB(5, 5), /* MBOX_LOAD_RAM */
67 PACKB(2, 0), /* MBOX_EXEC_FIRMWARE */
68 PACKB(5, 5), /* MBOX_DUMP_RAM */
69 PACKB(3, 3), /* MBOX_WRITE_RAM_WORD */
70 PACKB(2, 3), /* MBOX_READ_RAM_WORD */
71 PACKB(6, 6), /* MBOX_MAILBOX_REG_TEST */
72 PACKB(2, 3), /* MBOX_VERIFY_CHECKSUM */
73 PACKB(1, 3), /* MBOX_ABOUT_FIRMWARE */
74 PACKB(0, 0), /* 0x0009 */
75 PACKB(0, 0), /* 0x000a */
76 PACKB(0, 0), /* 0x000b */
77 PACKB(0, 0), /* 0x000c */
78 PACKB(0, 0), /* 0x000d */
79 PACKB(1, 2), /* MBOX_CHECK_FIRMWARE */
80 PACKB(0, 0), /* 0x000f */
81 PACKB(5, 5), /* MBOX_INIT_REQ_QUEUE */
82 PACKB(6, 6), /* MBOX_INIT_RES_QUEUE */
83 PACKB(4, 4), /* MBOX_EXECUTE_IOCB */
84 PACKB(2, 2), /* MBOX_WAKE_UP */
85 PACKB(1, 6), /* MBOX_STOP_FIRMWARE */
86 PACKB(4, 4), /* MBOX_ABORT */
87 PACKB(2, 2), /* MBOX_ABORT_DEVICE */
88 PACKB(3, 3), /* MBOX_ABORT_TARGET */
89 PACKB(2, 2), /* MBOX_BUS_RESET */
90 PACKB(2, 3), /* MBOX_STOP_QUEUE */
91 PACKB(2, 3), /* MBOX_START_QUEUE */
92 PACKB(2, 3), /* MBOX_SINGLE_STEP_QUEUE */
93 PACKB(2, 3), /* MBOX_ABORT_QUEUE */
94 PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_STATUS */
95 PACKB(0, 0), /* 0x001e */
96 PACKB(1, 3), /* MBOX_GET_FIRMWARE_STATUS */
97 PACKB(1, 2), /* MBOX_GET_INIT_SCSI_ID */
98 PACKB(1, 2), /* MBOX_GET_SELECT_TIMEOUT */
99 PACKB(1, 3), /* MBOX_GET_RETRY_COUNT */
100 PACKB(1, 2), /* MBOX_GET_TAG_AGE_LIMIT */
101 PACKB(1, 2), /* MBOX_GET_CLOCK_RATE */
102 PACKB(1, 2), /* MBOX_GET_ACT_NEG_STATE */
103 PACKB(1, 2), /* MBOX_GET_ASYNC_DATA_SETUP_TIME */
104 PACKB(1, 3), /* MBOX_GET_SBUS_PARAMS */
105 PACKB(2, 4), /* MBOX_GET_TARGET_PARAMS */
106 PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_PARAMS */
107 PACKB(0, 0), /* 0x002a */
108 PACKB(0, 0), /* 0x002b */
109 PACKB(0, 0), /* 0x002c */
110 PACKB(0, 0), /* 0x002d */
111 PACKB(0, 0), /* 0x002e */
112 PACKB(0, 0), /* 0x002f */
113 PACKB(2, 2), /* MBOX_SET_INIT_SCSI_ID */
114 PACKB(2, 2), /* MBOX_SET_SELECT_TIMEOUT */
115 PACKB(3, 3), /* MBOX_SET_RETRY_COUNT */
116 PACKB(2, 2), /* MBOX_SET_TAG_AGE_LIMIT */
117 PACKB(2, 2), /* MBOX_SET_CLOCK_RATE */
118 PACKB(2, 2), /* MBOX_SET_ACTIVE_NEG_STATE */
119 PACKB(2, 2), /* MBOX_SET_ASYNC_DATA_SETUP_TIME */
120 PACKB(3, 3), /* MBOX_SET_SBUS_CONTROL_PARAMS */
121 PACKB(4, 4), /* MBOX_SET_TARGET_PARAMS */
122 PACKB(4, 4), /* MBOX_SET_DEV_QUEUE_PARAMS */
123 PACKB(0, 0), /* 0x003a */
124 PACKB(0, 0), /* 0x003b */
125 PACKB(0, 0), /* 0x003c */
126 PACKB(0, 0), /* 0x003d */
127 PACKB(0, 0), /* 0x003e */
128 PACKB(0, 0), /* 0x003f */
129 PACKB(0, 0), /* 0x0040 */
130 PACKB(0, 0), /* 0x0041 */
131 PACKB(0, 0) /* 0x0042 */
132 };
133
134 #define MAX_MBOX_COMMAND (sizeof(mbox_param)/sizeof(u_short))
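/* A minimal illustrative sketch (not driver code, kept compiled out): how
 * the PACKB() values in mbox_param[] above are consumed.  The high nibble
 * is the number of mailbox registers written before a command is issued,
 * the low nibble is the number read back afterwards, exactly as
 * qlogicpti_mbox_command() below does with ">> 4" and "& 0xf".
 */
#if 0
static void example_unpack_mbox_counts(u_char cmd)
{
	u_char in_count  = mbox_param[cmd] >> 4;	/* regs to write */
	u_char out_count = mbox_param[cmd] & 0xf;	/* regs to read back */

	printk(KERN_DEBUG "mbox cmd 0x%02x: %d in, %d out\n",
	       cmd, in_count, out_count);
}
#endif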
135
136 /* queue lengths _must_ be a power of two: */
137 #define QUEUE_DEPTH(in, out, ql) ((in - out) & (ql))
138 #define REQ_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, \
139 QLOGICPTI_REQ_QUEUE_LEN)
140 #define RES_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
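/* A minimal illustrative sketch (not driver code, kept compiled out): the
 * *_QUEUE_LEN constants are used above as index masks, so the ring size is
 * mask + 1 and must be a power of two.  Subtract-and-mask then yields the
 * occupied-slot count even after the "in" index has wrapped past "out".
 * The value 255 below is only an assumed example mask.
 */
#if 0
static u_int example_ring_depth(u_int in, u_int out, u_int mask)
{
	/* e.g. in = 3, out = 250, mask = 255  ->  (3 - 250) & 255 == 9 */
	return (in - out) & mask;
}
#endif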
141
142 static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti)
143 {
144 sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB,
145 qpti->qregs + SBUS_CTRL);
146 }
147
148 static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti)
149 {
150 sbus_writew(0, qpti->qregs + SBUS_CTRL);
151 }
152
153 static inline void set_sbus_cfg1(struct qlogicpti *qpti)
154 {
155 u16 val;
156 u8 bursts = qpti->bursts;
157
158 #if 0 /* It appears that at least PTI cards do not support
159 * 64-byte bursts and that setting the B64 bit actually
160 * is a nop and the chip ends up using the smallest burst
161 * size. -DaveM
162 */
163 if (sbus_can_burst64(qpti->sdev) && (bursts & DMA_BURST64)) {
164 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
165 } else
166 #endif
167 if (bursts & DMA_BURST32) {
168 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32);
169 } else if (bursts & DMA_BURST16) {
170 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16);
171 } else if (bursts & DMA_BURST8) {
172 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8);
173 } else {
174 val = 0; /* No sbus bursts for you... */
175 }
176 sbus_writew(val, qpti->qregs + SBUS_CFG1);
177 }
178
179 static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force)
180 {
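/* Handshake as implemented below: lock the SBUS semaphore, wait for any
 * previous host interrupt to clear, write the input parameters into the
 * MBOX registers (count from the high nibble of mbox_param[]), then set
 * the host interrupt bit in HCCTRL to hand the command to the RISC.
 * Completion is detected by polling HCCTRL, the SBUS semaphore and MBOX0
 * (0x04 means still busy), after which the results are read back (count
 * from the low nibble) and the semaphore is released.
 */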
181 int loop_count;
182 u16 tmp;
183
184 if (mbox_param[param[0]] == 0)
185 return 1;
186
187 /* Set SBUS semaphore. */
188 tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
189 tmp |= SBUS_SEMAPHORE_LCK;
190 sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);
191
192 /* Wait for host IRQ bit to clear. */
193 loop_count = DEFAULT_LOOP_COUNT;
194 while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) {
195 barrier();
196 cpu_relax();
197 }
198 if (!loop_count)
199 printk(KERN_EMERG "qlogicpti: mbox_command loop timeout #1\n");
200
201 /* Write mailbox command registers. */
202 switch (mbox_param[param[0]] >> 4) {
203 case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
204 case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
205 case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
206 case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
207 case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
208 case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
209 }
210
211 /* Clear RISC interrupt. */
212 tmp = sbus_readw(qpti->qregs + HCCTRL);
213 tmp |= HCCTRL_CRIRQ;
214 sbus_writew(tmp, qpti->qregs + HCCTRL);
215
216 /* Clear SBUS semaphore. */
217 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
218
219 /* Set HOST interrupt. */
220 tmp = sbus_readw(qpti->qregs + HCCTRL);
221 tmp |= HCCTRL_SHIRQ;
222 sbus_writew(tmp, qpti->qregs + HCCTRL);
223
224 /* Wait for the HOST interrupt to clear. */
225 loop_count = DEFAULT_LOOP_COUNT;
226 while (--loop_count &&
227 (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
228 udelay(20);
229 if (!loop_count)
230 printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #2\n",
231 param[0]);
232
233 /* Wait for SBUS semaphore to get set. */
234 loop_count = DEFAULT_LOOP_COUNT;
235 while (--loop_count &&
236 !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) {
237 udelay(20);
238
239 /* Workaround for some buggy chips. */
240 if (sbus_readw(qpti->qregs + MBOX0) & 0x4000)
241 break;
242 }
243 if (!loop_count)
244 printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #3\n",
245 param[0]);
246
247 /* Wait for MBOX busy condition to go away. */
248 loop_count = DEFAULT_LOOP_COUNT;
249 while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
250 udelay(20);
251 if (!loop_count)
252 printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #4\n",
253 param[0]);
254
255 /* Read back output parameters. */
256 switch (mbox_param[param[0]] & 0xf) {
257 case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
258 case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
259 case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
260 case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
261 case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
262 case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
263 }
264
265 /* Clear RISC interrupt. */
266 tmp = sbus_readw(qpti->qregs + HCCTRL);
267 tmp |= HCCTRL_CRIRQ;
268 sbus_writew(tmp, qpti->qregs + HCCTRL);
269
270 /* Release SBUS semaphore. */
271 tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
272 tmp &= ~(SBUS_SEMAPHORE_LCK);
273 sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);
274
275 /* We're done. */
276 return 0;
277 }
278
279 static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti)
280 {
281 int i;
282
283 qpti->host_param.initiator_scsi_id = qpti->scsi_id;
284 qpti->host_param.bus_reset_delay = 3;
285 qpti->host_param.retry_count = 0;
286 qpti->host_param.retry_delay = 5;
287 qpti->host_param.async_data_setup_time = 3;
288 qpti->host_param.req_ack_active_negation = 1;
289 qpti->host_param.data_line_active_negation = 1;
290 qpti->host_param.data_dma_burst_enable = 1;
291 qpti->host_param.command_dma_burst_enable = 1;
292 qpti->host_param.tag_aging = 8;
293 qpti->host_param.selection_timeout = 250;
294 qpti->host_param.max_queue_depth = 256;
295
296 for(i = 0; i < MAX_TARGETS; i++) {
297 /*
298 * disconnect, parity, arq, reneg on reset, and, oddly enough,
299 * tags... the midlayer's notion of tagged support has to match
300 * our device settings, and since we base whether we enable a
301 * tag on a per-cmnd basis upon what the midlayer sez, we
302 * actually enable the capability here.
303 */
304 qpti->dev_param[i].device_flags = 0xcd;
305 qpti->dev_param[i].execution_throttle = 16;
306 if (qpti->ultra) {
307 qpti->dev_param[i].synchronous_period = 12;
308 qpti->dev_param[i].synchronous_offset = 8;
309 } else {
310 qpti->dev_param[i].synchronous_period = 25;
311 qpti->dev_param[i].synchronous_offset = 12;
312 }
313 qpti->dev_param[i].device_enable = 1;
314 }
315 /* this is very important to set! */
316 qpti->sbits = 1 << qpti->scsi_id;
317 }
318
319 static int qlogicpti_reset_hardware(struct Scsi_Host *host)
320 {
321 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
322 u_short param[6];
323 unsigned short risc_code_addr;
324 int loop_count, i;
325 unsigned long flags;
326
327 risc_code_addr = 0x1000; /* all load addresses are at 0x1000 */
328
329 spin_lock_irqsave(host->host_lock, flags);
330
331 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
332
333 /* Only reset the scsi bus if it is not free. */
334 if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) {
335 sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE);
336 sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD);
337 udelay(400);
338 }
339
340 sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
341 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
342 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
343
344 loop_count = DEFAULT_LOOP_COUNT;
345 while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
346 udelay(20);
347 if (!loop_count)
348 printk(KERN_EMERG "qlogicpti: reset_hardware loop timeout\n");
349
350 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
351 set_sbus_cfg1(qpti);
352 qlogicpti_enable_irqs(qpti);
353
354 if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
355 qpti->ultra = 1;
356 sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
357 qpti->qregs + RISC_MTREG);
358 } else {
359 qpti->ultra = 0;
360 sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
361 qpti->qregs + RISC_MTREG);
362 }
363
364 /* reset adapter and per-device default values. */
365 /* do it after finding out whether we're ultra mode capable */
366 qlogicpti_set_hostdev_defaults(qpti);
367
368 /* Release the RISC processor. */
369 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
370
371 /* Get RISC to start executing the firmware code. */
372 param[0] = MBOX_EXEC_FIRMWARE;
373 param[1] = risc_code_addr;
374 if (qlogicpti_mbox_command(qpti, param, 1)) {
375 printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n",
376 qpti->qpti_id);
377 spin_unlock_irqrestore(host->host_lock, flags);
378 return 1;
379 }
380
381 /* Set initiator scsi ID. */
382 param[0] = MBOX_SET_INIT_SCSI_ID;
383 param[1] = qpti->host_param.initiator_scsi_id;
384 if (qlogicpti_mbox_command(qpti, param, 1) ||
385 (param[0] != MBOX_COMMAND_COMPLETE)) {
386 printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n",
387 qpti->qpti_id);
388 spin_unlock_irqrestore(host->host_lock, flags);
389 return 1;
390 }
391
392 /* Initialize state of the queues, both hw and sw. */
393 qpti->req_in_ptr = qpti->res_out_ptr = 0;
394
395 param[0] = MBOX_INIT_RES_QUEUE;
396 param[1] = RES_QUEUE_LEN + 1;
397 param[2] = (u_short) (qpti->res_dvma >> 16);
398 param[3] = (u_short) (qpti->res_dvma & 0xffff);
399 param[4] = param[5] = 0;
400 if (qlogicpti_mbox_command(qpti, param, 1)) {
401 printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n",
402 qpti->qpti_id);
403 spin_unlock_irqrestore(host->host_lock, flags);
404 return 1;
405 }
406
407 param[0] = MBOX_INIT_REQ_QUEUE;
408 param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1;
409 param[2] = (u_short) (qpti->req_dvma >> 16);
410 param[3] = (u_short) (qpti->req_dvma & 0xffff);
411 param[4] = param[5] = 0;
412 if (qlogicpti_mbox_command(qpti, param, 1)) {
413 printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n",
414 qpti->qpti_id);
415 spin_unlock_irqrestore(host->host_lock, flags);
416 return 1;
417 }
418
419 param[0] = MBOX_SET_RETRY_COUNT;
420 param[1] = qpti->host_param.retry_count;
421 param[2] = qpti->host_param.retry_delay;
422 qlogicpti_mbox_command(qpti, param, 0);
423
424 param[0] = MBOX_SET_TAG_AGE_LIMIT;
425 param[1] = qpti->host_param.tag_aging;
426 qlogicpti_mbox_command(qpti, param, 0);
427
428 for (i = 0; i < MAX_TARGETS; i++) {
429 param[0] = MBOX_GET_DEV_QUEUE_PARAMS;
430 param[1] = (i << 8);
431 qlogicpti_mbox_command(qpti, param, 0);
432 }
433
434 param[0] = MBOX_GET_FIRMWARE_STATUS;
435 qlogicpti_mbox_command(qpti, param, 0);
436
437 param[0] = MBOX_SET_SELECT_TIMEOUT;
438 param[1] = qpti->host_param.selection_timeout;
439 qlogicpti_mbox_command(qpti, param, 0);
440
441 for (i = 0; i < MAX_TARGETS; i++) {
442 param[0] = MBOX_SET_TARGET_PARAMS;
443 param[1] = (i << 8);
444 param[2] = (qpti->dev_param[i].device_flags << 8);
445 /*
446 * Since we're now loading 1.31 f/w, force narrow/async.
447 */
448 param[2] |= 0xc0;
449 param[3] = 0; /* no offset, we do not have sync mode yet */
450 qlogicpti_mbox_command(qpti, param, 0);
451 }
452
453 /*
454 * Always (sigh) do an initial bus reset (kicks f/w).
455 */
456 param[0] = MBOX_BUS_RESET;
457 param[1] = qpti->host_param.bus_reset_delay;
458 qlogicpti_mbox_command(qpti, param, 0);
459 qpti->send_marker = 1;
460
461 spin_unlock_irqrestore(host->host_lock, flags);
462 return 0;
463 }
464
465 #define PTI_RESET_LIMIT 400
466
467 static int __init qlogicpti_load_firmware(struct qlogicpti *qpti)
468 {
469 struct Scsi_Host *host = qpti->qhost;
470 unsigned short csum = 0;
471 unsigned short param[6];
472 unsigned short *risc_code, risc_code_addr, risc_code_length;
473 unsigned long flags;
474 int i, timeout;
475
476 risc_code = &sbus_risc_code01[0];
477 risc_code_addr = 0x1000; /* all f/w modules load at 0x1000 */
478 risc_code_length = sbus_risc_code_length01;
479
480 spin_lock_irqsave(host->host_lock, flags);
481
482 /* Verify the checksum twice: once before loading it, and once
483 * afterwards via the mailbox commands.
484 */
485 for (i = 0; i < risc_code_length; i++)
486 csum += risc_code[i];
487 if (csum) {
488 spin_unlock_irqrestore(host->host_lock, flags);
489 printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!",
490 qpti->qpti_id);
491 return 1;
492 }
493 sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
494 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
495 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
496 timeout = PTI_RESET_LIMIT;
497 while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
498 udelay(20);
499 if (!timeout) {
500 spin_unlock_irqrestore(host->host_lock, flags);
501 printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id);
502 return 1;
503 }
504
505 sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
506 mdelay(1);
507
508 sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL);
509 set_sbus_cfg1(qpti);
510 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
511
512 if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
513 qpti->ultra = 1;
514 sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
515 qpti->qregs + RISC_MTREG);
516 } else {
517 qpti->ultra = 0;
518 sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
519 qpti->qregs + RISC_MTREG);
520 }
521
522 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
523
524 /* Pin lines are only stable while RISC is paused. */
525 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
526 if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE)
527 qpti->differential = 1;
528 else
529 qpti->differential = 0;
530 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
531
532 /* This shouldn't be necessary - we've reset things, so we should be
533 running from the ROM now. */
534
535 param[0] = MBOX_STOP_FIRMWARE;
536 param[1] = param[2] = param[3] = param[4] = param[5] = 0;
537 if (qlogicpti_mbox_command(qpti, param, 1)) {
538 printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
539 qpti->qpti_id);
540 spin_unlock_irqrestore(host->host_lock, flags);
541 return 1;
542 }
543
544 /* Load it up.. */
545 for (i = 0; i < risc_code_length; i++) {
546 param[0] = MBOX_WRITE_RAM_WORD;
547 param[1] = risc_code_addr + i;
548 param[2] = risc_code[i];
549 if (qlogicpti_mbox_command(qpti, param, 1) ||
550 param[0] != MBOX_COMMAND_COMPLETE) {
551 printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
552 qpti->qpti_id);
553 spin_unlock_irqrestore(host->host_lock, flags);
554 return 1;
555 }
556 }
557
558 /* Reset the ISP again. */
559 sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
560 mdelay(1);
561
562 qlogicpti_enable_irqs(qpti);
563 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
564 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
565
566 /* Ask ISP to verify the checksum of the new code. */
567 param[0] = MBOX_VERIFY_CHECKSUM;
568 param[1] = risc_code_addr;
569 if (qlogicpti_mbox_command(qpti, param, 1) ||
570 (param[0] != MBOX_COMMAND_COMPLETE)) {
571 printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
572 qpti->qpti_id);
573 spin_unlock_irqrestore(host->host_lock, flags);
574 return 1;
575 }
576
577 /* Start using newly downloaded firmware. */
578 param[0] = MBOX_EXEC_FIRMWARE;
579 param[1] = risc_code_addr;
580 qlogicpti_mbox_command(qpti, param, 1);
581
582 param[0] = MBOX_ABOUT_FIRMWARE;
583 if (qlogicpti_mbox_command(qpti, param, 1) ||
584 (param[0] != MBOX_COMMAND_COMPLETE)) {
585 printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
586 qpti->qpti_id);
587 spin_unlock_irqrestore(host->host_lock, flags);
588 return 1;
589 }
590
591 /* Snag the major and minor revisions from the result. */
592 qpti->fware_majrev = param[1];
593 qpti->fware_minrev = param[2];
594 qpti->fware_micrev = param[3];
595
596 /* Set the clock rate */
597 param[0] = MBOX_SET_CLOCK_RATE;
598 param[1] = qpti->clock;
599 if (qlogicpti_mbox_command(qpti, param, 1) ||
600 (param[0] != MBOX_COMMAND_COMPLETE)) {
601 printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
602 qpti->qpti_id);
603 spin_unlock_irqrestore(host->host_lock, flags);
604 return 1;
605 }
606
607 if (qpti->is_pti != 0) {
608 /* Load scsi initiator ID and interrupt level into sbus static ram. */
609 param[0] = MBOX_WRITE_RAM_WORD;
610 param[1] = 0xff80;
611 param[2] = (unsigned short) qpti->scsi_id;
612 qlogicpti_mbox_command(qpti, param, 1);
613
614 param[0] = MBOX_WRITE_RAM_WORD;
615 param[1] = 0xff00;
616 param[2] = (unsigned short) 3;
617 qlogicpti_mbox_command(qpti, param, 1);
618 }
619
620 spin_unlock_irqrestore(host->host_lock, flags);
621 return 0;
622 }
623
624 static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
625 {
626 int curstat = sbus_readb(qpti->sreg);
627
628 curstat &= 0xf0;
629 if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE))
630 printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id);
631 if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER))
632 printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id);
633 if (curstat != qpti->swsreg) {
634 int error = 0;
635 if (curstat & SREG_FUSE) {
636 error++;
637 printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id);
638 }
639 if (curstat & SREG_TPOWER) {
640 error++;
641 printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id);
642 }
643 if (qpti->differential &&
644 (curstat & SREG_DSENSE) != SREG_DSENSE) {
645 error++;
646 printk("qlogicpti%d: You have a single ended device on a "
647 "differential bus! Please fix!\n", qpti->qpti_id);
648 }
649 qpti->swsreg = curstat;
650 return error;
651 }
652 return 0;
653 }
654
655 static irqreturn_t qpti_intr(int irq, void *dev_id, struct pt_regs *regs);
656
657 static void __init qpti_chain_add(struct qlogicpti *qpti)
658 {
659 spin_lock_irq(&qptichain_lock);
660 if (qptichain != NULL) {
661 struct qlogicpti *qlink = qptichain;
662
663 while(qlink->next)
664 qlink = qlink->next;
665 qlink->next = qpti;
666 } else {
667 qptichain = qpti;
668 }
669 qpti->next = NULL;
670 spin_unlock_irq(&qptichain_lock);
671 }
672
673 static void __init qpti_chain_del(struct qlogicpti *qpti)
674 {
675 spin_lock_irq(&qptichain_lock);
676 if (qptichain == qpti) {
677 qptichain = qpti->next;
678 } else {
679 struct qlogicpti *qlink = qptichain;
680 while(qlink->next != qpti)
681 qlink = qlink->next;
682 qlink->next = qpti->next;
683 }
684 qpti->next = NULL;
685 spin_unlock_irq(&qptichain_lock);
686 }
687
688 static int __init qpti_map_regs(struct qlogicpti *qpti)
689 {
690 struct sbus_dev *sdev = qpti->sdev;
691
692 qpti->qregs = sbus_ioremap(&sdev->resource[0], 0,
693 sdev->reg_addrs[0].reg_size,
694 "PTI Qlogic/ISP");
695 if (!qpti->qregs) {
696 printk("PTI: Qlogic/ISP registers are unmappable\n");
697 return -1;
698 }
699 if (qpti->is_pti) {
700 qpti->sreg = sbus_ioremap(&sdev->resource[0], (16 * 4096),
701 sizeof(unsigned char),
702 "PTI Qlogic/ISP statreg");
703 if (!qpti->sreg) {
704 printk("PTI: Qlogic/ISP status register is unmappable\n");
705 return -1;
706 }
707 }
708 return 0;
709 }
710
711 static int __init qpti_register_irq(struct qlogicpti *qpti)
712 {
713 struct sbus_dev *sdev = qpti->sdev;
714
715 qpti->qhost->irq = qpti->irq = sdev->irqs[0];
716
717 /* We used to try various overly-clever things to
718 * reduce the interrupt processing overhead on
719 * sun4c/sun4m when multiple PTI's shared the
720 * same IRQ. It was too complex and messy to
721 * sanely maintain.
722 */
723 if (request_irq(qpti->irq, qpti_intr,
724 SA_SHIRQ, "Qlogic/PTI", qpti))
725 goto fail;
726
727 printk("qpti%d: IRQ %s ", qpti->qpti_id, __irq_itoa(qpti->irq));
728
729 return 0;
730
731 fail:
732 printk("qpti%d: Cannot acquire irq line\n", qpti->qpti_id);
733 return -1;
734 }
735
736 static void __init qpti_get_scsi_id(struct qlogicpti *qpti)
737 {
738 qpti->scsi_id = prom_getintdefault(qpti->prom_node,
739 "initiator-id",
740 -1);
741 if (qpti->scsi_id == -1)
742 qpti->scsi_id = prom_getintdefault(qpti->prom_node,
743 "scsi-initiator-id",
744 -1);
745 if (qpti->scsi_id == -1)
746 qpti->scsi_id =
747 prom_getintdefault(qpti->sdev->bus->prom_node,
748 "scsi-initiator-id", 7);
749 qpti->qhost->this_id = qpti->scsi_id;
750 qpti->qhost->max_sectors = 64;
751
752 printk("SCSI ID %d ", qpti->scsi_id);
753 }
754
755 static void qpti_get_bursts(struct qlogicpti *qpti)
756 {
757 struct sbus_dev *sdev = qpti->sdev;
758 u8 bursts, bmask;
759
760 bursts = prom_getintdefault(qpti->prom_node, "burst-sizes", 0xff);
761 bmask = prom_getintdefault(sdev->bus->prom_node,
762 "burst-sizes", 0xff);
763 if (bmask != 0xff)
764 bursts &= bmask;
765 if (bursts == 0xff ||
766 (bursts & DMA_BURST16) == 0 ||
767 (bursts & DMA_BURST32) == 0)
768 bursts = (DMA_BURST32 - 1);
769
770 qpti->bursts = bursts;
771 }
772
773 static void qpti_get_clock(struct qlogicpti *qpti)
774 {
775 unsigned int cfreq;
776
777 /* Check what the clock input to this card is.
778 * Default to 40MHz.
779 */
780 cfreq = prom_getintdefault(qpti->prom_node,"clock-frequency",40000000);
781 qpti->clock = (cfreq + 500000)/1000000;
782 if (qpti->clock == 0) /* bullshit */
783 qpti->clock = 40;
784 }
785
786 /* The request and response queues must each be aligned
787 * on a page boundary.
788 */
789 static int __init qpti_map_queues(struct qlogicpti *qpti)
790 {
791 struct sbus_dev *sdev = qpti->sdev;
792
793 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
794 qpti->res_cpu = sbus_alloc_consistent(sdev,
795 QSIZE(RES_QUEUE_LEN),
796 &qpti->res_dvma);
797 if (qpti->res_cpu == NULL ||
798 qpti->res_dvma == 0) {
799 printk("QPTI: Cannot map response queue.\n");
800 return -1;
801 }
802
803 qpti->req_cpu = sbus_alloc_consistent(sdev,
804 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
805 &qpti->req_dvma);
806 if (qpti->req_cpu == NULL ||
807 qpti->req_dvma == 0) {
808 sbus_free_consistent(sdev, QSIZE(RES_QUEUE_LEN),
809 qpti->res_cpu, qpti->res_dvma);
810 printk("QPTI: Cannot map request queue.\n");
811 return -1;
812 }
813 memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN));
814 memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN));
815 return 0;
816 }
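/* Note on the sizing above: QSIZE(n) allocates n + 1 fixed-size slots of
 * QUEUE_ENTRY_LEN bytes; the *_QUEUE_LEN constants are index masks
 * elsewhere in this file, so the actual ring holds mask + 1 entries.
 * sbus_alloc_consistent() returns DMA-consistent memory which is assumed
 * here to be page aligned, satisfying the requirement stated above.
 */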
817
818 /* Detect all PTI Qlogic ISP's in the machine. */
819 static int __init qlogicpti_detect(struct scsi_host_template *tpnt)
820 {
821 struct qlogicpti *qpti;
822 struct Scsi_Host *qpti_host;
823 struct sbus_bus *sbus;
824 struct sbus_dev *sdev;
825 int nqptis = 0, nqptis_in_use = 0;
826
827 tpnt->proc_name = "qlogicpti";
828 for_each_sbus(sbus) {
829 for_each_sbusdev(sdev, sbus) {
830 /* Is this a red snapper? */
831 if (strcmp(sdev->prom_name, "ptisp") &&
832 strcmp(sdev->prom_name, "PTI,ptisp") &&
833 strcmp(sdev->prom_name, "QLGC,isp") &&
834 strcmp(sdev->prom_name, "SUNW,isp"))
835 continue;
836
837 /* Sometimes Antares cards come up not completely
838 * setup, and we get a report of a zero IRQ.
839 * Skip over them in such cases so we survive.
840 */
841 if (sdev->irqs[0] == 0) {
842 printk("qpti%d: Adapter reports no interrupt, "
843 "skipping over this card.", nqptis);
844 continue;
845 }
846
847 /* Yep, register and allocate software state. */
848 qpti_host = scsi_register(tpnt, sizeof(struct qlogicpti));
849 if (!qpti_host) {
850 printk("QPTI: Cannot register PTI Qlogic ISP SCSI host");
851 continue;
852 }
853 qpti = (struct qlogicpti *) qpti_host->hostdata;
854
855 /* We are wide capable, 16 targets. */
856 qpti_host->max_id = MAX_TARGETS;
857
858 /* Setup back pointers and misc. state. */
859 qpti->qhost = qpti_host;
860 qpti->sdev = sdev;
861 qpti->qpti_id = nqptis++;
862 qpti->prom_node = sdev->prom_node;
863 prom_getstring(qpti->prom_node, "name",
864 qpti->prom_name,
865 sizeof(qpti->prom_name));
866
867 /* This is not correct, actually. There's a switch
868 * on the PTI cards that puts them into "emulation"
869 * mode, i.e. they report themselves as QLGC,isp
870 * instead of PTI,ptisp. The only real substantive
871 * difference between non-PTI and PTI cards is
872 * the tmon register, which is possibly even
873 * there on Qlogic cards, but non-functional.
874 */
875 qpti->is_pti = (strcmp (qpti->prom_name, "QLGC,isp") != 0);
876
877 qpti_chain_add(qpti);
878 if (qpti_map_regs(qpti) < 0)
879 goto fail_unlink;
880
881 if (qpti_register_irq(qpti) < 0)
882 goto fail_unmap_regs;
883
884 qpti_get_scsi_id(qpti);
885 qpti_get_bursts(qpti);
886 qpti_get_clock(qpti);
887
888 /* Clear out scsi_cmnd array. */
889 memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));
890
891 if (qpti_map_queues(qpti) < 0)
892 goto fail_free_irq;
893
894 /* Load the firmware. */
895 if (qlogicpti_load_firmware(qpti))
896 goto fail_unmap_queues;
897 if (qpti->is_pti) {
898 /* Check the PTI status reg. */
899 if (qlogicpti_verify_tmon(qpti))
900 goto fail_unmap_queues;
901 }
902
903 /* Reset the ISP and init res/req queues. */
904 if (qlogicpti_reset_hardware(qpti_host))
905 goto fail_unmap_queues;
906
907 printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
908 qpti->fware_minrev, qpti->fware_micrev);
909 {
910 char buffer[60];
911
912 prom_getstring (qpti->prom_node,
913 "isp-fcode", buffer, 60);
914 if (buffer[0])
915 printk("(Firmware %s)", buffer);
916 if (prom_getbool(qpti->prom_node, "differential"))
917 qpti->differential = 1;
918 }
919
920 printk (" [%s Wide, using %s interface]\n",
921 (qpti->ultra ? "Ultra" : "Fast"),
922 (qpti->differential ? "differential" : "single ended"));
923
924 nqptis_in_use++;
925 continue;
926
927 fail_unmap_queues:
928 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
929 sbus_free_consistent(qpti->sdev,
930 QSIZE(RES_QUEUE_LEN),
931 qpti->res_cpu, qpti->res_dvma);
932 sbus_free_consistent(qpti->sdev,
933 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
934 qpti->req_cpu, qpti->req_dvma);
935 #undef QSIZE
936 fail_free_irq:
937 free_irq(qpti->irq, qpti);
938
939 fail_unmap_regs:
940 sbus_iounmap(qpti->qregs,
941 qpti->sdev->reg_addrs[0].reg_size);
942 if (qpti->is_pti)
943 sbus_iounmap(qpti->sreg, sizeof(unsigned char));
944 fail_unlink:
945 qpti_chain_del(qpti);
946 scsi_unregister(qpti->qhost);
947 }
948 }
949 if (nqptis)
950 printk("QPTI: Total of %d PTI Qlogic/ISP hosts found, %d actually in use.\n",
951 nqptis, nqptis_in_use);
952 qptis_running = nqptis_in_use;
953 return nqptis;
954 }
955
956 static int qlogicpti_release(struct Scsi_Host *host)
957 {
958 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
959
960 /* Remove visibility from IRQ handlers. */
961 qpti_chain_del(qpti);
962
963 /* Shut up the card. */
964 sbus_writew(0, qpti->qregs + SBUS_CTRL);
965
966 /* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
967 free_irq(qpti->irq, qpti);
968
969 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
970 sbus_free_consistent(qpti->sdev,
971 QSIZE(RES_QUEUE_LEN),
972 qpti->res_cpu, qpti->res_dvma);
973 sbus_free_consistent(qpti->sdev,
974 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
975 qpti->req_cpu, qpti->req_dvma);
976 #undef QSIZE
977
978 sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size);
979 if (qpti->is_pti)
980 sbus_iounmap(qpti->sreg, sizeof(unsigned char));
981
982 return 0;
983 }
984
985 const char *qlogicpti_info(struct Scsi_Host *host)
986 {
987 static char buf[80];
988 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
989
990 sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %s regs at %p",
991 __irq_itoa(qpti->qhost->irq), qpti->qregs);
992 return buf;
993 }
994
995 /* I am a certified frobtronicist. */
996 static inline void marker_frob(struct Command_Entry *cmd)
997 {
998 struct Marker_Entry *marker = (struct Marker_Entry *) cmd;
999
1000 memset(marker, 0, sizeof(struct Marker_Entry));
1001 marker->hdr.entry_cnt = 1;
1002 marker->hdr.entry_type = ENTRY_MARKER;
1003 marker->modifier = SYNC_ALL;
1004 marker->rsvd = 0;
1005 }
1006
1007 static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
1008 struct qlogicpti *qpti)
1009 {
1010 memset(cmd, 0, sizeof(struct Command_Entry));
1011 cmd->hdr.entry_cnt = 1;
1012 cmd->hdr.entry_type = ENTRY_COMMAND;
1013 cmd->target_id = Cmnd->device->id;
1014 cmd->target_lun = Cmnd->device->lun;
1015 cmd->cdb_length = Cmnd->cmd_len;
1016 cmd->control_flags = 0;
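/* Tag selection below: if more than five seconds have passed since this
 * target's queue last drained (or since the last ordered tag), send an
 * ordered tag so earlier commands cannot be starved behind a stream of
 * simple tags; otherwise use a simple tag.
 */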
1017 if (Cmnd->device->tagged_supported) {
1018 if (qpti->cmd_count[Cmnd->device->id] == 0)
1019 qpti->tag_ages[Cmnd->device->id] = jiffies;
1020 if ((jiffies - qpti->tag_ages[Cmnd->device->id]) > (5*HZ)) {
1021 cmd->control_flags = CFLAG_ORDERED_TAG;
1022 qpti->tag_ages[Cmnd->device->id] = jiffies;
1023 } else
1024 cmd->control_flags = CFLAG_SIMPLE_TAG;
1025 }
1026 if ((Cmnd->cmnd[0] == WRITE_6) ||
1027 (Cmnd->cmnd[0] == WRITE_10) ||
1028 (Cmnd->cmnd[0] == WRITE_12))
1029 cmd->control_flags |= CFLAG_WRITE;
1030 else
1031 cmd->control_flags |= CFLAG_READ;
1032 cmd->time_out = 30;
1033 memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
1034 }
1035
1036 /* Do it to it baby. */
1037 static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
1038 struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
1039 {
1040 struct dataseg *ds;
1041 struct scatterlist *sg;
1042 int i, n;
1043
1044 if (Cmnd->use_sg) {
1045 int sg_count;
1046
1047 sg = (struct scatterlist *) Cmnd->buffer;
1048 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
1049
1050 ds = cmd->dataseg;
1051 cmd->segment_cnt = sg_count;
1052
1053 /* Fill in first four sg entries: */
1054 n = sg_count;
1055 if (n > 4)
1056 n = 4;
1057 for (i = 0; i < n; i++, sg++) {
1058 ds[i].d_base = sg_dma_address(sg);
1059 ds[i].d_count = sg_dma_len(sg);
1060 }
1061 sg_count -= 4;
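/* Any remaining segments spill over into Continuation_Entry records
 * placed in the request ring, up to seven data segments per entry.
 */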
1062 while (sg_count > 0) {
1063 struct Continuation_Entry *cont;
1064
1065 ++cmd->hdr.entry_cnt;
1066 cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr];
1067 in_ptr = NEXT_REQ_PTR(in_ptr);
1068 if (in_ptr == out_ptr)
1069 return -1;
1070
1071 cont->hdr.entry_type = ENTRY_CONTINUATION;
1072 cont->hdr.entry_cnt = 0;
1073 cont->hdr.sys_def_1 = 0;
1074 cont->hdr.flags = 0;
1075 cont->reserved = 0;
1076 ds = cont->dataseg;
1077 n = sg_count;
1078 if (n > 7)
1079 n = 7;
1080 for (i = 0; i < n; i++, sg++) {
1081 ds[i].d_base = sg_dma_address(sg);
1082 ds[i].d_count = sg_dma_len(sg);
1083 }
1084 sg_count -= n;
1085 }
1086 } else if (Cmnd->request_bufflen) {
1087 Cmnd->SCp.ptr = (char *)(unsigned long)
1088 sbus_map_single(qpti->sdev,
1089 Cmnd->request_buffer,
1090 Cmnd->request_bufflen,
1091 Cmnd->sc_data_direction);
1092
1093 cmd->dataseg[0].d_base = (u32) ((unsigned long)Cmnd->SCp.ptr);
1094 cmd->dataseg[0].d_count = Cmnd->request_bufflen;
1095 cmd->segment_cnt = 1;
1096 } else {
1097 cmd->dataseg[0].d_base = 0;
1098 cmd->dataseg[0].d_count = 0;
1099 cmd->segment_cnt = 1; /* Shouldn't this be 0? */
1100 }
1101
1102 /* Committed, record Scsi_Cmd so we can find it later. */
1103 cmd->handle = in_ptr;
1104 qpti->cmd_slots[in_ptr] = Cmnd;
1105
1106 qpti->cmd_count[Cmnd->device->id]++;
1107 sbus_writew(in_ptr, qpti->qregs + MBOX4);
1108 qpti->req_in_ptr = in_ptr;
1109
1110 return in_ptr;
1111 }
1112
1113 static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr)
1114 {
1115 /* Temporary workaround until bug is found and fixed (one bug has been found
1116 already, but fixing it makes things even worse) -jj */
1117 int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
1118 host->can_queue = host->host_busy + num_free;
1119 host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
1120 }
1121
1122 /*
1123 * Until we scan the entire bus with inquiries, go through this fella...
1124 */
1125 static void ourdone(struct scsi_cmnd *Cmnd)
1126 {
1127 struct qlogicpti *qpti = (struct qlogicpti *) Cmnd->device->host->hostdata;
1128 int tgt = Cmnd->device->id;
1129 void (*done) (struct scsi_cmnd *);
1130
1131 /* This grot added by DaveM, blame him for ugliness.
1132 * The issue is that in the 2.3.x driver we use the
1133 * host_scribble portion of the scsi command as a
1134 * completion linked list at interrupt service time,
1135 * so we have to store the done function pointer elsewhere.
1136 */
1137 done = (void (*)(struct scsi_cmnd *))
1138 (((unsigned long) Cmnd->SCp.Message)
1139 #ifdef __sparc_v9__
1140 | ((unsigned long) Cmnd->SCp.Status << 32UL)
1141 #endif
1142 );
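/* On 32-bit kernels the done() pointer fits entirely in SCp.Message; on
 * sparc64 the upper 32 bits are parked in SCp.Status and recombined in
 * the expression above.
 */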
1143
1144 if ((qpti->sbits & (1 << tgt)) == 0) {
1145 int ok = host_byte(Cmnd->result) == DID_OK;
1146 if (Cmnd->cmnd[0] == 0x12 && ok) {
1147 unsigned char *iqd;
1148
1149 if (Cmnd->use_sg != 0)
1150 BUG();
1151
1152 iqd = ((unsigned char *)Cmnd->buffer);
1153
1154 /* tags handled in midlayer */
1155 /* enable sync mode? */
1156 if (iqd[7] & 0x10) {
1157 qpti->dev_param[tgt].device_flags |= 0x10;
1158 } else {
1159 qpti->dev_param[tgt].synchronous_offset = 0;
1160 qpti->dev_param[tgt].synchronous_period = 0;
1161 }
1162 /* are we wide capable? */
1163 if (iqd[7] & 0x20) {
1164 qpti->dev_param[tgt].device_flags |= 0x20;
1165 }
1166 qpti->sbits |= (1 << tgt);
1167 } else if (!ok) {
1168 qpti->sbits |= (1 << tgt);
1169 }
1170 }
1171 done(Cmnd);
1172 }
1173
1174 static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *));
1175
1176 static int qlogicpti_queuecommand_slow(struct scsi_cmnd *Cmnd,
1177 void (*done)(struct scsi_cmnd *))
1178 {
1179 struct qlogicpti *qpti = (struct qlogicpti *) Cmnd->device->host->hostdata;
1180
1181 /*
1182 * done checking this host adapter?
1183 * If not, then rewrite the command
1184 * to finish through ourdone so we
1185 * can peek at Inquiry data results.
1186 */
1187 if (qpti->sbits && qpti->sbits != 0xffff) {
1188 /* See the comment in ourdone() above about this ugliness... */
1189 Cmnd->SCp.Message = ((unsigned long)done) & 0xffffffff;
1190 #ifdef CONFIG_SPARC64
1191 Cmnd->SCp.Status = ((unsigned long)done >> 32UL) & 0xffffffff;
1192 #endif
1193 return qlogicpti_queuecommand(Cmnd, ourdone);
1194 }
1195
1196 /*
1197 * We've peeked at all targets for this bus- time
1198 * to set parameters for devices for real now.
1199 */
1200 if (qpti->sbits == 0xffff) {
1201 int i;
1202 for(i = 0; i < MAX_TARGETS; i++) {
1203 u_short param[6];
1204 param[0] = MBOX_SET_TARGET_PARAMS;
1205 param[1] = (i << 8);
1206 param[2] = (qpti->dev_param[i].device_flags << 8);
1207 if (qpti->dev_param[i].device_flags & 0x10) {
1208 param[3] = (qpti->dev_param[i].synchronous_offset << 8) |
1209 qpti->dev_param[i].synchronous_period;
1210 } else {
1211 param[3] = 0;
1212 }
1213 (void) qlogicpti_mbox_command(qpti, param, 0);
1214 }
1215 /*
1216 * set to zero so any traverse through ourdone
1217 * doesn't start the whole process again.
1218 */
1219 qpti->sbits = 0;
1220 }
1221
1222 /* check to see if we're done with all adapters... */
1223 for (qpti = qptichain; qpti != NULL; qpti = qpti->next) {
1224 if (qpti->sbits) {
1225 break;
1226 }
1227 }
1228
1229 /*
1230 * if we hit the end of the chain without finding adapters still
1231 * capability-configuring, then we're done with all adapters
1232 * and can rock on.
1233 */
1234 if (qpti == NULL)
1235 Cmnd->device->host->hostt->queuecommand = qlogicpti_queuecommand;
1236
1237 return qlogicpti_queuecommand(Cmnd, done);
1238 }
1239
1240 /*
1241 * The middle SCSI layer ensures that queuecommand never gets invoked
1242 * concurrently with itself or the interrupt handler (though the
1243 * interrupt handler may call this routine as part of
1244 * request-completion handling).
1245 *
1246 * "This code must fly." -davem
1247 */
1248 static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
1249 {
1250 struct Scsi_Host *host = Cmnd->device->host;
1251 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1252 struct Command_Entry *cmd;
1253 u_int out_ptr;
1254 int in_ptr;
1255
1256 Cmnd->scsi_done = done;
1257
1258 in_ptr = qpti->req_in_ptr;
1259 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
1260 out_ptr = sbus_readw(qpti->qregs + MBOX4);
1261 in_ptr = NEXT_REQ_PTR(in_ptr);
1262 if (in_ptr == out_ptr)
1263 goto toss_command;
1264
1265 if (qpti->send_marker) {
1266 marker_frob(cmd);
1267 qpti->send_marker = 0;
1268 if (NEXT_REQ_PTR(in_ptr) == out_ptr) {
1269 sbus_writew(in_ptr, qpti->qregs + MBOX4);
1270 qpti->req_in_ptr = in_ptr;
1271 goto toss_command;
1272 }
1273 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
1274 in_ptr = NEXT_REQ_PTR(in_ptr);
1275 }
1276 cmd_frob(cmd, Cmnd, qpti);
1277 if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1)
1278 goto toss_command;
1279
1280 update_can_queue(host, in_ptr, out_ptr);
1281
1282 return 0;
1283
1284 toss_command:
1285 printk(KERN_EMERG "qlogicpti%d: request queue overflow\n",
1286 qpti->qpti_id);
1287
1288 /* Unfortunately, unless you use the new EH code, which
1289 * we don't, the midlayer will ignore the return value,
1290 * which is insane. We pick up the pieces like this.
1291 */
1292 Cmnd->result = (DID_BUS_BUSY << 16);
1293 done(Cmnd);
1294 return 1;
1295 }
1296
1297 static int qlogicpti_return_status(struct Status_Entry *sts, int id)
1298 {
1299 int host_status = DID_ERROR;
1300
1301 switch (sts->completion_status) {
1302 case CS_COMPLETE:
1303 host_status = DID_OK;
1304 break;
1305 case CS_INCOMPLETE:
1306 if (!(sts->state_flags & SF_GOT_BUS))
1307 host_status = DID_NO_CONNECT;
1308 else if (!(sts->state_flags & SF_GOT_TARGET))
1309 host_status = DID_BAD_TARGET;
1310 else if (!(sts->state_flags & SF_SENT_CDB))
1311 host_status = DID_ERROR;
1312 else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
1313 host_status = DID_ERROR;
1314 else if (!(sts->state_flags & SF_GOT_STATUS))
1315 host_status = DID_ERROR;
1316 else if (!(sts->state_flags & SF_GOT_SENSE))
1317 host_status = DID_ERROR;
1318 break;
1319 case CS_DMA_ERROR:
1320 case CS_TRANSPORT_ERROR:
1321 host_status = DID_ERROR;
1322 break;
1323 case CS_RESET_OCCURRED:
1324 case CS_BUS_RESET:
1325 host_status = DID_RESET;
1326 break;
1327 case CS_ABORTED:
1328 host_status = DID_ABORT;
1329 break;
1330 case CS_TIMEOUT:
1331 host_status = DID_TIME_OUT;
1332 break;
1333 case CS_DATA_OVERRUN:
1334 case CS_COMMAND_OVERRUN:
1335 case CS_STATUS_OVERRUN:
1336 case CS_BAD_MESSAGE:
1337 case CS_NO_MESSAGE_OUT:
1338 case CS_EXT_ID_FAILED:
1339 case CS_IDE_MSG_FAILED:
1340 case CS_ABORT_MSG_FAILED:
1341 case CS_NOP_MSG_FAILED:
1342 case CS_PARITY_ERROR_MSG_FAILED:
1343 case CS_DEVICE_RESET_MSG_FAILED:
1344 case CS_ID_MSG_FAILED:
1345 case CS_UNEXP_BUS_FREE:
1346 host_status = DID_ERROR;
1347 break;
1348 case CS_DATA_UNDERRUN:
1349 host_status = DID_OK;
1350 break;
1351 default:
1352 printk(KERN_EMERG "qpti%d: unknown completion status 0x%04x\n",
1353 id, sts->completion_status);
1354 host_status = DID_ERROR;
1355 break;
1356 }
1357
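/* The midlayer result word carries the SCSI status byte in bits 0-7 and
 * the host byte (the DID_* code) in bits 16-23, hence the shift below.
 */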
1358 return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
1359 }
1360
1361 static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1362 {
1363 struct scsi_cmnd *Cmnd, *done_queue = NULL;
1364 struct Status_Entry *sts;
1365 u_int in_ptr, out_ptr;
1366
1367 if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
1368 return NULL;
1369
1370 in_ptr = sbus_readw(qpti->qregs + MBOX5);
1371 sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
1372 if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
1373 switch (sbus_readw(qpti->qregs + MBOX0)) {
1374 case ASYNC_SCSI_BUS_RESET:
1375 case EXECUTION_TIMEOUT_RESET:
1376 qpti->send_marker = 1;
1377 break;
1378 case INVALID_COMMAND:
1379 case HOST_INTERFACE_ERROR:
1380 case COMMAND_ERROR:
1381 case COMMAND_PARAM_ERROR:
1382 break;
1383 }
1384 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
1385 }
1386
1387 /* This looks like a network driver! */
1388 out_ptr = qpti->res_out_ptr;
1389 while (out_ptr != in_ptr) {
1390 u_int cmd_slot;
1391
1392 sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr];
1393 out_ptr = NEXT_RES_PTR(out_ptr);
1394
1395 /* We store an index in the handle, not the pointer in
1396 * some form. This avoids problems due to the fact
1397 * that the handle provided is only 32-bits. -DaveM
1398 */
1399 cmd_slot = sts->handle;
1400 Cmnd = qpti->cmd_slots[cmd_slot];
1401 qpti->cmd_slots[cmd_slot] = NULL;
1402
1403 if (sts->completion_status == CS_RESET_OCCURRED ||
1404 sts->completion_status == CS_ABORTED ||
1405 (sts->status_flags & STF_BUS_RESET))
1406 qpti->send_marker = 1;
1407
1408 if (sts->state_flags & SF_GOT_SENSE)
1409 memcpy(Cmnd->sense_buffer, sts->req_sense_data,
1410 sizeof(Cmnd->sense_buffer));
1411
1412 if (sts->hdr.entry_type == ENTRY_STATUS)
1413 Cmnd->result =
1414 qlogicpti_return_status(sts, qpti->qpti_id);
1415 else
1416 Cmnd->result = DID_ERROR << 16;
1417
1418 if (Cmnd->use_sg) {
1419 sbus_unmap_sg(qpti->sdev,
1420 (struct scatterlist *)Cmnd->buffer,
1421 Cmnd->use_sg,
1422 Cmnd->sc_data_direction);
1423 } else {
1424 sbus_unmap_single(qpti->sdev,
1425 (__u32)((unsigned long)Cmnd->SCp.ptr),
1426 Cmnd->request_bufflen,
1427 Cmnd->sc_data_direction);
1428 }
1429 qpti->cmd_count[Cmnd->device->id]--;
1430 sbus_writew(out_ptr, qpti->qregs + MBOX5);
1431 Cmnd->host_scribble = (unsigned char *) done_queue;
1432 done_queue = Cmnd;
1433 }
1434 qpti->res_out_ptr = out_ptr;
1435
1436 return done_queue;
1437 }
1438
1439 static irqreturn_t qpti_intr(int irq, void *dev_id, struct pt_regs *regs)
1440 {
1441 struct qlogicpti *qpti = dev_id;
1442 unsigned long flags;
1443 struct scsi_cmnd *dq;
1444
1445 spin_lock_irqsave(qpti->qhost->host_lock, flags);
1446 dq = qlogicpti_intr_handler(qpti);
1447
1448 if (dq != NULL) {
1449 do {
1450 struct scsi_cmnd *next;
1451
1452 next = (struct scsi_cmnd *) dq->host_scribble;
1453 dq->scsi_done(dq);
1454 dq = next;
1455 } while (dq != NULL);
1456 }
1457 spin_unlock_irqrestore(qpti->qhost->host_lock, flags);
1458
1459 return IRQ_HANDLED;
1460 }
1461
1462 static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
1463 {
1464 u_short param[6];
1465 struct Scsi_Host *host = Cmnd->device->host;
1466 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1467 int return_status = SUCCESS;
1468 u32 cmd_cookie;
1469 int i;
1470
1471 printk(KERN_WARNING "qlogicpti : Aborting cmd for tgt[%d] lun[%d]\n",
1472 (int)Cmnd->device->id, (int)Cmnd->device->lun);
1473
1474 qlogicpti_disable_irqs(qpti);
1475
1476 /* Find the 32-bit cookie we gave to the firmware for
1477 * this command.
1478 */
1479 for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++)
1480 if (qpti->cmd_slots[i] == Cmnd)
1481 break;
1482 cmd_cookie = i;
1483
1484 param[0] = MBOX_ABORT;
1485 param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun;
1486 param[2] = cmd_cookie >> 16;
1487 param[3] = cmd_cookie & 0xffff;
1488 if (qlogicpti_mbox_command(qpti, param, 0) ||
1489 (param[0] != MBOX_COMMAND_COMPLETE)) {
1490 printk(KERN_EMERG "qlogicpti : scsi abort failure: %x\n", param[0]);
1491 return_status = FAILED;
1492 }
1493
1494 qlogicpti_enable_irqs(qpti);
1495
1496 return return_status;
1497 }
1498
1499 static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
1500 {
1501 u_short param[6];
1502 struct Scsi_Host *host = Cmnd->device->host;
1503 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1504 int return_status = SUCCESS;
1505
1506 printk(KERN_WARNING "qlogicpti : Resetting SCSI bus!\n");
1507
1508 qlogicpti_disable_irqs(qpti);
1509
1510 param[0] = MBOX_BUS_RESET;
1511 param[1] = qpti->host_param.bus_reset_delay;
1512 if (qlogicpti_mbox_command(qpti, param, 0) ||
1513 (param[0] != MBOX_COMMAND_COMPLETE)) {
1514 printk(KERN_EMERG "qlogicisp : scsi bus reset failure: %x\n", param[0]);
1515 return_status = FAILED;
1516 }
1517
1518 qlogicpti_enable_irqs(qpti);
1519
1520 return return_status;
1521 }
1522
1523 static struct scsi_host_template driver_template = {
1524 .detect = qlogicpti_detect,
1525 .release = qlogicpti_release,
1526 .info = qlogicpti_info,
1527 .queuecommand = qlogicpti_queuecommand_slow,
1528 .eh_abort_handler = qlogicpti_abort,
1529 .eh_bus_reset_handler = qlogicpti_reset,
1530 .can_queue = QLOGICPTI_REQ_QUEUE_LEN,
1531 .this_id = 7,
1532 .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
1533 .cmd_per_lun = 1,
1534 .use_clustering = ENABLE_CLUSTERING,
1535 };
1536
1537
1538 #include "scsi_module.c"
1539
1540 MODULE_LICENSE("GPL");
1541