1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* NCR (or Symbios) 53c700 and 53c700-66 Driver
4 *
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6**-----------------------------------------------------------------------------
7**
8** This program is free software; you can redistribute it and/or modify
9** it under the terms of the GNU General Public License as published by
10** the Free Software Foundation; either version 2 of the License, or
11** (at your option) any later version.
12**
13** This program is distributed in the hope that it will be useful,
14** but WITHOUT ANY WARRANTY; without even the implied warranty of
15** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16** GNU General Public License for more details.
17**
18** You should have received a copy of the GNU General Public License
19** along with this program; if not, write to the Free Software
20** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21**
22**-----------------------------------------------------------------------------
23 */
24
25/* Notes:
26 *
27 * This driver is designed exclusively for these chips (virtually the
28 * earliest of the scripts engine chips). They need their own drivers
29 * because they are missing so many of the scripts and snazzy register
30 * features of their elder brothers (the 710, 720 and 770).
31 *
32 * The 700 is the lowliest of the line, it can only do async SCSI.
33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
34 *
35 * The 700 chip has no host bus interface logic of its own. However,
36 * it is usually mapped to a location with well defined register
37 * offsets. Therefore, if you can determine the base address and the
38 * irq your board incorporating this chip uses, you can probably use
39 * this driver to run it (although you'll probably have to write a
40 * minimal wrapper for the purpose---see the NCR_D700 driver for
41 * details about how to do this).
42 *
43 *
44 * TODO List:
45 *
46 * 1. Better statistics in the proc fs
47 *
48 * 2. Implement message queue (queues SCSI messages like commands) and make
49 * the abort and device reset functions use them.
50 * */
51
52/* CHANGELOG
53 *
54 * Version 2.8
55 *
56 * Fixed bad bug affecting tag starvation processing (previously the
 57 * driver would hang the system if too many tags starved). Also fixed
58 * bad bug having to do with 10 byte command processing and REQUEST
59 * SENSE (the command would loop forever getting a transfer length
60 * mismatch in the CMD phase).
61 *
62 * Version 2.7
63 *
64 * Fixed scripts problem which caused certain devices (notably CDRWs)
65 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
66 * __raw_readl/writel for parisc compatibility (Thomas
67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68 * for sense requests (Ryan Bradetich).
69 *
70 * Version 2.6
71 *
72 * Following test of the 64 bit parisc kernel by Richard Hirst,
73 * several problems have now been corrected. Also adds support for
74 * consistent memory allocation.
75 *
76 * Version 2.5
77 *
78 * More Compatibility changes for 710 (now actually works). Enhanced
79 * support for odd clock speeds which constrain SDTR negotiations.
 80 * Correct cacheline separation for scsi messages and status for
81 * incoherent architectures. Use of the pci mapping functions on
82 * buffers to begin support for 64 bit drivers.
83 *
84 * Version 2.4
85 *
86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87 * special 53c710 instructions or registers are used).
88 *
89 * Version 2.3
90 *
91 * More endianness/cache coherency changes.
92 *
93 * Better bad device handling (handles devices lying about tag
94 * queueing support and devices which fail to provide sense data on
95 * contingent allegiance conditions)
96 *
97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98 * debugging this driver on the parisc architecture and suggesting
99 * many improvements and bug fixes.
100 *
101 * Thanks also go to Linuxcare Inc. for providing several PARISC
102 * machines for me to debug the driver on.
103 *
104 * Version 2.2
105 *
106 * Made the driver mem or io mapped; added endian invariance; added
107 * dma cache flushing operations for architectures which need it;
108 * added support for more varied clocking speeds.
109 *
110 * Version 2.1
111 *
112 * Initial modularisation from the D700. See NCR_D700.c for the rest of
113 * the changelog.
114 * */
115#define NCR_700_VERSION "2.8"
116
117#include <linux/kernel.h>
118#include <linux/types.h>
119#include <linux/string.h>
120#include <linux/ioport.h>
121#include <linux/delay.h>
122#include <linux/spinlock.h>
123#include <linux/completion.h>
124#include <linux/init.h>
125#include <linux/proc_fs.h>
126#include <linux/blkdev.h>
127#include <linux/module.h>
128#include <linux/interrupt.h>
129#include <linux/device.h>
130#include <asm/dma.h>
131#include <asm/system.h>
132#include <asm/io.h>
133#include <asm/pgtable.h>
134#include <asm/byteorder.h>
135
136#include <scsi/scsi.h>
137#include <scsi/scsi_cmnd.h>
138#include <scsi/scsi_dbg.h>
139#include <scsi/scsi_eh.h>
140#include <scsi/scsi_host.h>
141#include <scsi/scsi_tcq.h>
142#include <scsi/scsi_transport.h>
143#include <scsi/scsi_transport_spi.h>
144
145#include "53c700.h"
146
147/* NOTE: For 64 bit drivers there are points in the code where we use
148 * a non dereferenceable pointer to point to a structure in dma-able
149 * memory (which is 32 bits) so that we can use all of the structure
150 * operations but take the address at the end. This macro allows us
151 * to truncate the 64 bit pointer down to 32 bits without the compiler
152 * complaining */
153#define to32bit(x) ((__u32)((unsigned long)(x)))
154
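/* Illustration (not part of the original source): to32bit() is applied later
 * in this file to addresses inside the DMA-able slot memory, e.g.
 * to32bit(&slot->pSG[0].ins), so a 32-bit bus address can be handed to the
 * SCRIPTS engine even when the kernel pointer itself is 64 bits wide. */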
155#ifdef NCR_700_DEBUG
156#define STATIC
157#else
158#define STATIC static
159#endif
160
161MODULE_AUTHOR("James Bottomley");
162MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
163MODULE_LICENSE("GPL");
164
165/* This is the script */
166#include "53c700_d.h"
167
168
169STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
170STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
171STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
172STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
173STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
174STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
175STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
176STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
177STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
178static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
179static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
180
181STATIC struct device_attribute *NCR_700_dev_attrs[];
182
183STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
184
185static char *NCR_700_phase[] = {
186 "",
187 "after selection",
188 "before command phase",
189 "after command phase",
190 "after status phase",
191 "after data in phase",
192 "after data out phase",
193 "during data phase",
194};
195
196static char *NCR_700_condition[] = {
197 "",
198 "NOT MSG_OUT",
199 "UNEXPECTED PHASE",
200 "NOT MSG_IN",
201 "UNEXPECTED MSG",
202 "MSG_IN",
203 "SDTR_MSG RECEIVED",
204 "REJECT_MSG RECEIVED",
205 "DISCONNECT_MSG RECEIVED",
206 "MSG_OUT",
207 "DATA_IN",
208
209};
210
211static char *NCR_700_fatal_messages[] = {
212 "unexpected message after reselection",
213 "still MSG_OUT after message injection",
214 "not MSG_IN after selection",
215 "Illegal message length received",
216};
217
218static char *NCR_700_SBCL_bits[] = {
219 "IO ",
220 "CD ",
221 "MSG ",
222 "ATN ",
223 "SEL ",
224 "BSY ",
225 "ACK ",
226 "REQ ",
227};
228
229static char *NCR_700_SBCL_to_phase[] = {
230 "DATA_OUT",
231 "DATA_IN",
232 "CMD_OUT",
233 "STATE",
234 "ILLEGAL PHASE",
235 "ILLEGAL PHASE",
236 "MSG OUT",
237 "MSG IN",
238};
239
240/* This translates the SDTR message offset and period to a value
241 * which can be loaded into the SXFER_REG.
242 *
243 * NOTE: According to SCSI-2, the true transfer period (in ns) is
244 * actually four times this period value */
245static inline __u8
246NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
247 __u8 offset, __u8 period)
248{
249 int XFERP;
250
251 __u8 min_xferp = (hostdata->chip710
252 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
253 __u8 max_offset = (hostdata->chip710
254 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
255
256 if(offset == 0)
257 return 0;
258
259 if(period < hostdata->min_period) {
 260 printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
261 period = hostdata->min_period;
262 }
263 XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
264 if(offset > max_offset) {
265 printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
266 offset, max_offset);
267 offset = max_offset;
268 }
269 if(XFERP < min_xferp) {
 270 printk(KERN_WARNING "53c700: XFERP %d is less than minimum, setting to %d\n",
271 XFERP, min_xferp);
272 XFERP = min_xferp;
273 }
274 return (offset & 0x0f) | (XFERP & 0x07)<<4;
275}
276
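/* Worked example (illustrative only, not from the original driver): with a
 * 50MHz sync_clock and an SDTR period value of 25 (a true period of
 * 25*4 = 100ns), XFERP = (100 * 50)/1000 - 4 = 1; with an offset of 8 the
 * returned SXFER value is (8 & 0x0f) | ((1 & 0x07) << 4) = 0x18. */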
277static inline __u8
278NCR_700_get_SXFER(struct scsi_device *SDp)
279{
280 struct NCR_700_Host_Parameters *hostdata =
281 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
282
283 return NCR_700_offset_period_to_sxfer(hostdata,
284 spi_offset(SDp->sdev_target),
285 spi_period(SDp->sdev_target));
286}
287
288struct Scsi_Host *
289NCR_700_detect(struct scsi_host_template *tpnt,
290 struct NCR_700_Host_Parameters *hostdata, struct device *dev)
291{
292 dma_addr_t pScript, pSlots;
293 __u8 *memory;
294 __u32 *script;
295 struct Scsi_Host *host;
296 static int banner = 0;
297 int j;
298
299 if(tpnt->sdev_attrs == NULL)
300 tpnt->sdev_attrs = NCR_700_dev_attrs;
301
302 memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
303 &pScript, GFP_KERNEL);
304 if(memory == NULL) {
 305 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
306 return NULL;
307 }
308
309 script = (__u32 *)memory;
310 hostdata->msgin = memory + MSGIN_OFFSET;
311 hostdata->msgout = memory + MSGOUT_OFFSET;
312 hostdata->status = memory + STATUS_OFFSET;
313 /* all of these offsets are L1_CACHE_BYTES separated. It is fatal
314 * if this isn't sufficient separation to avoid dma flushing issues */
 315 BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
316 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
317 hostdata->dev = dev;
 318
319 pSlots = pScript + SLOTS_OFFSET;
320
321 /* Fill in the missing routines from the host template */
322 tpnt->queuecommand = NCR_700_queuecommand;
323 tpnt->eh_abort_handler = NCR_700_abort;
324 tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
325 tpnt->eh_host_reset_handler = NCR_700_host_reset;
326 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
327 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
328 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
329 tpnt->use_clustering = ENABLE_CLUSTERING;
330 tpnt->slave_configure = NCR_700_slave_configure;
331 tpnt->slave_destroy = NCR_700_slave_destroy;
 332 tpnt->slave_alloc = NCR_700_slave_alloc;
333 tpnt->change_queue_depth = NCR_700_change_queue_depth;
334 tpnt->change_queue_type = NCR_700_change_queue_type;
 335
336 if(tpnt->name == NULL)
337 tpnt->name = "53c700";
338 if(tpnt->proc_name == NULL)
339 tpnt->proc_name = "53c700";
340
341 host = scsi_host_alloc(tpnt, 4);
342 if (!host)
343 return NULL;
344 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
345 * NCR_700_COMMAND_SLOTS_PER_HOST);
 346 for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
347 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
348 - (unsigned long)&hostdata->slots[0].SG[0]);
349 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
350 if(j == 0)
351 hostdata->free_list = &hostdata->slots[j];
352 else
353 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
354 hostdata->slots[j].state = NCR_700_SLOT_FREE;
355 }
356
 357 for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
 358 script[j] = bS_to_host(SCRIPT[j]);
359
360 /* adjust all labels to be bus physical */
 361 for (j = 0; j < PATCHES; j++)
 362 script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
 363 /* now patch up fixed addresses. */
 364 script_patch_32(hostdata->dev, script, MessageLocation,
 365 pScript + MSGOUT_OFFSET);
 366 script_patch_32(hostdata->dev, script, StatusAddress,
 367 pScript + STATUS_OFFSET);
 368 script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
369 pScript + MSGIN_OFFSET);
370
371 hostdata->script = script;
372 hostdata->pScript = pScript;
373 dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
374 hostdata->state = NCR_700_HOST_FREE;
375 hostdata->cmd = NULL;
 376 host->max_id = 8;
377 host->max_lun = NCR_700_MAX_LUNS;
378 BUG_ON(NCR_700_transport_template == NULL);
379 host->transportt = NCR_700_transport_template;
 380 host->unique_id = (unsigned long)hostdata->base;
381 hostdata->eh_complete = NULL;
382 host->hostdata[0] = (unsigned long)hostdata;
383 /* kick the chip */
384 NCR_700_writeb(0xff, host, CTEST9_REG);
 385 if (hostdata->chip710)
386 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
387 else
388 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
389 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
 390 if (banner == 0) {
391 printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
392 banner = 1;
393 }
394 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
 395 hostdata->chip710 ? "53c710" :
396 (hostdata->fast ? "53c700-66" : "53c700"),
397 hostdata->rev, hostdata->differential ?
398 "(Differential)" : "");
399 /* reset the chip */
400 NCR_700_chip_reset(host);
401
402 if (scsi_add_host(host, dev)) {
403 dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
404 scsi_host_put(host);
405 return NULL;
406 }
407
408 spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
409 SPI_SIGNAL_SE;
410
411 return host;
412}
413
414int
415NCR_700_release(struct Scsi_Host *host)
416{
417 struct NCR_700_Host_Parameters *hostdata =
418 (struct NCR_700_Host_Parameters *)host->hostdata[0];
419
420 dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
421 hostdata->script, hostdata->pScript);
422 return 1;
423}
424
425static inline __u8
426NCR_700_identify(int can_disconnect, __u8 lun)
427{
428 return IDENTIFY_BASE |
429 ((can_disconnect) ? 0x40 : 0) |
430 (lun & NCR_700_LUN_MASK);
431}
432
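/* Example (illustrative only): for LUN 2 with disconnection allowed this
 * builds the standard SCSI IDENTIFY byte 0x80 | 0x40 | 0x02 = 0xc2,
 * assuming the usual IDENTIFY_BASE value of 0x80. */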
433/*
434 * Function : static int data_residual (Scsi_Host *host)
435 *
436 * Purpose : return residual data count of what's in the chip. If you
437 * really want to know what this function is doing, it's almost a
438 * direct transcription of the algorithm described in the 53c710
439 * guide, except that the DBC and DFIFO registers are only 6 bits
440 * wide on a 53c700.
441 *
442 * Inputs : host - SCSI host */
443static inline int
444NCR_700_data_residual (struct Scsi_Host *host) {
445 struct NCR_700_Host_Parameters *hostdata =
446 (struct NCR_700_Host_Parameters *)host->hostdata[0];
447 int count, synchronous = 0;
448 unsigned int ddir;
449
450 if(hostdata->chip710) {
451 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
452 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
453 } else {
454 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
455 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
456 }
457
458 if(hostdata->fast)
459 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
460
461 /* get the data direction */
462 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
463
464 if (ddir) {
465 /* Receive */
466 if (synchronous)
467 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
468 else
469 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
470 ++count;
471 } else {
472 /* Send */
473 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
474 if (sstat & SODL_REG_FULL)
475 ++count;
476 if (synchronous && (sstat & SODR_REG_FULL))
477 ++count;
478 }
479#ifdef NCR_700_DEBUG
480 if(count)
481 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
482#endif
483 return count;
484}
485
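/* Rough example (not from the original source): on a 710, if DFIFO reads
 * 0x25 and the low byte of DBC reads 0x20, the DMA FIFO still holds
 * (0x25 - 0x20) & 0x7f = 5 bytes; any byte latched in SIDL/SODL/SODR is
 * then added on top of that residual by the code above. */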
486/* print out the SCSI wires and corresponding phase from the SBCL register
487 * in the chip */
488static inline char *
489sbcl_to_string(__u8 sbcl)
490{
491 int i;
492 static char ret[256];
493
494 ret[0]='\0';
495 for(i=0; i<8; i++) {
496 if((1<<i) & sbcl)
497 strcat(ret, NCR_700_SBCL_bits[i]);
498 }
499 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
500 return ret;
501}
502
503static inline __u8
504bitmap_to_number(__u8 bitmap)
505{
506 __u8 i;
507
508 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
509 ;
510 return i;
511}
512
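/* Example (illustrative only): the chip reports a (re)selecting ID as a
 * one-hot bitmap, so a value of 0x20 converts to SCSI ID 5. */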
513/* Pull a slot off the free list */
514STATIC struct NCR_700_command_slot *
515find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
516{
517 struct NCR_700_command_slot *slot = hostdata->free_list;
518
519 if(slot == NULL) {
520 /* sanity check */
521 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
522 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
523 return NULL;
524 }
525
526 if(slot->state != NCR_700_SLOT_FREE)
527 /* should panic! */
528 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
529
530
531 hostdata->free_list = slot->ITL_forw;
532 slot->ITL_forw = NULL;
533
534
535 /* NOTE: set the state to busy here, not queued, since this
536 * indicates the slot is in use and cannot be run by the IRQ
537 * finish routine. If we cannot queue the command when it
 538 * is properly built, we then change to NCR_700_SLOT_QUEUED */
539 slot->state = NCR_700_SLOT_BUSY;
 540 slot->flags = 0;
541 hostdata->command_slot_count++;
542
543 return slot;
544}
545
546STATIC void
547free_slot(struct NCR_700_command_slot *slot,
548 struct NCR_700_Host_Parameters *hostdata)
549{
550 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
551 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
552 }
553 if(slot->state == NCR_700_SLOT_FREE) {
554 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
555 }
556
557 slot->resume_offset = 0;
558 slot->cmnd = NULL;
559 slot->state = NCR_700_SLOT_FREE;
560 slot->ITL_forw = hostdata->free_list;
561 hostdata->free_list = slot;
562 hostdata->command_slot_count--;
563}
564
565
566/* This routine really does very little. The command is indexed on
567 the ITL and (if tagged) the ITLQ lists in _queuecommand */
568STATIC void
569save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
570 struct scsi_cmnd *SCp, __u32 dsp)
571{
 572 /* It's just possible that this gets executed twice */
573 if(SCp != NULL) {
574 struct NCR_700_command_slot *slot =
575 (struct NCR_700_command_slot *)SCp->host_scribble;
576
577 slot->resume_offset = dsp;
578 }
579 hostdata->state = NCR_700_HOST_FREE;
580 hostdata->cmd = NULL;
581}
582
583STATIC inline void
584NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
585 struct NCR_700_command_slot *slot)
586{
587 if(SCp->sc_data_direction != DMA_NONE &&
588 SCp->sc_data_direction != DMA_BIDIRECTIONAL)
589 scsi_dma_unmap(SCp);
590}
591
592STATIC inline void
593NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
594 struct scsi_cmnd *SCp, int result)
595{
596 hostdata->state = NCR_700_HOST_FREE;
597 hostdata->cmd = NULL;
598
599 if(SCp != NULL) {
600 struct NCR_700_command_slot *slot =
601 (struct NCR_700_command_slot *)SCp->host_scribble;
602
603 dma_unmap_single(hostdata->dev, slot->pCmd,
604 sizeof(SCp->cmnd), DMA_TO_DEVICE);
 605 if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
 606 char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
607#ifdef NCR_700_DEBUG
608 printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
609 SCp, SCp->cmnd[7], result);
610 scsi_print_sense("53c700", SCp);
611
612#endif
 613 dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
614 /* restore the old result if the request sense was
615 * successful */
 616 if (result == 0)
 617 result = cmnd[7];
618 /* restore the original length */
619 SCp->cmd_len = cmnd[8];
 620 } else
 621 NCR_700_unmap(hostdata, SCp, slot);
 622
623 free_slot(slot, hostdata);
624#ifdef NCR_700_DEBUG
625 if(NCR_700_get_depth(SCp->device) == 0 ||
626 NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
627 printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
628 NCR_700_get_depth(SCp->device));
629#endif /* NCR_700_DEBUG */
630 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
631
632 SCp->host_scribble = NULL;
633 SCp->result = result;
634 SCp->scsi_done(SCp);
635 } else {
636 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
637 }
638}
639
640
641STATIC void
642NCR_700_internal_bus_reset(struct Scsi_Host *host)
643{
644 /* Bus reset */
645 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
646 udelay(50);
647 NCR_700_writeb(0, host, SCNTL1_REG);
648
649}
650
651STATIC void
652NCR_700_chip_setup(struct Scsi_Host *host)
653{
654 struct NCR_700_Host_Parameters *hostdata =
655 (struct NCR_700_Host_Parameters *)host->hostdata[0];
656 __u32 dcntl_extra = 0;
657 __u8 min_period;
658 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
659
660 if(hostdata->chip710) {
661 __u8 burst_disable = 0;
662 __u8 burst_length = 0;
663
664 switch (hostdata->burst_length) {
665 case 1:
666 burst_length = BURST_LENGTH_1;
667 break;
668 case 2:
669 burst_length = BURST_LENGTH_2;
670 break;
671 case 4:
672 burst_length = BURST_LENGTH_4;
673 break;
674 case 8:
675 burst_length = BURST_LENGTH_8;
676 break;
677 default:
678 burst_disable = BURST_DISABLE;
679 break;
680 }
681 dcntl_extra = COMPAT_700_MODE;
682
683 NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
 684 NCR_700_writeb(burst_length | hostdata->dmode_extra,
685 host, DMODE_710_REG);
686 NCR_700_writeb(burst_disable | (hostdata->differential ?
687 DIFF : 0), host, CTEST7_REG);
688 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
689 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
690 | AUTO_ATN, host, SCNTL0_REG);
691 } else {
692 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
693 host, DMODE_700_REG);
694 NCR_700_writeb(hostdata->differential ?
695 DIFF : 0, host, CTEST7_REG);
696 if(hostdata->fast) {
697 /* this is for 700-66, does nothing on 700 */
698 NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
699 | GENERATE_RECEIVE_PARITY, host,
700 CTEST8_REG);
701 } else {
702 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
703 | PARITY | AUTO_ATN, host, SCNTL0_REG);
704 }
705 }
706
707 NCR_700_writeb(1 << host->this_id, host, SCID_REG);
708 NCR_700_writeb(0, host, SBCL_REG);
709 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
710
711 NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
712 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
713
714 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
715 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
716 if(hostdata->clock > 75) {
 717 printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
718 /* do the best we can, but the async clock will be out
719 * of spec: sync divider 2, async divider 3 */
720 DEBUG(("53c700: sync 2 async 3\n"));
721 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
722 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
723 hostdata->sync_clock = hostdata->clock/2;
724 } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
725 /* sync divider 1.5, async divider 3 */
726 DEBUG(("53c700: sync 1.5 async 3\n"));
727 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
728 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
729 hostdata->sync_clock = hostdata->clock*2;
730 hostdata->sync_clock /= 3;
731
732 } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
733 /* sync divider 1, async divider 2 */
734 DEBUG(("53c700: sync 1 async 2\n"));
735 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
736 NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
737 hostdata->sync_clock = hostdata->clock;
738 } else if(hostdata->clock > 25 && hostdata->clock <=37) {
739 /* sync divider 1, async divider 1.5 */
740 DEBUG(("53c700: sync 1 async 1.5\n"));
741 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
742 NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
743 hostdata->sync_clock = hostdata->clock;
744 } else {
745 DEBUG(("53c700: sync 1 async 1\n"));
746 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
747 NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
748 /* sync divider 1, async divider 1 */
749 hostdata->sync_clock = hostdata->clock;
750 }
751 /* Calculate the actual minimum period that can be supported
752 * by our synchronous clock speed. See the 710 manual for
753 * exact details of this calculation which is based on a
754 * setting of the SXFER register */
755 min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
756 hostdata->min_period = NCR_700_MIN_PERIOD;
757 if(min_period > NCR_700_MIN_PERIOD)
758 hostdata->min_period = min_period;
759}
760
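/* Worked example of the min_period calculation above (illustrative only):
 * with a 50MHz sync_clock and a minimum XFERP of 1, min_period =
 * 1000*(4+1)/(4*50) = 25, i.e. a fastest negotiable period of 25*4 = 100ns. */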
761STATIC void
762NCR_700_chip_reset(struct Scsi_Host *host)
763{
764 struct NCR_700_Host_Parameters *hostdata =
765 (struct NCR_700_Host_Parameters *)host->hostdata[0];
766 if(hostdata->chip710) {
767 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
768 udelay(100);
769
770 NCR_700_writeb(0, host, ISTAT_REG);
771 } else {
772 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
773 udelay(100);
774
775 NCR_700_writeb(0, host, DCNTL_REG);
776 }
777
778 mdelay(1000);
779
780 NCR_700_chip_setup(host);
781}
782
783/* The heart of the message processing engine is that the instruction
784 * immediately after the INT is the normal case (and so must be CLEAR
785 * ACK). If we want to do something else, we call that routine in
786 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
787 * ACK) so that the routine returns correctly to resume its activity
788 * */
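/* (Clarifying note, not in the original: the script instructions involved
 * here are two 32-bit words each, so the "+ 8" used below simply points
 * TEMP at the instruction following the CLEAR ACK.) */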
789STATIC __u32
790process_extended_message(struct Scsi_Host *host,
791 struct NCR_700_Host_Parameters *hostdata,
792 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
793{
794 __u32 resume_offset = dsp, temp = dsp + 8;
795 __u8 pun = 0xff, lun = 0xff;
796
797 if(SCp != NULL) {
798 pun = SCp->device->id;
799 lun = SCp->device->lun;
800 }
801
802 switch(hostdata->msgin[2]) {
803 case A_SDTR_MSG:
804 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
805 struct scsi_target *starget = SCp->device->sdev_target;
806 __u8 period = hostdata->msgin[3];
807 __u8 offset = hostdata->msgin[4];
808
809 if(offset == 0 || period == 0) {
810 offset = 0;
811 period = 0;
812 }
813
814 spi_offset(starget) = offset;
815 spi_period(starget) = period;
816
817 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
818 spi_display_xfer_agreement(starget);
819 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
820 }
821
822 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
823 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
824
825 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
826 host, SXFER_REG);
827
828 } else {
829 /* SDTR message out of the blue, reject it */
830 shost_printk(KERN_WARNING, host,
831 "Unexpected SDTR msg\n");
 832 hostdata->msgout[0] = A_REJECT_MSG;
833 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
834 script_patch_16(hostdata->dev, hostdata->script,
835 MessageCount, 1);
836 /* SendMsgOut returns, so set up the return
837 * address */
838 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
839 }
840 break;
841
842 case A_WDTR_MSG:
843 printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
844 host->host_no, pun, lun);
845 hostdata->msgout[0] = A_REJECT_MSG;
846 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
847 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
848 1);
849 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
850
851 break;
852
853 default:
854 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
855 host->host_no, pun, lun,
856 NCR_700_phase[(dsps & 0xf00) >> 8]);
 857 spi_print_msg(hostdata->msgin);
858 printk("\n");
859 /* just reject it */
860 hostdata->msgout[0] = A_REJECT_MSG;
861 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
862 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
863 1);
864 /* SendMsgOut returns, so set up the return
865 * address */
866 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
867 }
868 NCR_700_writel(temp, host, TEMP_REG);
869 return resume_offset;
870}
871
872STATIC __u32
873process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
874 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
875{
876 /* work out where to return to */
877 __u32 temp = dsp + 8, resume_offset = dsp;
878 __u8 pun = 0xff, lun = 0xff;
879
880 if(SCp != NULL) {
881 pun = SCp->device->id;
882 lun = SCp->device->lun;
883 }
884
885#ifdef NCR_700_DEBUG
886 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
887 NCR_700_phase[(dsps & 0xf00) >> 8]);
 888 spi_print_msg(hostdata->msgin);
889 printk("\n");
890#endif
891
892 switch(hostdata->msgin[0]) {
893
894 case A_EXTENDED_MSG:
895 resume_offset = process_extended_message(host, hostdata, SCp,
896 dsp, dsps);
897 break;
898
899 case A_REJECT_MSG:
900 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
901 /* Rejected our sync negotiation attempt */
902 spi_period(SCp->device->sdev_target) =
903 spi_offset(SCp->device->sdev_target) = 0;
904 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
905 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
906 } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
907 /* rejected our first simple tag message */
908 scmd_printk(KERN_WARNING, SCp,
909 "Rejected first tag queue attempt, turning off tag queueing\n");
910 /* we're done negotiating */
911 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
 912 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
913 SCp->device->tagged_supported = 0;
914 scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
915 } else {
916 shost_printk(KERN_WARNING, host,
917 "(%d:%d) Unexpected REJECT Message %s\n",
918 pun, lun,
919 NCR_700_phase[(dsps & 0xf00) >> 8]);
920 /* however, just ignore it */
921 }
922 break;
923
924 case A_PARITY_ERROR_MSG:
925 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
926 pun, lun);
927 NCR_700_internal_bus_reset(host);
928 break;
929 case A_SIMPLE_TAG_MSG:
930 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
931 pun, lun, hostdata->msgin[1],
932 NCR_700_phase[(dsps & 0xf00) >> 8]);
933 /* just ignore it */
934 break;
935 default:
936 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
937 host->host_no, pun, lun,
938 NCR_700_phase[(dsps & 0xf00) >> 8]);
939
 940 spi_print_msg(hostdata->msgin);
941 printk("\n");
942 /* just reject it */
943 hostdata->msgout[0] = A_REJECT_MSG;
944 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
945 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
946 1);
947 /* SendMsgOut returns, so set up the return
948 * address */
949 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
950
951 break;
952 }
953 NCR_700_writel(temp, host, TEMP_REG);
954 /* set us up to receive another message */
 955 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
956 return resume_offset;
957}
958
959STATIC __u32
960process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
961 struct Scsi_Host *host,
962 struct NCR_700_Host_Parameters *hostdata)
963{
964 __u32 resume_offset = 0;
965 __u8 pun = 0xff, lun=0xff;
966
967 if(SCp != NULL) {
968 pun = SCp->device->id;
969 lun = SCp->device->lun;
970 }
971
972 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
973 DEBUG((" COMMAND COMPLETE, status=%02x\n",
974 hostdata->status[0]));
975 /* OK, if TCQ still under negotiation, we now know it works */
976 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
977 NCR_700_set_tag_neg_state(SCp->device,
978 NCR_700_FINISHED_TAG_NEGOTIATION);
979
 980 /* check for contingent allegiance conditions */
981 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
982 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
983 struct NCR_700_command_slot *slot =
984 (struct NCR_700_command_slot *)SCp->host_scribble;
 985 if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
986 /* OOPS: bad device, returning another
987 * contingent allegiance condition */
988 scmd_printk(KERN_ERR, SCp,
989 "broken device is looping in contingent allegiance: ignoring\n");
990 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
991 } else {
992 char *cmnd =
993 NCR_700_get_sense_cmnd(SCp->device);
994#ifdef NCR_DEBUG
995 scsi_print_command(SCp);
996 printk(" cmd %p has status %d, requesting sense\n",
997 SCp, hostdata->status[0]);
998#endif
999 /* we can destroy the command here
1000 * because the contingent allegiance
1001 * condition will cause a retry which
1002 * will re-copy the command from the
1003 * saved data_cmnd. We also unmap any
1004 * data associated with the command
1005 * here */
1006 NCR_700_unmap(hostdata, SCp, slot);
1007 dma_unmap_single(hostdata->dev, slot->pCmd,
1008 sizeof(SCp->cmnd),
1009 DMA_TO_DEVICE);
1010
1011 cmnd[0] = REQUEST_SENSE;
1012 cmnd[1] = (SCp->device->lun & 0x7) << 5;
1013 cmnd[2] = 0;
1014 cmnd[3] = 0;
1015 cmnd[4] = sizeof(SCp->sense_buffer);
1016 cmnd[5] = 0;
1017 /* Here's a quiet hack: the
1018 * REQUEST_SENSE command is six bytes,
1019 * so store a flag indicating that
1020 * this was an internal sense request
1021 * and the original status at the end
1022 * of the command */
1023 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1024 cmnd[7] = hostdata->status[0];
1025 cmnd[8] = SCp->cmd_len;
1026 SCp->cmd_len = 6; /* command length for
1027 * REQUEST_SENSE */
 1028 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1029 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1030 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1031 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1032 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1033 slot->SG[1].pAddr = 0;
1034 slot->resume_offset = hostdata->pScript;
1035 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1036 dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1037
1038 /* queue the command for reissue */
1039 slot->state = NCR_700_SLOT_QUEUED;
 1040 slot->flags = NCR_700_FLAG_AUTOSENSE;
1041 hostdata->state = NCR_700_HOST_FREE;
1042 hostdata->cmd = NULL;
1043 }
1044 } else {
1045 // Currently rely on the mid layer evaluation
1046 // of the tag queuing capability
1047 //
1048 //if(status_byte(hostdata->status[0]) == GOOD &&
1049 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1050 // /* Piggy back the tag queueing support
1051 // * on this command */
1052 // dma_sync_single_for_cpu(hostdata->dev,
1053 // slot->dma_handle,
1054 // SCp->request_bufflen,
1055 // DMA_FROM_DEVICE);
1056 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1057 // scmd_printk(KERN_INFO, SCp,
1058 // "Enabling Tag Command Queuing\n");
1059 // hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1060 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1061 // } else {
1062 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
 1063 // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1064 // }
1065 //}
1066 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1067 }
1068 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1069 __u8 i = (dsps & 0xf00) >> 8;
1070
 1071 scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1072 NCR_700_phase[i],
1073 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1074 scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
1075 SCp->cmd_len);
1076 scsi_print_command(SCp);
1077
1078 NCR_700_internal_bus_reset(host);
1079 } else if((dsps & 0xfffff000) == A_FATAL) {
1080 int i = (dsps & 0xfff);
1081
1082 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1083 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1084 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1085 printk(KERN_ERR " msg begins %02x %02x\n",
1086 hostdata->msgin[0], hostdata->msgin[1]);
1087 }
1088 NCR_700_internal_bus_reset(host);
1089 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1090#ifdef NCR_700_DEBUG
1091 __u8 i = (dsps & 0xf00) >> 8;
1092
1093 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1094 host->host_no, pun, lun,
1095 i, NCR_700_phase[i]);
1096#endif
1097 save_for_reselection(hostdata, SCp, dsp);
1098
1099 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1100 __u8 lun;
1101 struct NCR_700_command_slot *slot;
1102 __u8 reselection_id = hostdata->reselection_id;
1103 struct scsi_device *SDp;
1104
1105 lun = hostdata->msgin[0] & 0x1f;
1106
1107 hostdata->reselection_id = 0xff;
1108 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1109 host->host_no, reselection_id, lun));
1110 /* clear the reselection indicator */
1111 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1112 if(unlikely(SDp == NULL)) {
1113 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1114 host->host_no, reselection_id, lun);
1115 BUG();
1116 }
1117 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1118 struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1119 if(unlikely(SCp == NULL)) {
1120 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1121 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1122 BUG();
1123 }
1124
1125 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1126 DDEBUG(KERN_DEBUG, SDp,
1127 "reselection is tag %d, slot %p(%d)\n",
1128 hostdata->msgin[2], slot, slot->tag);
1129 } else {
1130 struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1131 if(unlikely(SCp == NULL)) {
1132 sdev_printk(KERN_ERR, SDp,
1133 "no saved request for untagged cmd\n");
1134 BUG();
1135 }
1136 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1137 }
1138
1139 if(slot == NULL) {
1140 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1141 host->host_no, reselection_id, lun,
1142 hostdata->msgin[0], hostdata->msgin[1],
1143 hostdata->msgin[2]);
1144 } else {
1145 if(hostdata->state != NCR_700_HOST_BUSY)
1146 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1147 host->host_no);
1148 resume_offset = slot->resume_offset;
1149 hostdata->cmd = slot->cmnd;
1150
1151 /* re-patch for this command */
1152 script_patch_32_abs(hostdata->dev, hostdata->script,
1153 CommandAddress, slot->pCmd);
1154 script_patch_16(hostdata->dev, hostdata->script,
1da177e4 1155 CommandCount, slot->cmnd->cmd_len);
d3fa72e4
RB
1156 script_patch_32_abs(hostdata->dev, hostdata->script,
1157 SGScriptStartAddress,
1158 to32bit(&slot->pSG[0].ins));
1159
1160 /* Note: setting SXFER only works if we're
1161 * still in the MESSAGE phase, so it is vital
1162 * that ACK is still asserted when we process
1163 * the reselection message. The resume offset
1164 * should therefore always clear ACK */
1165 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1166 host, SXFER_REG);
 1167 dma_cache_sync(hostdata->dev, hostdata->msgin,
 1168 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
 1169 dma_cache_sync(hostdata->dev, hostdata->msgout,
1170 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1171 /* I'm just being paranoid here, the command should
1172 * already have been flushed from the cache */
 1173 dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1174 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1175
1176
1177
1178 }
1179 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1180
1181 /* This section is full of debugging code because I've
1182 * never managed to reach it. I think what happens is
1183 * that, because the 700 runs with selection
 1184 * interrupts enabled the whole time, we take a
1185 * selection interrupt before we manage to get to the
1186 * reselected script interrupt */
1187
1188 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1189 struct NCR_700_command_slot *slot;
1190
1191 /* Take out our own ID */
1192 reselection_id &= ~(1<<host->this_id);
1193
1194 /* I've never seen this happen, so keep this as a printk rather
1195 * than a debug */
1196 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1197 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1198
1199 {
1200 /* FIXME: DEBUGGING CODE */
1201 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1202 int i;
1203
1204 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1205 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1206 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1207 break;
1208 }
1209 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1210 SCp = hostdata->slots[i].cmnd;
1211 }
1212
1213 if(SCp != NULL) {
1214 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1215 /* change slot from busy to queued to redo command */
1216 slot->state = NCR_700_SLOT_QUEUED;
1217 }
1218 hostdata->cmd = NULL;
1219
1220 if(reselection_id == 0) {
1221 if(hostdata->reselection_id == 0xff) {
1222 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1223 return 0;
1224 } else {
1225 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1226 host->host_no);
1227 reselection_id = hostdata->reselection_id;
1228 }
1229 } else {
1230
1231 /* convert to real ID */
1232 reselection_id = bitmap_to_number(reselection_id);
1233 }
1234 hostdata->reselection_id = reselection_id;
1235 /* just in case we have a stale simple tag message, clear it */
1236 hostdata->msgin[1] = 0;
 1237 dma_cache_sync(hostdata->dev, hostdata->msgin,
1238 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1239 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1240 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1241 } else {
1242 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1243 }
1244 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1245 /* we've just disconnected from the bus, do nothing since
1246 * a return here will re-run the queued command slot
1247 * that may have been interrupted by the initial selection */
1248 DEBUG((" SELECTION COMPLETED\n"));
1249 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1250 resume_offset = process_message(host, hostdata, SCp,
1251 dsp, dsps);
1252 } else if((dsps & 0xfffff000) == 0) {
1253 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1254 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1255 host->host_no, pun, lun, NCR_700_condition[i],
1256 NCR_700_phase[j], dsp - hostdata->pScript);
1257 if(SCp != NULL) {
 1258 struct scatterlist *sg;
 1259
1260 scsi_print_command(SCp);
1261 scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1262 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
 1263 }
 1264 }
1265 NCR_700_internal_bus_reset(host);
1266 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1267 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1268 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1269 resume_offset = dsp;
1270 } else {
1271 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1272 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1273 NCR_700_internal_bus_reset(host);
1274 }
1275 return resume_offset;
1276}
1277
1278/* We run the 53c700 with selection interrupts always enabled. This
1279 * means that the chip may be selected as soon as the bus frees. On a
1280 * busy bus, this can be before the scripts engine finishes its
1281 * processing. Therefore, part of the selection processing has to be
1282 * to find out what the scripts engine is doing and complete the
1283 * function if necessary (i.e. process the pending disconnect or save
 1284 * the interrupted initial selection) */
1285STATIC inline __u32
1286process_selection(struct Scsi_Host *host, __u32 dsp)
1287{
1288 __u8 id = 0; /* Squash compiler warning */
1289 int count = 0;
1290 __u32 resume_offset = 0;
1291 struct NCR_700_Host_Parameters *hostdata =
1292 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1293 struct scsi_cmnd *SCp = hostdata->cmd;
1294 __u8 sbcl;
1295
1296 for(count = 0; count < 5; count++) {
1297 id = NCR_700_readb(host, hostdata->chip710 ?
1298 CTEST9_REG : SFBR_REG);
1299
1300 /* Take out our own ID */
1301 id &= ~(1<<host->this_id);
1302 if(id != 0)
1303 break;
1304 udelay(5);
1305 }
1306 sbcl = NCR_700_readb(host, SBCL_REG);
1307 if((sbcl & SBCL_IO) == 0) {
1308 /* mark as having been selected rather than reselected */
1309 id = 0xff;
1310 } else {
1311 /* convert to real ID */
1312 hostdata->reselection_id = id = bitmap_to_number(id);
1313 DEBUG(("scsi%d: Reselected by %d\n",
1314 host->host_no, id));
1315 }
1316 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1317 struct NCR_700_command_slot *slot =
1318 (struct NCR_700_command_slot *)SCp->host_scribble;
1319 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1320
1321 switch(dsp - hostdata->pScript) {
1322 case Ent_Disconnect1:
1323 case Ent_Disconnect2:
1324 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1325 break;
1326 case Ent_Disconnect3:
1327 case Ent_Disconnect4:
1328 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1329 break;
1330 case Ent_Disconnect5:
1331 case Ent_Disconnect6:
1332 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1333 break;
1334 case Ent_Disconnect7:
1335 case Ent_Disconnect8:
1336 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1337 break;
1338 case Ent_Finish1:
1339 case Ent_Finish2:
1340 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1341 break;
1342
1343 default:
1344 slot->state = NCR_700_SLOT_QUEUED;
1345 break;
1346 }
1347 }
1348 hostdata->state = NCR_700_HOST_BUSY;
1349 hostdata->cmd = NULL;
1350 /* clear any stale simple tag message */
1351 hostdata->msgin[1] = 0;
 1352 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1353 DMA_BIDIRECTIONAL);
1354
1355 if(id == 0xff) {
1356 /* Selected as target, Ignore */
1357 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1358 } else if(hostdata->tag_negotiated & (1<<id)) {
1359 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1360 } else {
1361 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1362 }
1363 return resume_offset;
1364}
1365
1366static inline void
1367NCR_700_clear_fifo(struct Scsi_Host *host) {
1368 const struct NCR_700_Host_Parameters *hostdata
1369 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1370 if(hostdata->chip710) {
1371 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1372 } else {
1373 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1374 }
1375}
1376
1377static inline void
1378NCR_700_flush_fifo(struct Scsi_Host *host) {
1379 const struct NCR_700_Host_Parameters *hostdata
1380 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1381 if(hostdata->chip710) {
1382 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1383 udelay(10);
1384 NCR_700_writeb(0, host, CTEST8_REG);
1385 } else {
1386 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1387 udelay(10);
1388 NCR_700_writeb(0, host, DFIFO_REG);
1389 }
1390}
1391
1392
1393/* The queue lock with interrupts disabled must be held on entry to
1394 * this function */
1395STATIC int
1396NCR_700_start_command(struct scsi_cmnd *SCp)
1397{
1398 struct NCR_700_command_slot *slot =
1399 (struct NCR_700_command_slot *)SCp->host_scribble;
1400 struct NCR_700_Host_Parameters *hostdata =
1401 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1402 __u16 count = 1; /* for IDENTIFY message */
1403
1404 if(hostdata->state != NCR_700_HOST_FREE) {
1405 /* keep this inside the lock to close the race window where
1406 * the running command finishes on another CPU while we don't
1407 * change the state to queued on this one */
1408 slot->state = NCR_700_SLOT_QUEUED;
1409
1410 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1411 SCp->device->host->host_no, slot->cmnd, slot));
1412 return 0;
1413 }
1414 hostdata->state = NCR_700_HOST_BUSY;
1415 hostdata->cmd = SCp;
1416 slot->state = NCR_700_SLOT_BUSY;
1417 /* keep interrupts disabled until we have the command correctly
1418 * set up so we cannot take a selection interrupt */
1419
1420 hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1421 slot->flags != NCR_700_FLAG_AUTOSENSE),
1422 SCp->device->lun);
1423 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1424 * if the negotiated transfer parameters still hold, so
1425 * always renegotiate them */
1426 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1427 slot->flags == NCR_700_FLAG_AUTOSENSE) {
1428 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1429 }
1430
1431 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1432 * If a contingent allegiance condition exists, the device
1433 * will refuse all tags, so send the request sense as untagged
1434 * */
 1435 if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1436 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1437 slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1438 count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1439 }
1440
1441 if(hostdata->fast &&
1442 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1443 count += spi_populate_sync_msg(&hostdata->msgout[count],
1444 spi_period(SCp->device->sdev_target),
1445 spi_offset(SCp->device->sdev_target));
1446 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1447 }
1448
 1449 script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1450
1451
 1452 script_patch_ID(hostdata->dev, hostdata->script,
 1453 Device_ID, 1<<scmd_id(SCp));
 1454
 1455 script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
 1456 slot->pCmd);
1457 script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1458 SCp->cmd_len);
1459 /* finally plumb the beginning of the SG list into the script
1460 * */
1461 script_patch_32_abs(hostdata->dev, hostdata->script,
1462 SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1463 NCR_700_clear_fifo(SCp->device->host);
1464
1465 if(slot->resume_offset == 0)
1466 slot->resume_offset = hostdata->pScript;
1467 /* now perform all the writebacks and invalidates */
1468 dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1469 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
 1470 DMA_FROM_DEVICE);
1471 dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1472 dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1da177e4
LT
1473
1474 /* set the synchronous period/offset */
1475 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1476 SCp->device->host, SXFER_REG);
1477 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1478 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1479
1480 return 1;
1481}
1482
1483irqreturn_t
 1484 NCR_700_intr(int irq, void *dev_id)
1485{
1486 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1487 struct NCR_700_Host_Parameters *hostdata =
1488 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1489 __u8 istat;
1490 __u32 resume_offset = 0;
1491 __u8 pun = 0xff, lun = 0xff;
1492 unsigned long flags;
1493 int handled = 0;
1494
 1495 /* Use the host lock to serialise access to the 53c700
1496 * hardware. Note: In future, we may need to take the queue
1497 * lock to enter the done routines. When that happens, we
1498 * need to ensure that for this driver, the host lock and the
1499 * queue lock point to the same thing. */
1500 spin_lock_irqsave(host->host_lock, flags);
1501 if((istat = NCR_700_readb(host, ISTAT_REG))
1502 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1503 __u32 dsps;
1504 __u8 sstat0 = 0, dstat = 0;
1505 __u32 dsp;
1506 struct scsi_cmnd *SCp = hostdata->cmd;
1507 enum NCR_700_Host_State state;
1508
1509 handled = 1;
1510 state = hostdata->state;
1511 SCp = hostdata->cmd;
1512
1513 if(istat & SCSI_INT_PENDING) {
1514 udelay(10);
1515
1516 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1517 }
1518
1519 if(istat & DMA_INT_PENDING) {
1520 udelay(10);
1521
1522 dstat = NCR_700_readb(host, DSTAT_REG);
1523 }
1524
1525 dsps = NCR_700_readl(host, DSPS_REG);
1526 dsp = NCR_700_readl(host, DSP_REG);
1527
1528 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1529 host->host_no, istat, sstat0, dstat,
1530 (dsp - (__u32)(hostdata->pScript))/4,
1531 dsp, dsps));
1532
1533 if(SCp != NULL) {
1534 pun = SCp->device->id;
1535 lun = SCp->device->lun;
1536 }
1537
1538 if(sstat0 & SCSI_RESET_DETECTED) {
1539 struct scsi_device *SDp;
1540 int i;
1541
1542 hostdata->state = NCR_700_HOST_BUSY;
1543
1544 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1545 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1546
1547 scsi_report_bus_reset(host, 0);
1548
1549 /* clear all the negotiated parameters */
1550 __shost_for_each_device(SDp, host)
 1551 NCR_700_clear_flag(SDp, ~0);
1552
1553 /* clear all the slots and their pending commands */
1554 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1555 struct scsi_cmnd *SCp;
1556 struct NCR_700_command_slot *slot =
1557 &hostdata->slots[i];
1558
1559 if(slot->state == NCR_700_SLOT_FREE)
1560 continue;
1561
1562 SCp = slot->cmnd;
1563 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1564 slot, SCp);
1565 free_slot(slot, hostdata);
1566 SCp->host_scribble = NULL;
1567 NCR_700_set_depth(SCp->device, 0);
1568 /* NOTE: deadlock potential here: we
1569 * rely on mid-layer guarantees that
1570 * scsi_done won't try to issue the
1571 * command again otherwise we'll
1572 * deadlock on the
1573 * hostdata->state_lock */
1574 SCp->result = DID_RESET << 16;
1575 SCp->scsi_done(SCp);
1576 }
1577 mdelay(25);
1578 NCR_700_chip_setup(host);
1579
1580 hostdata->state = NCR_700_HOST_FREE;
1581 hostdata->cmd = NULL;
1582 /* signal back if this was an eh induced reset */
1583 if(hostdata->eh_complete != NULL)
1584 complete(hostdata->eh_complete);
1585 goto out_unlock;
1586 } else if(sstat0 & SELECTION_TIMEOUT) {
1587 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1588 host->host_no, pun, lun));
1589 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1590 } else if(sstat0 & PHASE_MISMATCH) {
1591 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1592 (struct NCR_700_command_slot *)SCp->host_scribble;
1593
1594 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1595 /* It wants to reply to some part of
1596 * our message */
1597#ifdef NCR_700_DEBUG
1598 __u32 temp = NCR_700_readl(host, TEMP_REG);
1599 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1600 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1601#endif
1602 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1603 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1604 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1605 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1606 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1607 int residual = NCR_700_data_residual(host);
1608 int i;
1609#ifdef NCR_700_DEBUG
1610 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1611
1612 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1613 host->host_no, pun, lun,
1614 SGcount, data_transfer);
1615 scsi_print_command(SCp);
1616 if(residual) {
1617 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1618 host->host_no, pun, lun,
1619 SGcount, data_transfer, residual);
1620 }
1621#endif
1622 data_transfer += residual;
1623
1624 if(data_transfer != 0) {
1625 int count;
1626 __u32 pAddr;
1627
1628 SGcount--;
1629
1630 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1631 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1632 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1633 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1634 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1635 pAddr += (count - data_transfer);
1636#ifdef NCR_700_DEBUG
1637 if(pAddr != naddr) {
1638 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1639 }
1640#endif
1641 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1642 }
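				/* Worked example of the fix-up above: if the
				 * interrupted MOVE originally covered
				 * count == 0x1000 bytes and data_transfer
				 * (bytes still owed, including FIFO residue)
				 * is 0x400, the entry is rewritten to move the
				 * remaining 0x400 bytes starting 0xc00 bytes
				 * further into the buffer. */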
1643 /* set the executed moves to nops */
1644 for(i=0; i<SGcount; i++) {
1645 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1646 slot->SG[i].pAddr = 0;
1647 }
1648 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1649 /* and pretend we disconnected after
1650 * the command phase */
1651 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1652 /* make sure all the data is flushed */
1653 NCR_700_flush_fifo(host);
1654 } else {
1655 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1656 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1657 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1658 NCR_700_internal_bus_reset(host);
1659 }
1660
1661 } else if(sstat0 & SCSI_GROSS_ERROR) {
1662 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1663 host->host_no, pun, lun);
1664 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1665 } else if(sstat0 & PARITY_ERROR) {
1666 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1667 host->host_no, pun, lun);
1668 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1669 } else if(dstat & SCRIPT_INT_RECEIVED) {
1670 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1671 host->host_no, pun, lun));
1672 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1673 } else if(dstat & (ILGL_INST_DETECTED)) {
1674 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1675 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1676 host->host_no, pun, lun,
1677 dsp, dsp - hostdata->pScript);
1678 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1679 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1680 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1681 host->host_no, pun, lun, dstat);
1682 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1683 }
1684
1685
1686 /* NOTE: selection interrupt processing MUST occur
1687 * after script interrupt processing to correctly cope
1688 * with the case where we process a disconnect and
1689 * then get reselected before we process the
1690 * disconnection */
1691 if(sstat0 & SELECTED) {
1692 /* FIXME: It currently takes at least FOUR
1693 * interrupts to complete a command that
1694 * disconnects: one for the disconnect, one
1695 * for the reselection, one to get the
1696 * reselection data and one to complete the
1697 * command. If we guess the reselected
1698 * command here and prepare it, we only need
1699 * to get a reselection data interrupt if we
1700 * guessed wrongly. Since the interrupt
1701 * overhead is much greater than the command
1702 * setup, this would be an efficient
1703 * optimisation particularly as we probably
1704 * only have one outstanding command on a
1705 * target most of the time */
1706
1707 resume_offset = process_selection(host, dsp);
1708
1709 }
1710
1711 }
1712
1713 if(resume_offset) {
1714 if(hostdata->state != NCR_700_HOST_BUSY) {
1715 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1716 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1717 hostdata->state = NCR_700_HOST_BUSY;
1718 }
1719
1720 DEBUG(("Attempting to resume at %x\n", resume_offset));
1721 NCR_700_clear_fifo(host);
1722 NCR_700_writel(resume_offset, host, DSP_REG);
1723 }
1724 /* There is probably a technical no-no about this: If we're a
1725 * shared interrupt and we got this interrupt because the
1726 * other device needs servicing not us, we're still going to
1727 * check our queued commands here---of course, there shouldn't
1728 * be any outstanding.... */
1729 if(hostdata->state == NCR_700_HOST_FREE) {
1730 int i;
1731
1732 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1733 /* fairness: always run the queue from the last
1734 * position we left off */
1735 int j = (i + hostdata->saved_slot_position)
1736 % NCR_700_COMMAND_SLOTS_PER_HOST;
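			/* e.g. if there were 8 slots and saved_slot_position
			 * were 5, the scan order would be 5, 6, 7, 0, 1, ...
			 * so earlier slots cannot permanently starve later
			 * ones */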
1737
1738 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1739 continue;
1740 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1741 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1742 host->host_no, &hostdata->slots[j],
1743 hostdata->slots[j].cmnd));
1744 hostdata->saved_slot_position = j + 1;
1745 }
1746
1747 break;
1748 }
1749 }
1750 out_unlock:
1751 spin_unlock_irqrestore(host->host_lock, flags);
1752 return IRQ_RETVAL(handled);
1753}
1754
1755STATIC int
1756NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1757{
1758 struct NCR_700_Host_Parameters *hostdata =
1759 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1760 __u32 move_ins;
1761 enum dma_data_direction direction;
1762 struct NCR_700_command_slot *slot;
1763
1764 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1765 /* We're over our allocation; this should never happen
1766 * since we report the max allocation to the mid layer */
1767 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1768 return 1;
1769 }
1770 /* check for untagged commands. We cannot have any other commands
1771 * outstanding if we accept one. Commands could be untagged because:
1772 *
1773 * - The tag negotiated bitmap is clear
1774 * - The blk layer sent an untagged command
1775 */
1776 if(NCR_700_get_depth(SCp->device) != 0
1777 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1778 || !blk_rq_tagged(SCp->request))) {
1779 CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1780 NCR_700_get_depth(SCp->device));
1781 return SCSI_MLQUEUE_DEVICE_BUSY;
1782 }
1783 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1784 CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1785 NCR_700_get_depth(SCp->device));
1786 return SCSI_MLQUEUE_DEVICE_BUSY;
1787 }
1788 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1789
1790 /* begin the command here */
1791 /* no need to check for NULL, test for command_slot_count above
1792 * ensures a slot is free */
1793 slot = find_empty_slot(hostdata);
1794
1795 slot->cmnd = SCp;
1796
1797 SCp->scsi_done = done;
1798 SCp->host_scribble = (unsigned char *)slot;
1799 SCp->SCp.ptr = NULL;
1800 SCp->SCp.buffer = NULL;
1801
1802#ifdef NCR_700_DEBUG
1803 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1804 scsi_print_command(SCp);
1805#endif
1806 if(blk_rq_tagged(SCp->request)
1807 && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1808 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1809 scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1810 hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1811 NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1812 }
1813
1814 /* here we may have to process an untagged command. The gate
1815 * above ensures that this will be the only one outstanding,
1816 * so clear the tag negotiated bit.
1817 *
1818 * FIXME: This will royally screw up on multiple LUN devices
1819 * */
1820 if(!blk_rq_tagged(SCp->request)
1821 && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1822 scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1823 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1824 }
1825
1826 if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1827 && scsi_get_tag_type(SCp->device)) {
1828 slot->tag = SCp->request->tag;
1829 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1830 slot->tag, slot);
1831 } else {
1832 slot->tag = SCSI_NO_TAG;
1833 /* must populate current_cmnd for scsi_find_tag to work */
1834 SCp->device->current_cmnd = SCp;
1835 }
1836 /* sanity check: some of the commands generated by the mid-layer
1837 * have an eccentric idea of their sc_data_direction */
1838 if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1839 SCp->sc_data_direction != DMA_NONE) {
1840#ifdef NCR_700_DEBUG
1841 printk("53c700: Command");
1842 scsi_print_command(SCp);
1843 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1844#endif
1845 SCp->sc_data_direction = DMA_NONE;
1846 }
1847
1848 switch (SCp->cmnd[0]) {
1849 case REQUEST_SENSE:
1850 /* clear the internal sense magic */
1851 SCp->cmnd[6] = 0;
1852 /* fall through */
1853 default:
1854 /* OK, get it from the command */
1855 switch(SCp->sc_data_direction) {
1856 case DMA_BIDIRECTIONAL:
1857 default:
1858 printk(KERN_ERR "53c700: Unknown command for data direction ");
1859 scsi_print_command(SCp);
1860
1861 move_ins = 0;
1862 break;
1863 case DMA_NONE:
1864 move_ins = 0;
1865 break;
1866 case DMA_FROM_DEVICE:
1867 move_ins = SCRIPT_MOVE_DATA_IN;
1868 break;
1869 case DMA_TO_DEVICE:
1870 move_ins = SCRIPT_MOVE_DATA_OUT;
1871 break;
1872 }
1873 }
1874
1875 /* now build the scatter gather list */
1876 direction = SCp->sc_data_direction;
1877 if(move_ins != 0) {
1878 int i;
1879 int sg_count;
1880 dma_addr_t vPtr = 0;
1881 struct scatterlist *sg;
1882 __u32 count = 0;
1883
1884 sg_count = scsi_dma_map(SCp);
1885 BUG_ON(sg_count < 0);
1886
1887 scsi_for_each_sg(SCp, sg, sg_count, i) {
1888 vPtr = sg_dma_address(sg);
1889 count = sg_dma_len(sg);
1890
1891 slot->SG[i].ins = bS_to_host(move_ins | count);
1892 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1893 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1894 slot->SG[i].pAddr = bS_to_host(vPtr);
1895 }
1896 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1897 slot->SG[i].pAddr = 0;
1898 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1899 DEBUG((" SETTING %08lx to %x\n",
1900 (&slot->pSG[i].ins),
1901 slot->SG[i].ins));
1902 }
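	/* The list built above is itself script code: each slot->SG[i] is a
	 * MOVE instruction (direction opcode ORed with a 24-bit byte count)
	 * plus the DMA address to move from/to, and the terminating
	 * SCRIPT_RETURN hands control back to the calling script once the
	 * data phase completes. */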
1903 slot->resume_offset = 0;
1904 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1905 sizeof(SCp->cmnd), DMA_TO_DEVICE);
1906 NCR_700_start_command(SCp);
1907 return 0;
1908}
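/* Return-value contract with the mid-layer: 0 means the command has been
 * accepted and will be completed through done(); the SCSI_MLQUEUE_DEVICE_BUSY
 * returns above ask the mid-layer to requeue the command and retry it later
 * rather than fail it. */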
1909
1910STATIC int
1911NCR_700_abort(struct scsi_cmnd * SCp)
1912{
1913 struct NCR_700_command_slot *slot;
1914
1915 scmd_printk(KERN_INFO, SCp,
1916 "New error handler wants to abort command\n\t");
1917 scsi_print_command(SCp);
1918
1919 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1920
1921 if(slot == NULL)
1922 /* no outstanding command to abort */
1923 return SUCCESS;
1924 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1925 /* FIXME: This is because of a problem in the new
1926 * error handler. When it is in error recovery, it
1927 * will send a TUR to a device it thinks may still be
1928 * showing a problem. If the TUR isn't responded to,
1929 * it will abort it and mark the device off line.
1930 * Unfortunately, it does no other error recovery, so
1931 * this would leave us with an outstanding command
1932 * occupying a slot. Rather than allow this to
1933 * happen, we issue a bus reset to force all
1934 * outstanding commands to terminate here. */
1935 NCR_700_internal_bus_reset(SCp->device->host);
1936 /* still drop through and return failed */
1937 }
1938 return FAILED;
1939
1940}
1941
1942STATIC int
1943NCR_700_bus_reset(struct scsi_cmnd * SCp)
1944{
1945 DECLARE_COMPLETION_ONSTACK(complete);
1946 struct NCR_700_Host_Parameters *hostdata =
1947 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1948
1949 scmd_printk(KERN_INFO, SCp,
1950 "New error handler wants BUS reset, cmd %p\n\t", SCp);
1951 scsi_print_command(SCp);
1952
1953 /* In theory, eh_complete should always be null because the
1954 * eh is single threaded, but just in case we're handling a
1955 * reset via sg or something */
1956 spin_lock_irq(SCp->device->host->host_lock);
1957 while (hostdata->eh_complete != NULL) {
1958 spin_unlock_irq(SCp->device->host->host_lock);
1959 msleep_interruptible(100);
1960 spin_lock_irq(SCp->device->host->host_lock);
1961 }
1962
1963 hostdata->eh_complete = &complete;
1964 NCR_700_internal_bus_reset(SCp->device->host);
1965
1966 spin_unlock_irq(SCp->device->host->host_lock);
1967 wait_for_completion(&complete);
1968 spin_lock_irq(SCp->device->host->host_lock);
1969
1970 hostdata->eh_complete = NULL;
1971 /* Revalidate the transport parameters of the failing device */
1972 if(hostdata->fast)
1973 spi_schedule_dv_device(SCp->device);
1974
1975 spin_unlock_irq(SCp->device->host->host_lock);
1976 return SUCCESS;
1977}
1978
1979STATIC int
1980NCR_700_host_reset(struct scsi_cmnd * SCp)
1981{
1982 scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1983 scsi_print_command(SCp);
1984
1985 spin_lock_irq(SCp->device->host->host_lock);
1986
1987 NCR_700_internal_bus_reset(SCp->device->host);
1988 NCR_700_chip_reset(SCp->device->host);
1989
1990 spin_unlock_irq(SCp->device->host->host_lock);
1991
1992 return SUCCESS;
1993}
1994
1995STATIC void
1996NCR_700_set_period(struct scsi_target *STp, int period)
1997{
1998 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1999 struct NCR_700_Host_Parameters *hostdata =
2000 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2001
2002 if(!hostdata->fast)
2003 return;
2004
2005 if(period < hostdata->min_period)
2006 period = hostdata->min_period;
2007
2008 spi_period(STp) = period;
2009 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2010 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2011 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2012}
2013
2014STATIC void
2015NCR_700_set_offset(struct scsi_target *STp, int offset)
2016{
2017 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2018 struct NCR_700_Host_Parameters *hostdata =
2019 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2020 int max_offset = hostdata->chip710
2021 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2022
2023 if(!hostdata->fast)
2024 return;
2025
2026 if(offset > max_offset)
2027 offset = max_offset;
2028
2029 /* if we're currently async, make sure the period is reasonable */
2030 if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2031 spi_period(STp) > 0xff))
2032 spi_period(STp) = hostdata->min_period;
2033
2034 spi_offset(STp) = offset;
2035 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2036 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2037 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2038}
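/* Note on the two spi_transport setters above: they only clamp and record the
 * new period/offset and clear NCR_700_DEV_NEGOTIATED_SYNC; the assumption is
 * that the message handling elsewhere in this driver notices the cleared flag
 * and issues a fresh SDTR on the next command, with
 * NCR_700_DEV_PRINT_SYNC_NEGOTIATION causing the agreed values to be logged. */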
2039
2040STATIC int
2041NCR_700_slave_alloc(struct scsi_device *SDp)
2042{
2043 SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2044 GFP_KERNEL);
2045
2046 if (!SDp->hostdata)
2047 return -ENOMEM;
2048
2049 return 0;
2050}
2051
2052STATIC int
2053NCR_700_slave_configure(struct scsi_device *SDp)
2054{
2055 struct NCR_700_Host_Parameters *hostdata =
2056 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2057
2058 /* to do here: allocate memory; build a queue_full list */
2059 if(SDp->tagged_supported) {
2060 scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2061 scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2062 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2063 } else {
2064 /* initialise to default depth */
2065 scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2066 }
2067 if(hostdata->fast) {
2068 /* Find the correct offset and period via domain validation */
2069 if (!spi_initial_dv(SDp->sdev_target))
2070 spi_dv_device(SDp);
2071 } else {
2072 spi_offset(SDp->sdev_target) = 0;
2073 spi_period(SDp->sdev_target) = 0;
2074 }
2075 return 0;
2076}
2077
2078STATIC void
2079NCR_700_slave_destroy(struct scsi_device *SDp)
2080{
2081 kfree(SDp->hostdata);
2082 SDp->hostdata = NULL;
2083}
2084
2085static int
2086NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2087{
2088 if (depth > NCR_700_MAX_TAGS)
2089 depth = NCR_700_MAX_TAGS;
2090
2091 scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2092 return depth;
2093}
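/* change_queue_depth is reached when user space writes the device's sysfs
 * queue_depth attribute; anything above NCR_700_MAX_TAGS is clamped.
 * Illustrative usage (path layout assumed, not taken from this file):
 *
 *	echo 64 > /sys/bus/scsi/devices/<h:c:t:l>/queue_depth
 */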
2094
2095static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2096{
2097 int change_tag = ((tag_type == 0 && scsi_get_tag_type(SDp) != 0)
2098 || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2099 struct NCR_700_Host_Parameters *hostdata =
2100 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2101
2102 scsi_set_tag_type(SDp, tag_type);
2103
2104 /* We have a global (per target) flag to track whether TCQ is
2105 * enabled, so we'll be turning it off for the entire target here.
2106 * Our tag algorithm will fail if we mix tagged and untagged commands,
2107 * so quiesce the device before doing this */
2108 if (change_tag)
2109 scsi_target_quiesce(SDp->sdev_target);
2110
2111 if (!tag_type) {
2112 /* shift back to the default unqueued number of commands
2113 * (the user can still raise this) */
2114 scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2115 hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2116 } else {
2117 /* Here, we cleared the negotiation flag above, so this
2118 * will force the driver to renegotiate */
2119 scsi_activate_tcq(SDp, SDp->queue_depth);
2120 if (change_tag)
2121 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2122 }
2123 if (change_tag)
2124 scsi_target_resume(SDp->sdev_target);
2125
2126 return tag_type;
2127}
2128
2129static ssize_t
2130 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2131{
2132 struct scsi_device *SDp = to_scsi_device(dev);
2133
2134 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2135}
2136
2137static struct device_attribute NCR_700_active_tags_attr = {
2138 .attr = {
2139 .name = "active_tags",
2140 .mode = S_IRUGO,
2141 },
2142 .show = NCR_700_show_active_tags,
2143};
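/* The attribute above appears as a read-only "active_tags" file in the
 * scsi_device sysfs directory and reports this driver's count of commands
 * currently outstanding on the device.  Illustrative usage (path layout
 * assumed):
 *
 *	cat /sys/bus/scsi/devices/<h:c:t:l>/active_tags
 */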
2144
2145STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2146 &NCR_700_active_tags_attr,
2147 NULL,
2148};
2149
2150EXPORT_SYMBOL(NCR_700_detect);
2151EXPORT_SYMBOL(NCR_700_release);
2152EXPORT_SYMBOL(NCR_700_intr);
2153
2154static struct spi_function_template NCR_700_transport_functions = {
2155 .set_period = NCR_700_set_period,
2156 .show_period = 1,
2157 .set_offset = NCR_700_set_offset,
2158 .show_offset = 1,
2159};
2160
2161static int __init NCR_700_init(void)
2162{
2163 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2164 if(!NCR_700_transport_template)
2165 return -ENODEV;
2166 return 0;
2167}
2168
2169static void __exit NCR_700_exit(void)
2170{
2171 spi_release_transport(NCR_700_transport_template);
2172}
2173
2174module_init(NCR_700_init);
2175module_exit(NCR_700_exit);
2176