1/* esp_scsi.c: ESP SCSI driver.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
16
17#include <asm/irq.h>
18#include <asm/io.h>
19#include <asm/dma.h>
20
21#include <scsi/scsi.h>
22#include <scsi/scsi_host.h>
23#include <scsi/scsi_cmnd.h>
24#include <scsi/scsi_device.h>
25#include <scsi/scsi_tcq.h>
26#include <scsi/scsi_dbg.h>
27#include <scsi/scsi_transport_spi.h>
28
29#include "esp_scsi.h"
30
31#define DRV_MODULE_NAME "esp"
32#define PFX DRV_MODULE_NAME ": "
33#define DRV_VERSION "2.000"
34#define DRV_MODULE_RELDATE "April 19, 2007"
35
36/* SCSI bus reset settle time in seconds. */
37static int esp_bus_reset_settle = 3;
38
39static u32 esp_debug;
40#define ESP_DEBUG_INTR 0x00000001
41#define ESP_DEBUG_SCSICMD 0x00000002
42#define ESP_DEBUG_RESET 0x00000004
43#define ESP_DEBUG_MSGIN 0x00000008
44#define ESP_DEBUG_MSGOUT 0x00000010
45#define ESP_DEBUG_CMDDONE 0x00000020
46#define ESP_DEBUG_DISCONNECT 0x00000040
47#define ESP_DEBUG_DATASTART 0x00000080
48#define ESP_DEBUG_DATADONE 0x00000100
49#define ESP_DEBUG_RECONNECT 0x00000200
50#define ESP_DEBUG_AUTOSENSE 0x00000400
51
52#define esp_log_intr(f, a...) \
53do { if (esp_debug & ESP_DEBUG_INTR) \
54 printk(f, ## a); \
55} while (0)
56
57#define esp_log_reset(f, a...) \
58do { if (esp_debug & ESP_DEBUG_RESET) \
59 printk(f, ## a); \
60} while (0)
61
62#define esp_log_msgin(f, a...) \
63do { if (esp_debug & ESP_DEBUG_MSGIN) \
64 printk(f, ## a); \
65} while (0)
66
67#define esp_log_msgout(f, a...) \
68do { if (esp_debug & ESP_DEBUG_MSGOUT) \
69 printk(f, ## a); \
70} while (0)
71
72#define esp_log_cmddone(f, a...) \
73do { if (esp_debug & ESP_DEBUG_CMDDONE) \
74 printk(f, ## a); \
75} while (0)
76
77#define esp_log_disconnect(f, a...) \
78do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
79 printk(f, ## a); \
80} while (0)
81
82#define esp_log_datastart(f, a...) \
83do { if (esp_debug & ESP_DEBUG_DATASTART) \
84 printk(f, ## a); \
85} while (0)
86
87#define esp_log_datadone(f, a...) \
88do { if (esp_debug & ESP_DEBUG_DATADONE) \
89 printk(f, ## a); \
90} while (0)
91
92#define esp_log_reconnect(f, a...) \
93do { if (esp_debug & ESP_DEBUG_RECONNECT) \
94 printk(f, ## a); \
95} while (0)
96
97#define esp_log_autosense(f, a...) \
98do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
99 printk(f, ## a); \
100} while (0)
101
102#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
103#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
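/* These expand through the bus-specific ops vector, so e.g.
 * esp_write8(val, ESP_CFG1) becomes esp->ops->esp_write8(esp, val, ESP_CFG1);
 * each bus front-end driver supplies its own register accessors.
 */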
104
105static void esp_log_fill_regs(struct esp *esp,
106 struct esp_event_ent *p)
107{
108 p->sreg = esp->sreg;
109 p->seqreg = esp->seqreg;
110 p->sreg2 = esp->sreg2;
111 p->ireg = esp->ireg;
112 p->select_state = esp->select_state;
113 p->event = esp->event;
114}
115
116void scsi_esp_cmd(struct esp *esp, u8 val)
117{
118 struct esp_event_ent *p;
119 int idx = esp->esp_event_cur;
120
121 p = &esp->esp_event_log[idx];
122 p->type = ESP_EVENT_TYPE_CMD;
123 p->val = val;
124 esp_log_fill_regs(esp, p);
125
126 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
127
128 esp_write8(val, ESP_CMD);
129}
130EXPORT_SYMBOL(scsi_esp_cmd);
131
132static void esp_event(struct esp *esp, u8 val)
133{
134 struct esp_event_ent *p;
135 int idx = esp->esp_event_cur;
136
137 p = &esp->esp_event_log[idx];
138 p->type = ESP_EVENT_TYPE_EVENT;
139 p->val = val;
140 esp_log_fill_regs(esp, p);
141
142 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
143
144 esp->event = val;
145}
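/* Note that the index arithmetic in the two helpers above relies on
 * ESP_EVENT_LOG_SZ being a power of two: masking with
 * (ESP_EVENT_LOG_SZ - 1) wraps the cursor, so e.g. with a log size of
 * 32 the entry after index 31 is index 0 again.
 */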
146
147static void esp_dump_cmd_log(struct esp *esp)
148{
149 int idx = esp->esp_event_cur;
150 int stop = idx;
151
152 printk(KERN_INFO PFX "esp%d: Dumping command log\n",
153 esp->host->unique_id);
154 do {
155 struct esp_event_ent *p = &esp->esp_event_log[idx];
156
157 printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
158 esp->host->unique_id, idx,
159 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");
160
161 printk("val[%02x] sreg[%02x] seqreg[%02x] "
162 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
163 p->val, p->sreg, p->seqreg,
164 p->sreg2, p->ireg, p->select_state, p->event);
165
166 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
167 } while (idx != stop);
168}
169
170static void esp_flush_fifo(struct esp *esp)
171{
172 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
173 if (esp->rev == ESP236) {
174 int lim = 1000;
175
176 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
177 if (--lim == 0) {
 178 printk(KERN_ALERT PFX "esp%d: ESP_FF_FBYTES "
179 "will not clear!\n",
180 esp->host->unique_id);
181 break;
182 }
183 udelay(1);
184 }
185 }
186}
187
188static void hme_read_fifo(struct esp *esp)
189{
190 int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
191 int idx = 0;
192
193 while (fcnt--) {
194 esp->fifo[idx++] = esp_read8(ESP_FDATA);
195 esp->fifo[idx++] = esp_read8(ESP_FDATA);
196 }
197 if (esp->sreg2 & ESP_STAT2_F1BYTE) {
198 esp_write8(0, ESP_FDATA);
199 esp->fifo[idx++] = esp_read8(ESP_FDATA);
200 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
201 }
202 esp->fifo_cnt = idx;
203}
204
205static void esp_set_all_config3(struct esp *esp, u8 val)
206{
207 int i;
208
209 for (i = 0; i < ESP_MAX_TARGET; i++)
210 esp->target[i].esp_config3 = val;
211}
212
213/* Reset the ESP chip, _not_ the SCSI bus. */
214static void esp_reset_esp(struct esp *esp)
215{
216 u8 family_code, version;
217
218 /* Now reset the ESP chip */
219 scsi_esp_cmd(esp, ESP_CMD_RC);
220 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
221 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
222
223 /* Reload the configuration registers */
224 esp_write8(esp->cfact, ESP_CFACT);
225
226 esp->prev_stp = 0;
227 esp_write8(esp->prev_stp, ESP_STP);
228
229 esp->prev_soff = 0;
230 esp_write8(esp->prev_soff, ESP_SOFF);
231
232 esp_write8(esp->neg_defp, ESP_TIMEO);
233
234 /* This is the only point at which it is reliable to read
 235 * the ID-code for the fast ESP chip variants.
236 */
237 esp->max_period = ((35 * esp->ccycle) / 1000);
238 if (esp->rev == FAST) {
239 version = esp_read8(ESP_UID);
240 family_code = (version & 0xf8) >> 3;
241 if (family_code == 0x02)
242 esp->rev = FAS236;
243 else if (family_code == 0x0a)
244 esp->rev = FASHME; /* Version is usually '5'. */
245 else
246 esp->rev = FAS100A;
247 esp->min_period = ((4 * esp->ccycle) / 1000);
248 } else {
249 esp->min_period = ((5 * esp->ccycle) / 1000);
250 }
251 esp->max_period = (esp->max_period + 3)>>2;
252 esp->min_period = (esp->min_period + 3)>>2;
253
254 esp_write8(esp->config1, ESP_CFG1);
255 switch (esp->rev) {
256 case ESP100:
257 /* nothing to do */
258 break;
259
260 case ESP100A:
261 esp_write8(esp->config2, ESP_CFG2);
262 break;
263
264 case ESP236:
265 /* Slow 236 */
266 esp_write8(esp->config2, ESP_CFG2);
267 esp->prev_cfg3 = esp->target[0].esp_config3;
268 esp_write8(esp->prev_cfg3, ESP_CFG3);
269 break;
270
271 case FASHME:
272 esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
273 /* fallthrough... */
274
275 case FAS236:
276 /* Fast 236 or HME */
277 esp_write8(esp->config2, ESP_CFG2);
278 if (esp->rev == FASHME) {
279 u8 cfg3 = esp->target[0].esp_config3;
280
281 cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
282 if (esp->scsi_id >= 8)
283 cfg3 |= ESP_CONFIG3_IDBIT3;
284 esp_set_all_config3(esp, cfg3);
285 } else {
286 u32 cfg3 = esp->target[0].esp_config3;
287
288 cfg3 |= ESP_CONFIG3_FCLK;
289 esp_set_all_config3(esp, cfg3);
290 }
291 esp->prev_cfg3 = esp->target[0].esp_config3;
292 esp_write8(esp->prev_cfg3, ESP_CFG3);
293 if (esp->rev == FASHME) {
294 esp->radelay = 80;
295 } else {
296 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
297 esp->radelay = 0;
298 else
299 esp->radelay = 96;
300 }
301 break;
302
303 case FAS100A:
304 /* Fast 100a */
305 esp_write8(esp->config2, ESP_CFG2);
306 esp_set_all_config3(esp,
307 (esp->target[0].esp_config3 |
308 ESP_CONFIG3_FCLOCK));
309 esp->prev_cfg3 = esp->target[0].esp_config3;
310 esp_write8(esp->prev_cfg3, ESP_CFG3);
311 esp->radelay = 32;
312 break;
313
314 default:
315 break;
316 }
317
318 /* Eat any bitrot in the chip */
319 esp_read8(ESP_INTRPT);
320 udelay(100);
321}
322
323static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
324{
325 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
326 struct scatterlist *sg = cmd->request_buffer;
327 int dir = cmd->sc_data_direction;
328 int total, i;
329
330 if (dir == DMA_NONE)
331 return;
332
333 BUG_ON(cmd->use_sg == 0);
334
335 spriv->u.num_sg = esp->ops->map_sg(esp, sg,
336 cmd->use_sg, dir);
337 spriv->cur_residue = sg_dma_len(sg);
338 spriv->cur_sg = sg;
339
340 total = 0;
341 for (i = 0; i < spriv->u.num_sg; i++)
342 total += sg_dma_len(&sg[i]);
343 spriv->tot_residue = total;
344}
345
346static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
347 struct scsi_cmnd *cmd)
348{
349 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
350
351 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
352 return ent->sense_dma +
353 (ent->sense_ptr - cmd->sense_buffer);
354 }
355
356 return sg_dma_address(p->cur_sg) +
357 (sg_dma_len(p->cur_sg) -
358 p->cur_residue);
359}
360
361static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
362 struct scsi_cmnd *cmd)
363{
364 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
365
366 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
367 return SCSI_SENSE_BUFFERSIZE -
368 (ent->sense_ptr - cmd->sense_buffer);
369 }
370 return p->cur_residue;
371}
372
373static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
374 struct scsi_cmnd *cmd, unsigned int len)
375{
376 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
377
378 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
379 ent->sense_ptr += len;
380 return;
381 }
382
383 p->cur_residue -= len;
384 p->tot_residue -= len;
385 if (p->cur_residue < 0 || p->tot_residue < 0) {
386 printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
387 esp->host->unique_id);
388 printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
389 "len[%u]\n",
390 esp->host->unique_id,
391 p->cur_residue, p->tot_residue, len);
392 p->cur_residue = 0;
393 p->tot_residue = 0;
394 }
395 if (!p->cur_residue && p->tot_residue) {
396 p->cur_sg++;
397 p->cur_residue = sg_dma_len(p->cur_sg);
398 }
399}
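/* Illustrative walk-through of the bookkeeping above: with two 4KB
 * scatterlist entries, cur_residue/tot_residue start at 0x1000/0x2000;
 * after esp_advance_dma(..., 0x1000) the first entry is exhausted, so
 * cur_sg advances and cur_residue reloads to 0x1000 while tot_residue
 * drops to 0x1000.
 */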
400
401static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
402{
403 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
404 int dir = cmd->sc_data_direction;
405
406 if (dir == DMA_NONE)
407 return;
408
409 esp->ops->unmap_sg(esp, cmd->request_buffer,
410 spriv->u.num_sg, dir);
411}
412
413static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
414{
415 struct scsi_cmnd *cmd = ent->cmd;
416 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
417
418 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
419 ent->saved_sense_ptr = ent->sense_ptr;
420 return;
421 }
422 ent->saved_cur_residue = spriv->cur_residue;
423 ent->saved_cur_sg = spriv->cur_sg;
424 ent->saved_tot_residue = spriv->tot_residue;
425}
426
427static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
428{
429 struct scsi_cmnd *cmd = ent->cmd;
430 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
431
432 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
433 ent->sense_ptr = ent->saved_sense_ptr;
434 return;
435 }
436 spriv->cur_residue = ent->saved_cur_residue;
437 spriv->cur_sg = ent->saved_cur_sg;
438 spriv->tot_residue = ent->saved_tot_residue;
439}
440
441static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
442{
443 if (cmd->cmd_len == 6 ||
444 cmd->cmd_len == 10 ||
445 cmd->cmd_len == 12) {
446 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
447 } else {
448 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
449 }
450}
451
452static void esp_write_tgt_config3(struct esp *esp, int tgt)
453{
454 if (esp->rev > ESP100A) {
455 u8 val = esp->target[tgt].esp_config3;
456
457 if (val != esp->prev_cfg3) {
458 esp->prev_cfg3 = val;
459 esp_write8(val, ESP_CFG3);
460 }
461 }
462}
463
464static void esp_write_tgt_sync(struct esp *esp, int tgt)
465{
466 u8 off = esp->target[tgt].esp_offset;
467 u8 per = esp->target[tgt].esp_period;
468
469 if (off != esp->prev_soff) {
470 esp->prev_soff = off;
471 esp_write8(off, ESP_SOFF);
472 }
473 if (per != esp->prev_stp) {
474 esp->prev_stp = per;
475 esp_write8(per, ESP_STP);
476 }
477}
478
479static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
480{
481 if (esp->rev == FASHME) {
482 /* Arbitrary segment boundaries, 24-bit counts. */
483 if (dma_len > (1U << 24))
484 dma_len = (1U << 24);
485 } else {
486 u32 base, end;
487
 488 /* The other ESP chip variants are limited to a 16-bit
 489 * transfer count. On FAS100A and FAS236 we could actually
 490 * get a 24-bit transfer count by enabling ESP_CONFIG2_FENAB
 491 * in the ESP_CFG2 register, but that causes other unwanted
 492 * changes so we don't use it currently.
 493 */
494 if (dma_len > (1U << 16))
495 dma_len = (1U << 16);
496
 497 /* None of the DMA variants hooked up to these chips can
 498 * handle a transfer that crosses a 24-bit address boundary.
 499 */
500 base = dma_addr & ((1U << 24) - 1U);
501 end = base + dma_len;
502 if (end > (1U << 24))
 503 end = (1U << 24);
504 dma_len = end - base;
505 }
506 return dma_len;
507}
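/* Worked example of the clamping above (illustrative values): with
 * dma_addr == 0x01fff000 and dma_len == 0x4000 on a non-FASHME chip,
 * base becomes 0x00fff000 and end would be 0x01003000, which is
 * clamped to 0x01000000; only 0x1000 bytes get programmed and the
 * remainder goes out in a later transfer once the data pointers have
 * advanced.
 */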
508
509static int esp_need_to_nego_wide(struct esp_target_data *tp)
510{
511 struct scsi_target *target = tp->starget;
512
513 return spi_width(target) != tp->nego_goal_width;
514}
515
516static int esp_need_to_nego_sync(struct esp_target_data *tp)
517{
518 struct scsi_target *target = tp->starget;
519
520 /* When offset is zero, period is "don't care". */
521 if (!spi_offset(target) && !tp->nego_goal_offset)
522 return 0;
523
524 if (spi_offset(target) == tp->nego_goal_offset &&
525 spi_period(target) == tp->nego_goal_period)
526 return 0;
527
528 return 1;
529}
530
531static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
532 struct esp_lun_data *lp)
533{
534 if (!ent->tag[0]) {
535 /* Non-tagged, slot already taken? */
536 if (lp->non_tagged_cmd)
537 return -EBUSY;
538
539 if (lp->hold) {
540 /* We are being held by active tagged
541 * commands.
542 */
543 if (lp->num_tagged)
544 return -EBUSY;
545
546 /* Tagged commands completed, we can unplug
547 * the queue and run this untagged command.
548 */
549 lp->hold = 0;
550 } else if (lp->num_tagged) {
551 /* Plug the queue until num_tagged decreases
552 * to zero in esp_free_lun_tag.
553 */
554 lp->hold = 1;
555 return -EBUSY;
556 }
557
558 lp->non_tagged_cmd = ent;
559 return 0;
560 } else {
561 /* Tagged command, see if blocked by a
562 * non-tagged one.
563 */
564 if (lp->non_tagged_cmd || lp->hold)
565 return -EBUSY;
566 }
567
568 BUG_ON(lp->tagged_cmds[ent->tag[1]]);
569
570 lp->tagged_cmds[ent->tag[1]] = ent;
571 lp->num_tagged++;
572
573 return 0;
574}
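/* Example of the queue plugging above (illustrative): with two tagged
 * commands outstanding, an arriving untagged command sets lp->hold and
 * gets -EBUSY; once esp_free_lun_tag() below drops num_tagged to zero,
 * the next attempt clears lp->hold and the untagged command takes the
 * slot, blocking further tagged issue until it completes.
 */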
575
576static void esp_free_lun_tag(struct esp_cmd_entry *ent,
577 struct esp_lun_data *lp)
578{
579 if (ent->tag[0]) {
580 BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
581 lp->tagged_cmds[ent->tag[1]] = NULL;
582 lp->num_tagged--;
583 } else {
584 BUG_ON(lp->non_tagged_cmd != ent);
585 lp->non_tagged_cmd = NULL;
586 }
587}
588
 589/* When a contingent allegiance condition is created, we force-feed a
590 * REQUEST_SENSE command to the device to fetch the sense data. I
591 * tried many other schemes, relying on the scsi error handling layer
592 * to send out the REQUEST_SENSE automatically, but this was difficult
593 * to get right especially in the presence of applications like smartd
594 * which use SG_IO to send out their own REQUEST_SENSE commands.
595 */
596static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
597{
598 struct scsi_cmnd *cmd = ent->cmd;
599 struct scsi_device *dev = cmd->device;
600 int tgt, lun;
601 u8 *p, val;
602
603 tgt = dev->id;
604 lun = dev->lun;
605
606
607 if (!ent->sense_ptr) {
608 esp_log_autosense("esp%d: Doing auto-sense for "
609 "tgt[%d] lun[%d]\n",
610 esp->host->unique_id, tgt, lun);
611
612 ent->sense_ptr = cmd->sense_buffer;
613 ent->sense_dma = esp->ops->map_single(esp,
614 ent->sense_ptr,
615 SCSI_SENSE_BUFFERSIZE,
616 DMA_FROM_DEVICE);
617 }
618 ent->saved_sense_ptr = ent->sense_ptr;
619
620 esp->active_cmd = ent;
621
622 p = esp->command_block;
623 esp->msg_out_len = 0;
624
625 *p++ = IDENTIFY(0, lun);
626 *p++ = REQUEST_SENSE;
627 *p++ = ((dev->scsi_level <= SCSI_2) ?
628 (lun << 5) : 0);
629 *p++ = 0;
630 *p++ = 0;
631 *p++ = SCSI_SENSE_BUFFERSIZE;
632 *p++ = 0;
633
634 esp->select_state = ESP_SELECT_BASIC;
635
636 val = tgt;
637 if (esp->rev == FASHME)
638 val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
639 esp_write8(val, ESP_BUSID);
640
641 esp_write_tgt_sync(esp, tgt);
642 esp_write_tgt_config3(esp, tgt);
643
644 val = (p - esp->command_block);
645
646 if (esp->rev == FASHME)
647 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
648 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
649 val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
650}
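/* For reference, the buffer esp_autosense() assembles above looks like
 * this for a SCSI-2 device at lun 2 (illustrative):
 *   IDENTIFY(0, 2), REQUEST_SENSE, 0x40, 0x00, 0x00,
 *   SCSI_SENSE_BUFFERSIZE, 0x00
 * i.e. a one-byte identify message followed by a six-byte CDB.
 */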
651
652static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
653{
654 struct esp_cmd_entry *ent;
655
656 list_for_each_entry(ent, &esp->queued_cmds, list) {
657 struct scsi_cmnd *cmd = ent->cmd;
658 struct scsi_device *dev = cmd->device;
659 struct esp_lun_data *lp = dev->hostdata;
660
661 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
662 ent->tag[0] = 0;
663 ent->tag[1] = 0;
664 return ent;
665 }
666
667 if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
668 ent->tag[0] = 0;
669 ent->tag[1] = 0;
670 }
671
672 if (esp_alloc_lun_tag(ent, lp) < 0)
673 continue;
674
675 return ent;
676 }
677
678 return NULL;
679}
680
681static void esp_maybe_execute_command(struct esp *esp)
682{
683 struct esp_target_data *tp;
684 struct esp_lun_data *lp;
685 struct scsi_device *dev;
686 struct scsi_cmnd *cmd;
687 struct esp_cmd_entry *ent;
688 int tgt, lun, i;
689 u32 val, start_cmd;
690 u8 *p;
691
692 if (esp->active_cmd ||
693 (esp->flags & ESP_FLAG_RESETTING))
694 return;
695
696 ent = find_and_prep_issuable_command(esp);
697 if (!ent)
698 return;
699
700 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
701 esp_autosense(esp, ent);
702 return;
703 }
704
705 cmd = ent->cmd;
706 dev = cmd->device;
707 tgt = dev->id;
708 lun = dev->lun;
709 tp = &esp->target[tgt];
710 lp = dev->hostdata;
711
712 list_del(&ent->list);
713 list_add(&ent->list, &esp->active_cmds);
714
715 esp->active_cmd = ent;
716
717 esp_map_dma(esp, cmd);
718 esp_save_pointers(esp, ent);
719
720 esp_check_command_len(esp, cmd);
721
722 p = esp->command_block;
723
724 esp->msg_out_len = 0;
725 if (tp->flags & ESP_TGT_CHECK_NEGO) {
726 /* Need to negotiate. If the target is broken
727 * go for synchronous transfers and non-wide.
728 */
729 if (tp->flags & ESP_TGT_BROKEN) {
730 tp->flags &= ~ESP_TGT_DISCONNECT;
731 tp->nego_goal_period = 0;
732 tp->nego_goal_offset = 0;
733 tp->nego_goal_width = 0;
734 tp->nego_goal_tags = 0;
735 }
736
737 /* If the settings are not changing, skip this. */
738 if (spi_width(tp->starget) == tp->nego_goal_width &&
739 spi_period(tp->starget) == tp->nego_goal_period &&
740 spi_offset(tp->starget) == tp->nego_goal_offset) {
741 tp->flags &= ~ESP_TGT_CHECK_NEGO;
742 goto build_identify;
743 }
744
745 if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
746 esp->msg_out_len =
747 spi_populate_width_msg(&esp->msg_out[0],
748 (tp->nego_goal_width ?
749 1 : 0));
750 tp->flags |= ESP_TGT_NEGO_WIDE;
751 } else if (esp_need_to_nego_sync(tp)) {
752 esp->msg_out_len =
753 spi_populate_sync_msg(&esp->msg_out[0],
754 tp->nego_goal_period,
755 tp->nego_goal_offset);
756 tp->flags |= ESP_TGT_NEGO_SYNC;
757 } else {
758 tp->flags &= ~ESP_TGT_CHECK_NEGO;
759 }
760
761 /* Process it like a slow command. */
762 if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
763 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
764 }
765
766build_identify:
767 /* If we don't have a lun-data struct yet, we're probing
768 * so do not disconnect. Also, do not disconnect unless
769 * we have a tag on this command.
770 */
771 if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
772 *p++ = IDENTIFY(1, lun);
773 else
774 *p++ = IDENTIFY(0, lun);
775
776 if (ent->tag[0] && esp->rev == ESP100) {
777 /* ESP100 lacks select w/atn3 command, use select
778 * and stop instead.
779 */
780 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
781 }
782
783 if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
784 start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
785 if (ent->tag[0]) {
786 *p++ = ent->tag[0];
787 *p++ = ent->tag[1];
788
789 start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
790 }
791
792 for (i = 0; i < cmd->cmd_len; i++)
793 *p++ = cmd->cmnd[i];
794
795 esp->select_state = ESP_SELECT_BASIC;
796 } else {
797 esp->cmd_bytes_left = cmd->cmd_len;
798 esp->cmd_bytes_ptr = &cmd->cmnd[0];
799
800 if (ent->tag[0]) {
801 for (i = esp->msg_out_len - 1;
802 i >= 0; i--)
803 esp->msg_out[i + 2] = esp->msg_out[i];
804 esp->msg_out[0] = ent->tag[0];
805 esp->msg_out[1] = ent->tag[1];
806 esp->msg_out_len += 2;
807 }
808
809 start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
810 esp->select_state = ESP_SELECT_MSGOUT;
811 }
812 val = tgt;
813 if (esp->rev == FASHME)
814 val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
815 esp_write8(val, ESP_BUSID);
816
817 esp_write_tgt_sync(esp, tgt);
818 esp_write_tgt_config3(esp, tgt);
819
820 val = (p - esp->command_block);
821
822 if (esp_debug & ESP_DEBUG_SCSICMD) {
823 printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
824 for (i = 0; i < cmd->cmd_len; i++)
825 printk("%02x ", cmd->cmnd[i]);
826 printk("]\n");
827 }
828
829 if (esp->rev == FASHME)
830 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
831 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
832 val, 16, 0, start_cmd);
833}
834
835static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
836{
837 struct list_head *head = &esp->esp_cmd_pool;
838 struct esp_cmd_entry *ret;
839
840 if (list_empty(head)) {
841 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
842 } else {
843 ret = list_entry(head->next, struct esp_cmd_entry, list);
844 list_del(&ret->list);
845 memset(ret, 0, sizeof(*ret));
846 }
847 return ret;
848}
849
850static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
851{
852 list_add(&ent->list, &esp->esp_cmd_pool);
853}
854
855static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
856 struct scsi_cmnd *cmd, unsigned int result)
857{
858 struct scsi_device *dev = cmd->device;
859 int tgt = dev->id;
860 int lun = dev->lun;
861
862 esp->active_cmd = NULL;
863 esp_unmap_dma(esp, cmd);
864 esp_free_lun_tag(ent, dev->hostdata);
865 cmd->result = result;
866
867 if (ent->eh_done) {
868 complete(ent->eh_done);
869 ent->eh_done = NULL;
870 }
871
872 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
873 esp->ops->unmap_single(esp, ent->sense_dma,
874 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
875 ent->sense_ptr = NULL;
876
877 /* Restore the message/status bytes to what we actually
878 * saw originally. Also, report that we are providing
879 * the sense data.
880 */
881 cmd->result = ((DRIVER_SENSE << 24) |
882 (DID_OK << 16) |
883 (COMMAND_COMPLETE << 8) |
884 (SAM_STAT_CHECK_CONDITION << 0));
885
886 ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
887 if (esp_debug & ESP_DEBUG_AUTOSENSE) {
888 int i;
889
890 printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
891 esp->host->unique_id, tgt, lun);
892 for (i = 0; i < 18; i++)
893 printk("%02x ", cmd->sense_buffer[i]);
894 printk("]\n");
895 }
896 }
897
898 cmd->scsi_done(cmd);
899
900 list_del(&ent->list);
901 esp_put_ent(esp, ent);
902
903 esp_maybe_execute_command(esp);
904}
905
906static unsigned int compose_result(unsigned int status, unsigned int message,
907 unsigned int driver_code)
908{
909 return (status | (message << 8) | (driver_code << 16));
910}
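/* For instance, compose_result(SAM_STAT_GOOD, COMMAND_COMPLETE, DID_OK)
 * yields 0, while compose_result(SAM_STAT_CHECK_CONDITION,
 * COMMAND_COMPLETE, DID_OK) yields 0x02: status in the low byte,
 * message in the next byte, and the DID_* code above that.
 */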
911
912static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
913{
914 struct scsi_device *dev = ent->cmd->device;
915 struct esp_lun_data *lp = dev->hostdata;
916
917 scsi_track_queue_full(dev, lp->num_tagged - 1);
918}
919
920static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
921{
922 struct scsi_device *dev = cmd->device;
923 struct esp *esp = host_to_esp(dev->host);
924 struct esp_cmd_priv *spriv;
925 struct esp_cmd_entry *ent;
926
927 ent = esp_get_ent(esp);
928 if (!ent)
929 return SCSI_MLQUEUE_HOST_BUSY;
930
931 ent->cmd = cmd;
932
933 cmd->scsi_done = done;
934
935 spriv = ESP_CMD_PRIV(cmd);
936 spriv->u.dma_addr = ~(dma_addr_t)0x0;
937
938 list_add_tail(&ent->list, &esp->queued_cmds);
939
940 esp_maybe_execute_command(esp);
941
942 return 0;
943}
944
945static int esp_check_gross_error(struct esp *esp)
946{
947 if (esp->sreg & ESP_STAT_SPAM) {
948 /* Gross Error, could be one of:
949 * - top of fifo overwritten
950 * - top of command register overwritten
951 * - DMA programmed with wrong direction
952 * - improper phase change
953 */
954 printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
955 esp->host->unique_id, esp->sreg);
956 /* XXX Reset the chip. XXX */
957 return 1;
958 }
959 return 0;
960}
961
962static int esp_check_spur_intr(struct esp *esp)
963{
964 switch (esp->rev) {
965 case ESP100:
966 case ESP100A:
967 /* The interrupt pending bit of the status register cannot
968 * be trusted on these revisions.
969 */
970 esp->sreg &= ~ESP_STAT_INTR;
971 break;
972
973 default:
974 if (!(esp->sreg & ESP_STAT_INTR)) {
975 esp->ireg = esp_read8(ESP_INTRPT);
976 if (esp->ireg & ESP_INTR_SR)
977 return 1;
978
979 /* If the DMA is indicating interrupt pending and the
980 * ESP is not, the only possibility is a DMA error.
981 */
982 if (!esp->ops->dma_error(esp)) {
983 printk(KERN_ERR PFX "esp%d: Spurious irq, "
984 "sreg=%x.\n",
985 esp->host->unique_id, esp->sreg);
986 return -1;
987 }
988
989 printk(KERN_ERR PFX "esp%d: DMA error\n",
990 esp->host->unique_id);
991
992 /* XXX Reset the chip. XXX */
993 return -1;
994 }
995 break;
996 }
997
998 return 0;
999}
1000
1001static void esp_schedule_reset(struct esp *esp)
1002{
1003 esp_log_reset("ESP: esp_schedule_reset() from %p\n",
1004 __builtin_return_address(0));
1005 esp->flags |= ESP_FLAG_RESETTING;
1006 esp_event(esp, ESP_EVENT_RESET);
1007}
1008
1009/* In order to avoid having to add a special half-reconnected state
1010 * into the driver we just sit here and poll through the rest of
1011 * the reselection process to get the tag message bytes.
1012 */
1013static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1014 struct esp_lun_data *lp)
1015{
1016 struct esp_cmd_entry *ent;
1017 int i;
1018
1019 if (!lp->num_tagged) {
1020 printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
1021 esp->host->unique_id);
1022 return NULL;
1023 }
1024
1025 esp_log_reconnect("ESP: reconnect tag, ");
1026
1027 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
1028 if (esp->ops->irq_pending(esp))
1029 break;
1030 }
1031 if (i == ESP_QUICKIRQ_LIMIT) {
1032 printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
1033 esp->host->unique_id);
1034 return NULL;
1035 }
1036
1037 esp->sreg = esp_read8(ESP_STATUS);
1038 esp->ireg = esp_read8(ESP_INTRPT);
1039
1040 esp_log_reconnect("IRQ(%d:%x:%x), ",
1041 i, esp->ireg, esp->sreg);
1042
1043 if (esp->ireg & ESP_INTR_DC) {
1044 printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
1045 esp->host->unique_id);
1046 return NULL;
1047 }
1048
1049 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
1050 printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
1051 esp->host->unique_id, esp->sreg);
1052 return NULL;
1053 }
1054
1055 /* DMA in the tag bytes... */
1056 esp->command_block[0] = 0xff;
1057 esp->command_block[1] = 0xff;
1058 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1059 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);
1060
 1061 /* ACK the message. */
1062 scsi_esp_cmd(esp, ESP_CMD_MOK);
1063
1064 for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
1065 if (esp->ops->irq_pending(esp)) {
1066 esp->sreg = esp_read8(ESP_STATUS);
1067 esp->ireg = esp_read8(ESP_INTRPT);
1068 if (esp->ireg & ESP_INTR_FDONE)
1069 break;
1070 }
1071 udelay(1);
1072 }
1073 if (i == ESP_RESELECT_TAG_LIMIT) {
1074 printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
1075 esp->host->unique_id);
1076 return NULL;
1077 }
1078 esp->ops->dma_drain(esp);
1079 esp->ops->dma_invalidate(esp);
1080
1081 esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
1082 i, esp->ireg, esp->sreg,
1083 esp->command_block[0],
1084 esp->command_block[1]);
1085
1086 if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
1087 esp->command_block[0] > ORDERED_QUEUE_TAG) {
1088 printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
1089 "type %02x.\n",
1090 esp->host->unique_id, esp->command_block[0]);
1091 return NULL;
1092 }
1093
1094 ent = lp->tagged_cmds[esp->command_block[1]];
1095 if (!ent) {
1096 printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
1097 "tag %02x.\n",
1098 esp->host->unique_id, esp->command_block[1]);
1099 return NULL;
1100 }
1101
1102 return ent;
1103}
1104
1105static int esp_reconnect(struct esp *esp)
1106{
1107 struct esp_cmd_entry *ent;
1108 struct esp_target_data *tp;
1109 struct esp_lun_data *lp;
1110 struct scsi_device *dev;
1111 int target, lun;
1112
1113 BUG_ON(esp->active_cmd);
1114 if (esp->rev == FASHME) {
1115 /* FASHME puts the target and lun numbers directly
1116 * into the fifo.
1117 */
1118 target = esp->fifo[0];
1119 lun = esp->fifo[1] & 0x7;
1120 } else {
1121 u8 bits = esp_read8(ESP_FDATA);
1122
1123 /* Older chips put the lun directly into the fifo, but
1124 * the target is given as a sample of the arbitration
1125 * lines on the bus at reselection time. So we should
1126 * see the ID of the ESP and the one reconnecting target
1127 * set in the bitmap.
1128 */
1129 if (!(bits & esp->scsi_id_mask))
1130 goto do_reset;
1131 bits &= ~esp->scsi_id_mask;
1132 if (!bits || (bits & (bits - 1)))
1133 goto do_reset;
1134
1135 target = ffs(bits) - 1;
1136 lun = (esp_read8(ESP_FDATA) & 0x7);
1137
1138 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1139 if (esp->rev == ESP100) {
1140 u8 ireg = esp_read8(ESP_INTRPT);
1141 /* This chip has a bug during reselection that can
1142 * cause a spurious illegal-command interrupt, which
1143 * we simply ACK here. Another possibility is a bus
1144 * reset so we must check for that.
1145 */
1146 if (ireg & ESP_INTR_SR)
1147 goto do_reset;
1148 }
1149 scsi_esp_cmd(esp, ESP_CMD_NULL);
1150 }
1151
1152 esp_write_tgt_sync(esp, target);
1153 esp_write_tgt_config3(esp, target);
1154
1155 scsi_esp_cmd(esp, ESP_CMD_MOK);
1156
1157 if (esp->rev == FASHME)
1158 esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
1159 ESP_BUSID);
1160
1161 tp = &esp->target[target];
1162 dev = __scsi_device_lookup_by_target(tp->starget, lun);
1163 if (!dev) {
1164 printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
1165 "tgt[%u] lun[%u]\n",
1166 esp->host->unique_id, target, lun);
1167 goto do_reset;
1168 }
1169 lp = dev->hostdata;
1170
1171 ent = lp->non_tagged_cmd;
1172 if (!ent) {
1173 ent = esp_reconnect_with_tag(esp, lp);
1174 if (!ent)
1175 goto do_reset;
1176 }
1177
1178 esp->active_cmd = ent;
1179
1180 if (ent->flags & ESP_CMD_FLAG_ABORT) {
1181 esp->msg_out[0] = ABORT_TASK_SET;
1182 esp->msg_out_len = 1;
1183 scsi_esp_cmd(esp, ESP_CMD_SATN);
1184 }
1185
1186 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1187 esp_restore_pointers(esp, ent);
1188 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1189 return 1;
1190
1191do_reset:
1192 esp_schedule_reset(esp);
1193 return 0;
1194}
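/* A note on the bit fiddling above (illustrative): if the fifo sample
 * reads 0x81 and our scsi_id_mask is 0x80, masking ourselves out leaves
 * 0x01, which passes the single-bit check (bits & (bits - 1)) == 0 and
 * yields target = ffs(0x01) - 1 = 0.
 */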
1195
1196static int esp_finish_select(struct esp *esp)
1197{
1198 struct esp_cmd_entry *ent;
1199 struct scsi_cmnd *cmd;
1200 u8 orig_select_state;
1201
1202 orig_select_state = esp->select_state;
1203
1204 /* No longer selecting. */
1205 esp->select_state = ESP_SELECT_NONE;
1206
1207 esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1208 ent = esp->active_cmd;
1209 cmd = ent->cmd;
1210
1211 if (esp->ops->dma_error(esp)) {
1212 /* If we see a DMA error during or as a result of selection,
1213 * all bets are off.
1214 */
1215 esp_schedule_reset(esp);
1216 esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
1217 return 0;
1218 }
1219
1220 esp->ops->dma_invalidate(esp);
1221
1222 if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1223 struct esp_target_data *tp = &esp->target[cmd->device->id];
1224
1225 /* Carefully back out of the selection attempt. Release
1226 * resources (such as DMA mapping & TAG) and reset state (such
1227 * as message out and command delivery variables).
1228 */
1229 if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1230 esp_unmap_dma(esp, cmd);
1231 esp_free_lun_tag(ent, cmd->device->hostdata);
1232 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
1233 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
1234 esp->cmd_bytes_ptr = NULL;
1235 esp->cmd_bytes_left = 0;
1236 } else {
1237 esp->ops->unmap_single(esp, ent->sense_dma,
1238 SCSI_SENSE_BUFFERSIZE,
1239 DMA_FROM_DEVICE);
1240 ent->sense_ptr = NULL;
1241 }
1242
1243 /* Now that the state is unwound properly, put back onto
1244 * the issue queue. This command is no longer active.
1245 */
1246 list_del(&ent->list);
1247 list_add(&ent->list, &esp->queued_cmds);
1248 esp->active_cmd = NULL;
1249
 1250 /* The return value is ignored by the caller, which directly
 1251 * invokes esp_reconnect().
 1252 */
1253 return 0;
1254 }
1255
1256 if (esp->ireg == ESP_INTR_DC) {
1257 struct scsi_device *dev = cmd->device;
1258
1259 /* Disconnect. Make sure we re-negotiate sync and
1260 * wide parameters if this target starts responding
1261 * again in the future.
1262 */
1263 esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1264
1265 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1266 esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
1267 return 1;
1268 }
1269
1270 if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1271 /* Selection successful. On pre-FAST chips we have
1272 * to do a NOP and possibly clean out the FIFO.
1273 */
1274 if (esp->rev <= ESP236) {
1275 int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1276
1277 scsi_esp_cmd(esp, ESP_CMD_NULL);
1278
1279 if (!fcnt &&
1280 (!esp->prev_soff ||
1281 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1282 esp_flush_fifo(esp);
1283 }
1284
1285 /* If we are doing a slow command, negotiation, etc.
1286 * we'll do the right thing as we transition to the
1287 * next phase.
1288 */
1289 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1290 return 0;
1291 }
1292
1293 printk("ESP: Unexpected selection completion ireg[%x].\n",
1294 esp->ireg);
1295 esp_schedule_reset(esp);
1296 return 0;
1297}
1298
1299static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
1300 struct scsi_cmnd *cmd)
1301{
1302 int fifo_cnt, ecount, bytes_sent, flush_fifo;
1303
1304 fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1305 if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
1306 fifo_cnt <<= 1;
1307
1308 ecount = 0;
1309 if (!(esp->sreg & ESP_STAT_TCNT)) {
1310 ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
1311 (((unsigned int)esp_read8(ESP_TCMED)) << 8));
1312 if (esp->rev == FASHME)
1313 ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
1314 }
1315
1316 bytes_sent = esp->data_dma_len;
1317 bytes_sent -= ecount;
1318
1319 if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1320 bytes_sent -= fifo_cnt;
1321
1322 flush_fifo = 0;
1323 if (!esp->prev_soff) {
1324 /* Synchronous data transfer, always flush fifo. */
1325 flush_fifo = 1;
1326 } else {
1327 if (esp->rev == ESP100) {
1328 u32 fflags, phase;
1329
1330 /* ESP100 has a chip bug where in the synchronous data
1331 * phase it can mistake a final long REQ pulse from the
1332 * target as an extra data byte. Fun.
1333 *
1334 * To detect this case we resample the status register
1335 * and fifo flags. If we're still in a data phase and
1336 * we see spurious chunks in the fifo, we return error
1337 * to the caller which should reset and set things up
1338 * such that we only try future transfers to this
1339 * target in synchronous mode.
1340 */
1341 esp->sreg = esp_read8(ESP_STATUS);
1342 phase = esp->sreg & ESP_STAT_PMASK;
1343 fflags = esp_read8(ESP_FFLAGS);
1344
1345 if ((phase == ESP_DOP &&
1346 (fflags & ESP_FF_ONOTZERO)) ||
1347 (phase == ESP_DIP &&
1348 (fflags & ESP_FF_FBYTES)))
1349 return -1;
1350 }
1351 if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1352 flush_fifo = 1;
1353 }
1354
1355 if (flush_fifo)
1356 esp_flush_fifo(esp);
1357
1358 return bytes_sent;
1359}
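/* Worked example (illustrative numbers): for a 4096-byte data-out
 * transfer (ESP_CMD_FLAG_WRITE clear), if the transfer counter reads
 * back 0x100 and two bytes still sit in the fifo, then
 * bytes_sent = 0x1000 - 0x100 - 2 = 0xefe bytes actually reached the
 * target.
 */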
1360
1361static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
1362 u8 scsi_period, u8 scsi_offset,
1363 u8 esp_stp, u8 esp_soff)
1364{
1365 spi_period(tp->starget) = scsi_period;
1366 spi_offset(tp->starget) = scsi_offset;
1367 spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;
1368
1369 if (esp_soff) {
1370 esp_stp &= 0x1f;
1371 esp_soff |= esp->radelay;
1372 if (esp->rev >= FAS236) {
1373 u8 bit = ESP_CONFIG3_FSCSI;
1374 if (esp->rev >= FAS100A)
1375 bit = ESP_CONFIG3_FAST;
1376
1377 if (scsi_period < 50) {
1378 if (esp->rev == FASHME)
1379 esp_soff &= ~esp->radelay;
1380 tp->esp_config3 |= bit;
1381 } else {
1382 tp->esp_config3 &= ~bit;
1383 }
1384 esp->prev_cfg3 = tp->esp_config3;
1385 esp_write8(esp->prev_cfg3, ESP_CFG3);
1386 }
1387 }
1388
1389 tp->esp_period = esp->prev_stp = esp_stp;
1390 tp->esp_offset = esp->prev_soff = esp_soff;
1391
1392 esp_write8(esp_soff, ESP_SOFF);
1393 esp_write8(esp_stp, ESP_STP);
1394
1395 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
1396
1397 spi_display_xfer_agreement(tp->starget);
1398}
1399
1400static void esp_msgin_reject(struct esp *esp)
1401{
1402 struct esp_cmd_entry *ent = esp->active_cmd;
1403 struct scsi_cmnd *cmd = ent->cmd;
1404 struct esp_target_data *tp;
1405 int tgt;
1406
1407 tgt = cmd->device->id;
1408 tp = &esp->target[tgt];
1409
1410 if (tp->flags & ESP_TGT_NEGO_WIDE) {
1411 tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);
1412
1413 if (!esp_need_to_nego_sync(tp)) {
1414 tp->flags &= ~ESP_TGT_CHECK_NEGO;
1415 scsi_esp_cmd(esp, ESP_CMD_RATN);
1416 } else {
1417 esp->msg_out_len =
1418 spi_populate_sync_msg(&esp->msg_out[0],
1419 tp->nego_goal_period,
1420 tp->nego_goal_offset);
1421 tp->flags |= ESP_TGT_NEGO_SYNC;
1422 scsi_esp_cmd(esp, ESP_CMD_SATN);
1423 }
1424 return;
1425 }
1426
1427 if (tp->flags & ESP_TGT_NEGO_SYNC) {
1428 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
1429 tp->esp_period = 0;
1430 tp->esp_offset = 0;
1431 esp_setsync(esp, tp, 0, 0, 0, 0);
1432 scsi_esp_cmd(esp, ESP_CMD_RATN);
1433 return;
1434 }
1435
1436 esp->msg_out[0] = ABORT_TASK_SET;
1437 esp->msg_out_len = 1;
1438 scsi_esp_cmd(esp, ESP_CMD_SATN);
1439}
1440
1441static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1442{
1443 u8 period = esp->msg_in[3];
1444 u8 offset = esp->msg_in[4];
1445 u8 stp;
1446
1447 if (!(tp->flags & ESP_TGT_NEGO_SYNC))
1448 goto do_reject;
1449
1450 if (offset > 15)
1451 goto do_reject;
1452
1453 if (offset) {
1454 int rounded_up, one_clock;
1455
1456 if (period > esp->max_period) {
1457 period = offset = 0;
1458 goto do_sdtr;
1459 }
1460 if (period < esp->min_period)
1461 goto do_reject;
1462
1463 one_clock = esp->ccycle / 1000;
1464 rounded_up = (period << 2);
1465 rounded_up = (rounded_up + one_clock - 1) / one_clock;
1466 stp = rounded_up;
1467 if (stp && esp->rev >= FAS236) {
1468 if (stp >= 50)
1469 stp--;
1470 }
1471 } else {
1472 stp = 0;
1473 }
1474
1475 esp_setsync(esp, tp, period, offset, stp, offset);
1476 return;
1477
1478do_reject:
1479 esp->msg_out[0] = MESSAGE_REJECT;
1480 esp->msg_out_len = 1;
1481 scsi_esp_cmd(esp, ESP_CMD_SATN);
1482 return;
1483
1484do_sdtr:
1485 tp->nego_goal_period = period;
1486 tp->nego_goal_offset = offset;
1487 esp->msg_out_len =
1488 spi_populate_sync_msg(&esp->msg_out[0],
1489 tp->nego_goal_period,
1490 tp->nego_goal_offset);
1491 scsi_esp_cmd(esp, ESP_CMD_SATN);
1492}
1493
1494static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
1495{
1496 int size = 8 << esp->msg_in[3];
1497 u8 cfg3;
1498
1499 if (esp->rev != FASHME)
1500 goto do_reject;
1501
1502 if (size != 8 && size != 16)
1503 goto do_reject;
1504
1505 if (!(tp->flags & ESP_TGT_NEGO_WIDE))
1506 goto do_reject;
1507
1508 cfg3 = tp->esp_config3;
1509 if (size == 16) {
1510 tp->flags |= ESP_TGT_WIDE;
1511 cfg3 |= ESP_CONFIG3_EWIDE;
1512 } else {
1513 tp->flags &= ~ESP_TGT_WIDE;
1514 cfg3 &= ~ESP_CONFIG3_EWIDE;
1515 }
1516 tp->esp_config3 = cfg3;
1517 esp->prev_cfg3 = cfg3;
1518 esp_write8(cfg3, ESP_CFG3);
1519
1520 tp->flags &= ~ESP_TGT_NEGO_WIDE;
1521
1522 spi_period(tp->starget) = 0;
1523 spi_offset(tp->starget) = 0;
1524 if (!esp_need_to_nego_sync(tp)) {
1525 tp->flags &= ~ESP_TGT_CHECK_NEGO;
1526 scsi_esp_cmd(esp, ESP_CMD_RATN);
1527 } else {
1528 esp->msg_out_len =
1529 spi_populate_sync_msg(&esp->msg_out[0],
1530 tp->nego_goal_period,
1531 tp->nego_goal_offset);
1532 tp->flags |= ESP_TGT_NEGO_SYNC;
1533 scsi_esp_cmd(esp, ESP_CMD_SATN);
1534 }
1535 return;
1536
1537do_reject:
1538 esp->msg_out[0] = MESSAGE_REJECT;
1539 esp->msg_out_len = 1;
1540 scsi_esp_cmd(esp, ESP_CMD_SATN);
1541}
1542
1543static void esp_msgin_extended(struct esp *esp)
1544{
1545 struct esp_cmd_entry *ent = esp->active_cmd;
1546 struct scsi_cmnd *cmd = ent->cmd;
1547 struct esp_target_data *tp;
1548 int tgt = cmd->device->id;
1549
1550 tp = &esp->target[tgt];
1551 if (esp->msg_in[2] == EXTENDED_SDTR) {
1552 esp_msgin_sdtr(esp, tp);
1553 return;
1554 }
1555 if (esp->msg_in[2] == EXTENDED_WDTR) {
1556 esp_msgin_wdtr(esp, tp);
1557 return;
1558 }
1559
1560 printk("ESP: Unexpected extended msg type %x\n",
1561 esp->msg_in[2]);
1562
1563 esp->msg_out[0] = ABORT_TASK_SET;
1564 esp->msg_out_len = 1;
1565 scsi_esp_cmd(esp, ESP_CMD_SATN);
1566}
1567
1568/* Analyze msgin bytes received from target so far. Return non-zero
1569 * if there are more bytes needed to complete the message.
1570 */
1571static int esp_msgin_process(struct esp *esp)
1572{
1573 u8 msg0 = esp->msg_in[0];
1574 int len = esp->msg_in_len;
1575
1576 if (msg0 & 0x80) {
1577 /* Identify */
1578 printk("ESP: Unexpected msgin identify\n");
1579 return 0;
1580 }
1581
1582 switch (msg0) {
1583 case EXTENDED_MESSAGE:
1584 if (len == 1)
1585 return 1;
1586 if (len < esp->msg_in[1] + 2)
1587 return 1;
1588 esp_msgin_extended(esp);
1589 return 0;
1590
1591 case IGNORE_WIDE_RESIDUE: {
1592 struct esp_cmd_entry *ent;
1593 struct esp_cmd_priv *spriv;
1594 if (len == 1)
1595 return 1;
1596
1597 if (esp->msg_in[1] != 1)
1598 goto do_reject;
1599
1600 ent = esp->active_cmd;
1601 spriv = ESP_CMD_PRIV(ent->cmd);
1602
1603 if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
1604 spriv->cur_sg--;
1605 spriv->cur_residue = 1;
1606 } else
1607 spriv->cur_residue++;
1608 spriv->tot_residue++;
1609 return 0;
1610 }
1611 case NOP:
1612 return 0;
1613 case RESTORE_POINTERS:
1614 esp_restore_pointers(esp, esp->active_cmd);
1615 return 0;
1616 case SAVE_POINTERS:
1617 esp_save_pointers(esp, esp->active_cmd);
1618 return 0;
1619
1620 case COMMAND_COMPLETE:
1621 case DISCONNECT: {
1622 struct esp_cmd_entry *ent = esp->active_cmd;
1623
1624 ent->message = msg0;
1625 esp_event(esp, ESP_EVENT_FREE_BUS);
1626 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1627 return 0;
1628 }
1629 case MESSAGE_REJECT:
1630 esp_msgin_reject(esp);
1631 return 0;
1632
1633 default:
1634 do_reject:
1635 esp->msg_out[0] = MESSAGE_REJECT;
1636 esp->msg_out_len = 1;
1637 scsi_esp_cmd(esp, ESP_CMD_SATN);
1638 return 0;
1639 }
1640}
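/* Sizing example for the EXTENDED_MESSAGE case above: an SDTR arrives
 * as { 0x01, 0x03, 0x01, period, offset }, so msg_in[1] == 3 and the
 * handler keeps returning 1 (more bytes needed) until all
 * msg_in[1] + 2 == 5 bytes are in.
 */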
1641
1642static int esp_process_event(struct esp *esp)
1643{
1644 int write;
1645
1646again:
1647 write = 0;
1648 switch (esp->event) {
1649 case ESP_EVENT_CHECK_PHASE:
1650 switch (esp->sreg & ESP_STAT_PMASK) {
1651 case ESP_DOP:
1652 esp_event(esp, ESP_EVENT_DATA_OUT);
1653 break;
1654 case ESP_DIP:
1655 esp_event(esp, ESP_EVENT_DATA_IN);
1656 break;
1657 case ESP_STATP:
1658 esp_flush_fifo(esp);
1659 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1660 esp_event(esp, ESP_EVENT_STATUS);
1661 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1662 return 1;
1663
1664 case ESP_MOP:
1665 esp_event(esp, ESP_EVENT_MSGOUT);
1666 break;
1667
1668 case ESP_MIP:
1669 esp_event(esp, ESP_EVENT_MSGIN);
1670 break;
1671
1672 case ESP_CMDP:
1673 esp_event(esp, ESP_EVENT_CMD_START);
1674 break;
1675
1676 default:
1677 printk("ESP: Unexpected phase, sreg=%02x\n",
1678 esp->sreg);
1679 esp_schedule_reset(esp);
1680 return 0;
1681 }
1682 goto again;
1683 break;
1684
1685 case ESP_EVENT_DATA_IN:
1686 write = 1;
1687 /* fallthru */
1688
1689 case ESP_EVENT_DATA_OUT: {
1690 struct esp_cmd_entry *ent = esp->active_cmd;
1691 struct scsi_cmnd *cmd = ent->cmd;
1692 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1693 unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1694
1695 if (esp->rev == ESP100)
1696 scsi_esp_cmd(esp, ESP_CMD_NULL);
1697
1698 if (write)
1699 ent->flags |= ESP_CMD_FLAG_WRITE;
1700 else
1701 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1702
1703 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1704 esp->data_dma_len = dma_len;
1705
1706 if (!dma_len) {
1707 printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
1708 esp->host->unique_id);
1709 printk(KERN_ERR PFX "esp%d: cur adr[%08x] len[%08x]\n",
1710 esp->host->unique_id,
1711 esp_cur_dma_addr(ent, cmd),
1712 esp_cur_dma_len(ent, cmd));
1713 esp_schedule_reset(esp);
1714 return 0;
1715 }
1716
1717 esp_log_datastart("ESP: start data addr[%08x] len[%u] "
1718 "write(%d)\n",
1719 dma_addr, dma_len, write);
1720
1721 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1722 write, ESP_CMD_DMA | ESP_CMD_TI);
1723 esp_event(esp, ESP_EVENT_DATA_DONE);
1724 break;
1725 }
1726 case ESP_EVENT_DATA_DONE: {
1727 struct esp_cmd_entry *ent = esp->active_cmd;
1728 struct scsi_cmnd *cmd = ent->cmd;
1729 int bytes_sent;
1730
1731 if (esp->ops->dma_error(esp)) {
1732 printk("ESP: data done, DMA error, resetting\n");
1733 esp_schedule_reset(esp);
1734 return 0;
1735 }
1736
1737 if (ent->flags & ESP_CMD_FLAG_WRITE) {
1738 /* XXX parity errors, etc. XXX */
1739
1740 esp->ops->dma_drain(esp);
1741 }
1742 esp->ops->dma_invalidate(esp);
1743
1744 if (esp->ireg != ESP_INTR_BSERV) {
1745 /* We should always see exactly a bus-service
1746 * interrupt at the end of a successful transfer.
1747 */
1748 printk("ESP: data done, not BSERV, resetting\n");
1749 esp_schedule_reset(esp);
1750 return 0;
1751 }
1752
1753 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1754
1755 esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
1756 ent->flags, bytes_sent);
1757
1758 if (bytes_sent < 0) {
1759 /* XXX force sync mode for this target XXX */
1760 esp_schedule_reset(esp);
1761 return 0;
1762 }
1763
1764 esp_advance_dma(esp, ent, cmd, bytes_sent);
1765 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1766 goto again;
1767 break;
1768 }
1769
1770 case ESP_EVENT_STATUS: {
1771 struct esp_cmd_entry *ent = esp->active_cmd;
1772
1773 if (esp->ireg & ESP_INTR_FDONE) {
1774 ent->status = esp_read8(ESP_FDATA);
1775 ent->message = esp_read8(ESP_FDATA);
1776 scsi_esp_cmd(esp, ESP_CMD_MOK);
1777 } else if (esp->ireg == ESP_INTR_BSERV) {
1778 ent->status = esp_read8(ESP_FDATA);
1779 ent->message = 0xff;
1780 esp_event(esp, ESP_EVENT_MSGIN);
1781 return 0;
1782 }
1783
1784 if (ent->message != COMMAND_COMPLETE) {
1785 printk("ESP: Unexpected message %x in status\n",
1786 ent->message);
1787 esp_schedule_reset(esp);
1788 return 0;
1789 }
1790
1791 esp_event(esp, ESP_EVENT_FREE_BUS);
1792 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1793 break;
1794 }
1795 case ESP_EVENT_FREE_BUS: {
1796 struct esp_cmd_entry *ent = esp->active_cmd;
1797 struct scsi_cmnd *cmd = ent->cmd;
1798
1799 if (ent->message == COMMAND_COMPLETE ||
1800 ent->message == DISCONNECT)
1801 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1802
1803 if (ent->message == COMMAND_COMPLETE) {
1804 esp_log_cmddone("ESP: Command done status[%x] "
1805 "message[%x]\n",
1806 ent->status, ent->message);
1807 if (ent->status == SAM_STAT_TASK_SET_FULL)
1808 esp_event_queue_full(esp, ent);
1809
1810 if (ent->status == SAM_STAT_CHECK_CONDITION &&
1811 !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1812 ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1813 esp_autosense(esp, ent);
1814 } else {
1815 esp_cmd_is_done(esp, ent, cmd,
1816 compose_result(ent->status,
1817 ent->message,
1818 DID_OK));
1819 }
1820 } else if (ent->message == DISCONNECT) {
1821 esp_log_disconnect("ESP: Disconnecting tgt[%d] "
1822 "tag[%x:%x]\n",
1823 cmd->device->id,
1824 ent->tag[0], ent->tag[1]);
1825
1826 esp->active_cmd = NULL;
1827 esp_maybe_execute_command(esp);
1828 } else {
1829 printk("ESP: Unexpected message %x in freebus\n",
1830 ent->message);
1831 esp_schedule_reset(esp);
1832 return 0;
1833 }
1834 if (esp->active_cmd)
1835 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1836 break;
1837 }
1838 case ESP_EVENT_MSGOUT: {
1839 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1840
1841 if (esp_debug & ESP_DEBUG_MSGOUT) {
1842 int i;
1843 printk("ESP: Sending message [ ");
1844 for (i = 0; i < esp->msg_out_len; i++)
1845 printk("%02x ", esp->msg_out[i]);
1846 printk("]\n");
1847 }
1848
1849 if (esp->rev == FASHME) {
1850 int i;
1851
1852 /* Always use the fifo. */
1853 for (i = 0; i < esp->msg_out_len; i++) {
1854 esp_write8(esp->msg_out[i], ESP_FDATA);
1855 esp_write8(0, ESP_FDATA);
1856 }
1857 scsi_esp_cmd(esp, ESP_CMD_TI);
1858 } else {
1859 if (esp->msg_out_len == 1) {
1860 esp_write8(esp->msg_out[0], ESP_FDATA);
1861 scsi_esp_cmd(esp, ESP_CMD_TI);
1862 } else {
1863 /* Use DMA. */
1864 memcpy(esp->command_block,
1865 esp->msg_out,
1866 esp->msg_out_len);
1867
1868 esp->ops->send_dma_cmd(esp,
1869 esp->command_block_dma,
1870 esp->msg_out_len,
1871 esp->msg_out_len,
1872 0,
1873 ESP_CMD_DMA|ESP_CMD_TI);
1874 }
1875 }
1876 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1877 break;
1878 }
1879 case ESP_EVENT_MSGOUT_DONE:
1880 if (esp->rev == FASHME) {
1881 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1882 } else {
1883 if (esp->msg_out_len > 1)
1884 esp->ops->dma_invalidate(esp);
1885 }
1886
1887 if (!(esp->ireg & ESP_INTR_DC)) {
1888 if (esp->rev != FASHME)
1889 scsi_esp_cmd(esp, ESP_CMD_NULL);
1890 }
1891 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1892 goto again;
1893 case ESP_EVENT_MSGIN:
1894 if (esp->ireg & ESP_INTR_BSERV) {
1895 if (esp->rev == FASHME) {
1896 if (!(esp_read8(ESP_STATUS2) &
1897 ESP_STAT2_FEMPTY))
1898 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1899 } else {
1900 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1901 if (esp->rev == ESP100)
1902 scsi_esp_cmd(esp, ESP_CMD_NULL);
1903 }
1904 scsi_esp_cmd(esp, ESP_CMD_TI);
1905 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1906 return 1;
1907 }
1908 if (esp->ireg & ESP_INTR_FDONE) {
1909 u8 val;
1910
1911 if (esp->rev == FASHME)
1912 val = esp->fifo[0];
1913 else
1914 val = esp_read8(ESP_FDATA);
1915 esp->msg_in[esp->msg_in_len++] = val;
1916
1917 esp_log_msgin("ESP: Got msgin byte %x\n", val);
1918
1919 if (!esp_msgin_process(esp))
1920 esp->msg_in_len = 0;
1921
1922 if (esp->rev == FASHME)
1923 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1924
1925 scsi_esp_cmd(esp, ESP_CMD_MOK);
1926
1927 if (esp->event != ESP_EVENT_FREE_BUS)
1928 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1929 } else {
 1930 printk("ESP: MSGIN neither BSERV nor FDONE, resetting\n");
1931 esp_schedule_reset(esp);
1932 return 0;
1933 }
1934 break;
1935 case ESP_EVENT_CMD_START:
1936 memcpy(esp->command_block, esp->cmd_bytes_ptr,
1937 esp->cmd_bytes_left);
1938 if (esp->rev == FASHME)
1939 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1940 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1941 esp->cmd_bytes_left, 16, 0,
1942 ESP_CMD_DMA | ESP_CMD_TI);
1943 esp_event(esp, ESP_EVENT_CMD_DONE);
1944 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1945 break;
1946 case ESP_EVENT_CMD_DONE:
1947 esp->ops->dma_invalidate(esp);
1948 if (esp->ireg & ESP_INTR_BSERV) {
1949 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1950 goto again;
1951 }
1952 esp_schedule_reset(esp);
1953 return 0;
1954 break;
1955
1956 case ESP_EVENT_RESET:
1957 scsi_esp_cmd(esp, ESP_CMD_RS);
1958 break;
1959
1960 default:
1961 printk("ESP: Unexpected event %x, resetting\n",
1962 esp->event);
1963 esp_schedule_reset(esp);
1964 return 0;
1965 break;
1966 }
1967 return 1;
1968}
1969
1970static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
1971{
1972 struct scsi_cmnd *cmd = ent->cmd;
1973
1974 esp_unmap_dma(esp, cmd);
1975 esp_free_lun_tag(ent, cmd->device->hostdata);
1976 cmd->result = DID_RESET << 16;
1977
1978 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
1979 esp->ops->unmap_single(esp, ent->sense_dma,
1980 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1981 ent->sense_ptr = NULL;
1982 }
1983
1984 cmd->scsi_done(cmd);
1985 list_del(&ent->list);
1986 esp_put_ent(esp, ent);
1987}
1988
1989static void esp_clear_hold(struct scsi_device *dev, void *data)
1990{
1991 struct esp_lun_data *lp = dev->hostdata;
1992
1993 BUG_ON(lp->num_tagged);
1994 lp->hold = 0;
1995}
1996
1997static void esp_reset_cleanup(struct esp *esp)
1998{
1999 struct esp_cmd_entry *ent, *tmp;
2000 int i;
2001
2002 list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2003 struct scsi_cmnd *cmd = ent->cmd;
2004
2005 list_del(&ent->list);
2006 cmd->result = DID_RESET << 16;
2007 cmd->scsi_done(cmd);
2008 esp_put_ent(esp, ent);
2009 }
2010
2011 list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2012 if (ent == esp->active_cmd)
2013 esp->active_cmd = NULL;
2014 esp_reset_cleanup_one(esp, ent);
2015 }
2016
2017 BUG_ON(esp->active_cmd != NULL);
2018
2019 /* Force renegotiation of sync/wide transfers. */
2020 for (i = 0; i < ESP_MAX_TARGET; i++) {
2021 struct esp_target_data *tp = &esp->target[i];
2022
2023 tp->esp_period = 0;
2024 tp->esp_offset = 0;
2025 tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
2026 ESP_CONFIG3_FSCSI |
2027 ESP_CONFIG3_FAST);
2028 tp->flags &= ~ESP_TGT_WIDE;
2029 tp->flags |= ESP_TGT_CHECK_NEGO;
2030
2031 if (tp->starget)
2032 starget_for_each_device(tp->starget, NULL,
2033 esp_clear_hold);
2034 }
2035}
2036
2037/* Runs under host->lock */
2038static void __esp_interrupt(struct esp *esp)
2039{
2040 int finish_reset, intr_done;
2041 u8 phase;
2042
2043 esp->sreg = esp_read8(ESP_STATUS);
2044
2045 if (esp->flags & ESP_FLAG_RESETTING) {
2046 finish_reset = 1;
2047 } else {
2048 if (esp_check_gross_error(esp))
2049 return;
2050
2051 finish_reset = esp_check_spur_intr(esp);
2052 if (finish_reset < 0)
2053 return;
2054 }
2055
2056 esp->ireg = esp_read8(ESP_INTRPT);
2057
2058 if (esp->ireg & ESP_INTR_SR)
2059 finish_reset = 1;
2060
2061 if (finish_reset) {
2062 esp_reset_cleanup(esp);
2063 if (esp->eh_reset) {
2064 complete(esp->eh_reset);
2065 esp->eh_reset = NULL;
2066 }
2067 return;
2068 }
2069
2070 phase = (esp->sreg & ESP_STAT_PMASK);
2071 if (esp->rev == FASHME) {
2072 if (((phase != ESP_DIP && phase != ESP_DOP) &&
2073 esp->select_state == ESP_SELECT_NONE &&
2074 esp->event != ESP_EVENT_STATUS &&
2075 esp->event != ESP_EVENT_DATA_DONE) ||
2076 (esp->ireg & ESP_INTR_RSEL)) {
2077 esp->sreg2 = esp_read8(ESP_STATUS2);
2078 if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2079 (esp->sreg2 & ESP_STAT2_F1BYTE))
2080 hme_read_fifo(esp);
2081 }
2082 }
2083
2084 esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
2085 "sreg2[%02x] ireg[%02x]\n",
2086 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2087
2088 intr_done = 0;
2089
2090 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2091 printk("ESP: unexpected IREG %02x\n", esp->ireg);
2092 if (esp->ireg & ESP_INTR_IC)
2093 esp_dump_cmd_log(esp);
2094
2095 esp_schedule_reset(esp);
2096 } else {
2097 if (!(esp->ireg & ESP_INTR_RSEL)) {
2098 /* Some combination of FDONE, BSERV, DC. */
2099 if (esp->select_state != ESP_SELECT_NONE)
2100 intr_done = esp_finish_select(esp);
2101 } else if (esp->ireg & ESP_INTR_RSEL) {
2102 if (esp->active_cmd)
2103 (void) esp_finish_select(esp);
2104 intr_done = esp_reconnect(esp);
2105 }
2106 }
2107 while (!intr_done)
2108 intr_done = esp_process_event(esp);
2109}
2110
2111irqreturn_t scsi_esp_intr(int irq, void *dev_id)
2112{
2113 struct esp *esp = dev_id;
2114 unsigned long flags;
2115 irqreturn_t ret;
2116
2117 spin_lock_irqsave(esp->host->host_lock, flags);
2118 ret = IRQ_NONE;
2119 if (esp->ops->irq_pending(esp)) {
2120 ret = IRQ_HANDLED;
2121 for (;;) {
2122 int i;
2123
2124 __esp_interrupt(esp);
2125 if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2126 break;
2127 esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2128
2129 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
2130 if (esp->ops->irq_pending(esp))
2131 break;
2132 }
2133 if (i == ESP_QUICKIRQ_LIMIT)
2134 break;
2135 }
2136 }
2137 spin_unlock_irqrestore(esp->host->host_lock, flags);
2138
2139 return ret;
2140}
2141EXPORT_SYMBOL(scsi_esp_intr);
2142
2143static void __devinit esp_get_revision(struct esp *esp)
2144{
2145 u8 val;
2146
2147 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2148 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2149 esp_write8(esp->config2, ESP_CFG2);
2150
2151 val = esp_read8(ESP_CFG2);
2152 val &= ~ESP_CONFIG2_MAGIC;
2153 if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
 2154 /* If what we write to cfg2 does not come back, cfg2 is not
 2155 * implemented; therefore this must be a plain ESP100.
 2156 */
2157 esp->rev = ESP100;
2158 } else {
2159 esp->config2 = 0;
2160 esp_set_all_config3(esp, 5);
2161 esp->prev_cfg3 = 5;
2162 esp_write8(esp->config2, ESP_CFG2);
2163 esp_write8(0, ESP_CFG3);
2164 esp_write8(esp->prev_cfg3, ESP_CFG3);
2165
2166 val = esp_read8(ESP_CFG3);
2167 if (val != 5) {
2168 /* The cfg2 register is implemented, however
2169 * cfg3 is not, must be esp100a.
2170 */
2171 esp->rev = ESP100A;
2172 } else {
2173 esp_set_all_config3(esp, 0);
2174 esp->prev_cfg3 = 0;
2175 esp_write8(esp->prev_cfg3, ESP_CFG3);
2176
2177 /* All of cfg{1,2,3} implemented, must be one of
2178 * the fas variants, figure out which one.
2179 */
2180 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2181 esp->rev = FAST;
2182 esp->sync_defp = SYNC_DEFP_FAST;
2183 } else {
2184 esp->rev = ESP236;
2185 }
2186 esp->config2 = 0;
2187 esp_write8(esp->config2, ESP_CFG2);
2188 }
2189 }
2190}

static void __devinit esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	/* Start with a clear state, domain validation (via ->slave_configure,
	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
	 * commands.
	 */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

/* This places the ESP into a known state at boot time. */
static void __devinit esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}

static void __devinit esp_set_clock_params(struct esp *esp)
{
	int fmhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place. We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 * This is a representation of the input crystal clock frequency
	 * going into the ESP on this machine. Any operation whose timing
	 * is longer than 400ns depends on this value being correct. For
	 * example, you'll get blips for arbitration/selection during high
	 * load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 * The ESP isn't very bright and will arbitrate for the bus and try
	 * to select a target forever if you let it. This value tells the
	 * ESP when it has taken too long to negotiate and that it should
	 * interrupt the CPU so we can see what happened. The value is
	 * computed as follows (from NCR/Symbios chip docs).
	 *
	 *            (Time Out Period) * (Input Clock)
	 *    STO = ----------------------------------
	 *            (8192) * (Clock Conversion Factor)
	 *
	 * We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 * This entails the smallest and largest sync period we could ever
	 * handle on this ESP.
	 */
	fmhz = esp->cfreq;

	ccf = ((fmhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;
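
	/*
	 * Worked example: a 20MHz crystal gives ccf = (20 + 4) / 5 = 4,
	 * and with the 250ms ESP_BUS_TIMEOUT the STO formula above
	 * yields (0.25 * 20000000) / (8192 * 4), i.e. roughly 153.
	 */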

	/* If we can't find anything reasonable, just assume 20MHz.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property. All
	 * other machines provide useful values it seems.
	 */
	if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
		fmhz = 20000000;
		ccf = 4;
	}

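	/* A clock conversion factor of 8 is encoded as 0 in the chip's
	 * clock factor register (cf. ESP_CCF_F0), hence the special
	 * case below.
	 */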
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fmhz;
	esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}

static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

static struct scsi_transport_template *esp_transport_template;

int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
	       esp->host->unique_id, esp->regs, esp->dma_regs,
	       esp->host->irq);
	printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
	       esp->host->unique_id, esp_chip_names[esp->rev],
	       esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);

void __devexit scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);

static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = host_to_esp(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

	tp->starget = dev->sdev_target;

	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}

static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = host_to_esp(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	int goal_tags, queue_depth;

	goal_tags = 0;

	if (dev->tagged_supported) {
		/* XXX make this configurable somehow XXX */
		goal_tags = ESP_DEFAULT_TAGS;

		if (goal_tags > ESP_MAX_TAG)
			goal_tags = ESP_MAX_TAG;
	}

	queue_depth = goal_tags;
	if (queue_depth < dev->host->cmd_per_lun)
		queue_depth = dev->host->cmd_per_lun;

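	/*
	 * With tags available, enable TCQ with ordered tags at the
	 * computed depth; otherwise the device runs untagged at the
	 * host's cmd_per_lun depth.
	 */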
	if (goal_tags) {
		scsi_set_tag_type(dev, MSG_ORDERED_TAG);
		scsi_activate_tcq(dev, queue_depth);
	} else {
		scsi_deactivate_tcq(dev, queue_depth);
	}
	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}

static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}

static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = host_to_esp(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
	       esp->host->unique_id, cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

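	/*
	 * Three cases from here: the command is still queued (trivial
	 * to abort), it is the currently active command (send an abort
	 * message), or it is disconnected (give up and let the scsi
	 * error handling layer escalate).
	 */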
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus. If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected. This is not easy to
		 * abort. For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target. Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyway, this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}

static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = host_to_esp(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;
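
	/*
	 * When the reset interrupt arrives, __esp_interrupt() sees
	 * ESP_INTR_SR set, runs esp_reset_cleanup(), and completes
	 * esp->eh_reset on our behalf.
	 */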

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}

/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = host_to_esp(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}

struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
	struct esp *esp = host_to_esp(host);
	enum spi_signal_type type;

	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
		type = SPI_SIGNAL_HVD;
	else
		type = SPI_SIGNAL_SE;

	spi_signalling(host) = type;
}

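/*
 * The spi transport callbacks below only record the negotiation goals;
 * ESP_TGT_CHECK_NEGO makes the driver renegotiate on the next command
 * issued to the target.
 */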
static void esp_set_offset(struct scsi_target *target, int offset)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = host_to_esp(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = host_to_esp(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = host_to_esp(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_width = (width ? 1 : 0);
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};

static int __init esp_init(void)
{
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));
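
	/*
	 * esp_cmd_priv is stored in the scsi_cmnd scsi_pointer area,
	 * so it must not outgrow it; the check above enforces that at
	 * compile time.
	 */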

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}

static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
);

module_init(esp_init);
module_exit(esp_exit);