]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/scsi/esp_scsi.c
esp_scsi: convert to dev_printk
[mirror_ubuntu-zesty-kernel.git] / drivers / scsi / esp_scsi.c
CommitLineData
cd9ad58d
DM
1/* esp_scsi.c: ESP SCSI driver.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
e1f2a094 16#include <linux/irqreturn.h>
cd9ad58d
DM
17
18#include <asm/irq.h>
19#include <asm/io.h>
20#include <asm/dma.h>
21
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_dbg.h>
28#include <scsi/scsi_transport_spi.h>
29
30#include "esp_scsi.h"
31
32#define DRV_MODULE_NAME "esp"
33#define PFX DRV_MODULE_NAME ": "
34#define DRV_VERSION "2.000"
35#define DRV_MODULE_RELDATE "April 19, 2007"
36
/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;

/* Bitmask of debug categories enabled for the esp_log_*() macros below. */
static u32 esp_debug;
#define ESP_DEBUG_INTR 0x00000001
#define ESP_DEBUG_SCSICMD 0x00000002
#define ESP_DEBUG_RESET 0x00000004
#define ESP_DEBUG_MSGIN 0x00000008
#define ESP_DEBUG_MSGOUT 0x00000010
#define ESP_DEBUG_CMDDONE 0x00000020
#define ESP_DEBUG_DISCONNECT 0x00000040
#define ESP_DEBUG_DATASTART 0x00000080
#define ESP_DEBUG_DATADONE 0x00000100
#define ESP_DEBUG_RECONNECT 0x00000200
#define ESP_DEBUG_AUTOSENSE 0x00000400
52
/* Per-category debug loggers.  Each expands to a shost_printk() that only
 * fires when the matching ESP_DEBUG_* bit is set in esp_debug; all of them
 * expect a local variable named 'esp' to be in scope.
 */
#define esp_log_intr(f, a...) \
do { if (esp_debug & ESP_DEBUG_INTR) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do { if (esp_debug & ESP_DEBUG_RESET) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do { if (esp_debug & ESP_DEBUG_MSGIN) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do { if (esp_debug & ESP_DEBUG_MSGOUT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do { if (esp_debug & ESP_DEBUG_CMDDONE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do { if (esp_debug & ESP_DEBUG_DATASTART) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do { if (esp_debug & ESP_DEBUG_DATADONE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do { if (esp_debug & ESP_DEBUG_RECONNECT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

/* Chip register accessors, indirected through the bus-specific ops vector. */
#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
105
106static void esp_log_fill_regs(struct esp *esp,
107 struct esp_event_ent *p)
108{
109 p->sreg = esp->sreg;
110 p->seqreg = esp->seqreg;
111 p->sreg2 = esp->sreg2;
112 p->ireg = esp->ireg;
113 p->select_state = esp->select_state;
114 p->event = esp->event;
115}
116
117void scsi_esp_cmd(struct esp *esp, u8 val)
118{
119 struct esp_event_ent *p;
120 int idx = esp->esp_event_cur;
121
122 p = &esp->esp_event_log[idx];
123 p->type = ESP_EVENT_TYPE_CMD;
124 p->val = val;
125 esp_log_fill_regs(esp, p);
126
127 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
128
129 esp_write8(val, ESP_CMD);
130}
131EXPORT_SYMBOL(scsi_esp_cmd);
132
133static void esp_event(struct esp *esp, u8 val)
134{
135 struct esp_event_ent *p;
136 int idx = esp->esp_event_cur;
137
138 p = &esp->esp_event_log[idx];
139 p->type = ESP_EVENT_TYPE_EVENT;
140 p->val = val;
141 esp_log_fill_regs(esp, p);
142
143 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
144
145 esp->event = val;
146}
147
148static void esp_dump_cmd_log(struct esp *esp)
149{
150 int idx = esp->esp_event_cur;
151 int stop = idx;
152
a1a75b35 153 shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
cd9ad58d
DM
154 do {
155 struct esp_event_ent *p = &esp->esp_event_log[idx];
156
a1a75b35
HR
157 shost_printk(KERN_INFO, esp->host,
158 "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
159 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
160 idx,
161 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
162 p->val, p->sreg, p->seqreg,
163 p->sreg2, p->ireg, p->select_state, p->event);
cd9ad58d
DM
164
165 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
166 } while (idx != stop);
167}
168
169static void esp_flush_fifo(struct esp *esp)
170{
171 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
172 if (esp->rev == ESP236) {
173 int lim = 1000;
174
175 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
176 if (--lim == 0) {
a1a75b35
HR
177 shost_printk(KERN_ALERT, esp->host,
178 "ESP_FF_BYTES will not clear!\n");
cd9ad58d
DM
179 break;
180 }
181 udelay(1);
182 }
183 }
184}
185
/* Drain the HME's 16-bit wide fifo into esp->fifo[].  Each flag count
 * corresponds to two byte reads from ESP_FDATA; a trailing odd byte is
 * indicated by ESP_STAT2_F1BYTE and needs a dummy write to shift it out.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
202
203static void esp_set_all_config3(struct esp *esp, u8 val)
204{
205 int i;
206
207 for (i = 0; i < ESP_MAX_TARGET; i++)
208 esp->target[i].esp_config3 = val;
209}
210
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	/* FENAB must be set before the unique-ID register is readable. */
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	/* Round the clock-cycle derived periods up to whole units. */
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	esp_write8(esp->config1, ESP_CFG1);
	/* Revision-specific configuration of CFG2/CFG3 and the
	 * reselect-acknowledge delay (radelay).
	 */
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
322
323static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
324{
325 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
4c2baaaf 326 struct scatterlist *sg = scsi_sglist(cmd);
cd9ad58d
DM
327 int dir = cmd->sc_data_direction;
328 int total, i;
329
330 if (dir == DMA_NONE)
331 return;
332
4c2baaaf 333 spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
cd9ad58d
DM
334 spriv->cur_residue = sg_dma_len(sg);
335 spriv->cur_sg = sg;
336
337 total = 0;
338 for (i = 0; i < spriv->u.num_sg; i++)
339 total += sg_dma_len(&sg[i]);
340 spriv->tot_residue = total;
341}
342
343static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
344 struct scsi_cmnd *cmd)
345{
346 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
347
348 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
349 return ent->sense_dma +
350 (ent->sense_ptr - cmd->sense_buffer);
351 }
352
353 return sg_dma_address(p->cur_sg) +
354 (sg_dma_len(p->cur_sg) -
355 p->cur_residue);
356}
357
358static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
359 struct scsi_cmnd *cmd)
360{
361 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
362
363 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
364 return SCSI_SENSE_BUFFERSIZE -
365 (ent->sense_ptr - cmd->sense_buffer);
366 }
367 return p->cur_residue;
368}
369
370static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
371 struct scsi_cmnd *cmd, unsigned int len)
372{
373 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
374
375 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
376 ent->sense_ptr += len;
377 return;
378 }
379
380 p->cur_residue -= len;
381 p->tot_residue -= len;
382 if (p->cur_residue < 0 || p->tot_residue < 0) {
a1a75b35
HR
383 shost_printk(KERN_ERR, esp->host,
384 "Data transfer overflow.\n");
385 shost_printk(KERN_ERR, esp->host,
386 "cur_residue[%d] tot_residue[%d] len[%u]\n",
387 p->cur_residue, p->tot_residue, len);
cd9ad58d
DM
388 p->cur_residue = 0;
389 p->tot_residue = 0;
390 }
391 if (!p->cur_residue && p->tot_residue) {
392 p->cur_sg++;
393 p->cur_residue = sg_dma_len(p->cur_sg);
394 }
395}
396
397static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
398{
399 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
400 int dir = cmd->sc_data_direction;
401
402 if (dir == DMA_NONE)
403 return;
404
4c2baaaf 405 esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
cd9ad58d
DM
406}
407
408static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
409{
410 struct scsi_cmnd *cmd = ent->cmd;
411 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
412
413 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
414 ent->saved_sense_ptr = ent->sense_ptr;
415 return;
416 }
417 ent->saved_cur_residue = spriv->cur_residue;
418 ent->saved_cur_sg = spriv->cur_sg;
419 ent->saved_tot_residue = spriv->tot_residue;
420}
421
422static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
423{
424 struct scsi_cmnd *cmd = ent->cmd;
425 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
426
427 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
428 ent->sense_ptr = ent->saved_sense_ptr;
429 return;
430 }
431 spriv->cur_residue = ent->saved_cur_residue;
432 spriv->cur_sg = ent->saved_cur_sg;
433 spriv->tot_residue = ent->saved_tot_residue;
434}
435
436static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
437{
438 if (cmd->cmd_len == 6 ||
439 cmd->cmd_len == 10 ||
440 cmd->cmd_len == 12) {
441 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
442 } else {
443 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
444 }
445}
446
447static void esp_write_tgt_config3(struct esp *esp, int tgt)
448{
449 if (esp->rev > ESP100A) {
450 u8 val = esp->target[tgt].esp_config3;
451
452 if (val != esp->prev_cfg3) {
453 esp->prev_cfg3 = val;
454 esp_write8(val, ESP_CFG3);
455 }
456 }
457}
458
459static void esp_write_tgt_sync(struct esp *esp, int tgt)
460{
461 u8 off = esp->target[tgt].esp_offset;
462 u8 per = esp->target[tgt].esp_period;
463
464 if (off != esp->prev_soff) {
465 esp->prev_soff = off;
466 esp_write8(off, ESP_SOFF);
467 }
468 if (per != esp->prev_stp) {
469 esp->prev_stp = per;
470 esp_write8(per, ESP_STP);
471 }
472}
473
474static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
475{
476 if (esp->rev == FASHME) {
477 /* Arbitrary segment boundaries, 24-bit counts. */
478 if (dma_len > (1U << 24))
479 dma_len = (1U << 24);
480 } else {
481 u32 base, end;
482
483 /* ESP chip limits other variants by 16-bits of transfer
484 * count. Actually on FAS100A and FAS236 we could get
485 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
486 * in the ESP_CFG2 register but that causes other unwanted
487 * changes so we don't use it currently.
488 */
489 if (dma_len > (1U << 16))
490 dma_len = (1U << 16);
491
492 /* All of the DMA variants hooked up to these chips
493 * cannot handle crossing a 24-bit address boundary.
494 */
495 base = dma_addr & ((1U << 24) - 1U);
496 end = base + dma_len;
497 if (end > (1U << 24))
498 end = (1U <<24);
499 dma_len = end - base;
500 }
501 return dma_len;
502}
503
504static int esp_need_to_nego_wide(struct esp_target_data *tp)
505{
506 struct scsi_target *target = tp->starget;
507
508 return spi_width(target) != tp->nego_goal_width;
509}
510
511static int esp_need_to_nego_sync(struct esp_target_data *tp)
512{
513 struct scsi_target *target = tp->starget;
514
515 /* When offset is zero, period is "don't care". */
516 if (!spi_offset(target) && !tp->nego_goal_offset)
517 return 0;
518
519 if (spi_offset(target) == tp->nego_goal_offset &&
520 spi_period(target) == tp->nego_goal_period)
521 return 0;
522
523 return 1;
524}
525
/* Claim either the per-LUN untagged slot or a tagged slot for @ent.
 * Untagged and tagged commands must not be outstanding simultaneously,
 * so lp->hold plugs the queue while the other kind drains.
 * Returns 0 on success, -EBUSY when the command must wait.
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	/* Record the tagged command, indexed by its tag value. */
	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
570
571static void esp_free_lun_tag(struct esp_cmd_entry *ent,
572 struct esp_lun_data *lp)
573{
21af8107
DM
574 if (ent->orig_tag[0]) {
575 BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
576 lp->tagged_cmds[ent->orig_tag[1]] = NULL;
cd9ad58d
DM
577 lp->num_tagged--;
578 } else {
579 BUG_ON(lp->non_tagged_cmd != ent);
580 lp->non_tagged_cmd = NULL;
581 }
582}
583
/* When a contingent allegiance conditon is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);

		/* Map the midlayer sense buffer for DMA from the device. */
		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	/* Build IDENTIFY plus a 6-byte REQUEST_SENSE CDB in the
	 * command block.
	 */
	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}
645
/* Scan the queued list for the first command that can be issued now.
 * Autosense requests always go out untagged; other commands get a tag
 * message from the midlayer when possible and must then win a LUN slot
 * via esp_alloc_lun_tag().  Returns NULL if nothing is issuable.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		/* Remember the original tag; ent->tag[] may be rewritten
		 * during abort handling.
		 */
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
676
/* If the chip is idle and a command is issuable, set it up and start
 * selection.  Handles negotiation message construction, tagged vs.
 * untagged issue, and the slow-command (select-and-stop) path used for
 * odd CDB lengths, negotiation, and ESP100 tagged commands.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		/* Fast path: IDENTIFY (+tag) and the whole CDB are
		 * DMA'd to the chip in one go.
		 */
		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		/* Slow path: CDB bytes are fed later during the
		 * message-out/command phases; prepend any tag to the
		 * message-out buffer.
		 */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, start_cmd);
}
829
830static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
831{
832 struct list_head *head = &esp->esp_cmd_pool;
833 struct esp_cmd_entry *ret;
834
835 if (list_empty(head)) {
836 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
837 } else {
838 ret = list_entry(head->next, struct esp_cmd_entry, list);
839 list_del(&ret->list);
840 memset(ret, 0, sizeof(*ret));
841 }
842 return ret;
843}
844
/* Return a command entry to the free pool for reuse by esp_get_ent(). */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
849
850static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
851 struct scsi_cmnd *cmd, unsigned int result)
852{
853 struct scsi_device *dev = cmd->device;
854 int tgt = dev->id;
855 int lun = dev->lun;
856
857 esp->active_cmd = NULL;
858 esp_unmap_dma(esp, cmd);
859 esp_free_lun_tag(ent, dev->hostdata);
860 cmd->result = result;
861
862 if (ent->eh_done) {
863 complete(ent->eh_done);
864 ent->eh_done = NULL;
865 }
866
867 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
868 esp->ops->unmap_single(esp, ent->sense_dma,
869 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
870 ent->sense_ptr = NULL;
871
872 /* Restore the message/status bytes to what we actually
873 * saw originally. Also, report that we are providing
874 * the sense data.
875 */
876 cmd->result = ((DRIVER_SENSE << 24) |
877 (DID_OK << 16) |
878 (COMMAND_COMPLETE << 8) |
879 (SAM_STAT_CHECK_CONDITION << 0));
880
881 ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
882 if (esp_debug & ESP_DEBUG_AUTOSENSE) {
883 int i;
884
885 printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
886 esp->host->unique_id, tgt, lun);
887 for (i = 0; i < 18; i++)
888 printk("%02x ", cmd->sense_buffer[i]);
889 printk("]\n");
890 }
891 }
892
893 cmd->scsi_done(cmd);
894
895 list_del(&ent->list);
896 esp_put_ent(esp, ent);
897
898 esp_maybe_execute_command(esp);
899}
900
/* Pack a SCSI result word: status in byte 0, message in byte 1,
 * driver code in byte 2.
 */
static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	unsigned int res = driver_code << 16;

	res |= message << 8;
	res |= status;
	return res;
}
906
907static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
908{
909 struct scsi_device *dev = ent->cmd->device;
910 struct esp_lun_data *lp = dev->hostdata;
911
912 scsi_track_queue_full(dev, lp->num_tagged - 1);
913}
914
f281233d 915static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
cd9ad58d
DM
916{
917 struct scsi_device *dev = cmd->device;
2b14ec78 918 struct esp *esp = shost_priv(dev->host);
cd9ad58d
DM
919 struct esp_cmd_priv *spriv;
920 struct esp_cmd_entry *ent;
921
922 ent = esp_get_ent(esp);
923 if (!ent)
924 return SCSI_MLQUEUE_HOST_BUSY;
925
926 ent->cmd = cmd;
927
928 cmd->scsi_done = done;
929
930 spriv = ESP_CMD_PRIV(cmd);
931 spriv->u.dma_addr = ~(dma_addr_t)0x0;
932
933 list_add_tail(&ent->list, &esp->queued_cmds);
934
935 esp_maybe_execute_command(esp);
936
937 return 0;
938}
939
f281233d
JG
940static DEF_SCSI_QCMD(esp_queuecommand)
941
cd9ad58d
DM
942static int esp_check_gross_error(struct esp *esp)
943{
944 if (esp->sreg & ESP_STAT_SPAM) {
945 /* Gross Error, could be one of:
946 * - top of fifo overwritten
947 * - top of command register overwritten
948 * - DMA programmed with wrong direction
949 * - improper phase change
950 */
a1a75b35
HR
951 shost_printk(KERN_ERR, esp->host,
952 "Gross error sreg[%02x]\n", esp->sreg);
cd9ad58d
DM
953 /* XXX Reset the chip. XXX */
954 return 1;
955 }
956 return 0;
957}
958
/* Sanity-check the interrupt we took.  Returns 0 for a genuine chip
 * interrupt, 1 when a bus reset was latched while the status register
 * claims no IRQ, and -1 for a spurious interrupt or a DMA error.
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}
996
/* Request a full SCSI bus reset from the main event loop; the caller's
 * address is logged to help track down what triggered it.
 */
static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}
1004
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	/* Busy-wait (bounded) for the first interrupt of the tag
	 * message transfer.
	 */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	/* The bus must be in message-in phase for the tag bytes. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll (bounded) for the function-done interrupt that marks
	 * completion of the two-byte transfer.
	 */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	/* Map the tag value back to the outstanding command. */
	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1099
/* Handle a target reselecting us after a disconnect: work out which
 * target/lun reconnected, find its outstanding command (untagged or via
 * tag bytes) and resume it.  Returns 1 on success, 0 after scheduling a
 * bus reset on any inconsistency.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one other ID bit must remain set. */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	/* If an abort was requested while the command was disconnected,
	 * deliver it now via message-out.
	 */
	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1190
1191static int esp_finish_select(struct esp *esp)
1192{
1193 struct esp_cmd_entry *ent;
1194 struct scsi_cmnd *cmd;
1195 u8 orig_select_state;
1196
1197 orig_select_state = esp->select_state;
1198
1199 /* No longer selecting. */
1200 esp->select_state = ESP_SELECT_NONE;
1201
1202 esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1203 ent = esp->active_cmd;
1204 cmd = ent->cmd;
1205
1206 if (esp->ops->dma_error(esp)) {
1207 /* If we see a DMA error during or as a result of selection,
1208 * all bets are off.
1209 */
1210 esp_schedule_reset(esp);
1211 esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
1212 return 0;
1213 }
1214
1215 esp->ops->dma_invalidate(esp);
1216
1217 if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1218 struct esp_target_data *tp = &esp->target[cmd->device->id];
1219
1220 /* Carefully back out of the selection attempt. Release
1221 * resources (such as DMA mapping & TAG) and reset state (such
1222 * as message out and command delivery variables).
1223 */
1224 if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1225 esp_unmap_dma(esp, cmd);
1226 esp_free_lun_tag(ent, cmd->device->hostdata);
1227 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
1228 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
1229 esp->cmd_bytes_ptr = NULL;
1230 esp->cmd_bytes_left = 0;
1231 } else {
1232 esp->ops->unmap_single(esp, ent->sense_dma,
1233 SCSI_SENSE_BUFFERSIZE,
1234 DMA_FROM_DEVICE);
1235 ent->sense_ptr = NULL;
1236 }
1237
1238 /* Now that the state is unwound properly, put back onto
1239 * the issue queue. This command is no longer active.
1240 */
63ce2499 1241 list_move(&ent->list, &esp->queued_cmds);
cd9ad58d
DM
1242 esp->active_cmd = NULL;
1243
1244 /* Return value ignored by caller, it directly invokes
1245 * esp_reconnect().
1246 */
1247 return 0;
1248 }
1249
1250 if (esp->ireg == ESP_INTR_DC) {
1251 struct scsi_device *dev = cmd->device;
1252
1253 /* Disconnect. Make sure we re-negotiate sync and
1254 * wide parameters if this target starts responding
1255 * again in the future.
1256 */
1257 esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1258
1259 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1260 esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
1261 return 1;
1262 }
1263
1264 if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1265 /* Selection successful. On pre-FAST chips we have
1266 * to do a NOP and possibly clean out the FIFO.
1267 */
1268 if (esp->rev <= ESP236) {
1269 int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1270
1271 scsi_esp_cmd(esp, ESP_CMD_NULL);
1272
1273 if (!fcnt &&
1274 (!esp->prev_soff ||
1275 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1276 esp_flush_fifo(esp);
1277 }
1278
1279 /* If we are doing a slow command, negotiation, etc.
1280 * we'll do the right thing as we transition to the
1281 * next phase.
1282 */
1283 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1284 return 0;
1285 }
1286
a1a75b35
HR
1287 shost_printk(KERN_INFO, esp->host,
1288 "Unexpected selection completion ireg[%x]\n", esp->ireg);
cd9ad58d
DM
1289 esp_schedule_reset(esp);
1290 return 0;
1291}
1292
/* Compute how many bytes of the current data phase actually completed,
 * by subtracting the chip's residual transfer counter (and, for data-out,
 * bytes still sitting in the FIFO) from the programmed DMA length.
 *
 * Returns the byte count, or -1 when the ESP100 sync-transfer erratum is
 * detected so the caller can reset and avoid sync mode for this target.
 * Flushes the FIFO as a side effect when required.
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* In wide mode each FIFO slot holds two bytes. */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Residual count is only meaningful if the transfer counter did
	 * not expire (STAT_TCNT clear).
	 */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/* On data-out, bytes left in the FIFO never reached the target. */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1354
/* Commit a negotiated synchronous transfer agreement for a target.
 *
 * @scsi_period/@scsi_offset: values as exchanged in the SDTR message,
 *     recorded in the SPI transport class for sysfs display.
 * @esp_stp/@esp_soff: the corresponding chip register encodings written
 *     to ESP_STP / ESP_SOFF.
 *
 * An offset of 0 means asynchronous transfers.  For FAS-class chips a
 * period under 200ns (scsi_period < 50) additionally enables the
 * fast-SCSI config3 bit.  Clears the sync-negotiation-pending flags.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				/* FASHME needs no REQ/ACK delay at fast
				 * speed.
				 */
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Remember what is in the hardware so we can avoid redundant
	 * reprogramming later.
	 */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1393
/* Handle a MESSAGE REJECT from the target.
 *
 * If we were negotiating wide transfers, fall back: either follow up
 * with sync negotiation or give up negotiating entirely.  If we were
 * negotiating sync, drop to async.  A reject of anything else is
 * unexpected, so abort the task set.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* Target rejected WDTR: stay narrow. */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Still want sync: queue an SDTR message out. */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Target rejected SDTR: run asynchronous. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	/* Unexpected reject: abort the current task set. */
	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1434
/* Process an incoming SDTR (synchronous data transfer request) message.
 *
 * msg_in[3] is the transfer period (in 4ns units), msg_in[4] the REQ/ACK
 * offset.  Validates the proposal against our limits, converts the
 * period to the chip's STP register encoding, and commits the agreement
 * via esp_setsync().  Rejects unsolicited or out-of-range proposals;
 * counter-proposes async when the period is too long.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* Only accept SDTR we actually initiated. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	/* Offset above 15 exceeds what the chip supports. */
	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			/* Too slow for us: counter-propose async. */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* ccycle is in picoseconds; one_clock becomes ns and the
		 * period (4ns units) is scaled to clock ticks.
		 */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		/* Zero offset means asynchronous. */
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	/* Send our own SDTR counter-proposal. */
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1485
/* Process an incoming WDTR (wide data transfer request) message.
 *
 * msg_in[3] is the transfer-width exponent (bus width = 8 << value).
 * Only the FASHME chip supports wide transfers, and only 8- or 16-bit
 * widths are valid; anything else, or an unsolicited WDTR, is rejected.
 * On success the config3 EWIDE bit is programmed and, per the SPI spec,
 * any previous sync agreement is invalidated (sync is renegotiated
 * after wide).
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Wide agreement resets any sync agreement. */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		/* Follow up with sync negotiation. */
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1534
1535static void esp_msgin_extended(struct esp *esp)
1536{
1537 struct esp_cmd_entry *ent = esp->active_cmd;
1538 struct scsi_cmnd *cmd = ent->cmd;
1539 struct esp_target_data *tp;
1540 int tgt = cmd->device->id;
1541
1542 tp = &esp->target[tgt];
1543 if (esp->msg_in[2] == EXTENDED_SDTR) {
1544 esp_msgin_sdtr(esp, tp);
1545 return;
1546 }
1547 if (esp->msg_in[2] == EXTENDED_WDTR) {
1548 esp_msgin_wdtr(esp, tp);
1549 return;
1550 }
1551
a1a75b35
HR
1552 shost_printk(KERN_INFO, esp->host,
1553 "Unexpected extended msg type %x\n", esp->msg_in[2]);
cd9ad58d
DM
1554
1555 esp->msg_out[0] = ABORT_TASK_SET;
1556 esp->msg_out_len = 1;
1557 scsi_esp_cmd(esp, ESP_CMD_SATN);
1558}
1559
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Need at least the length byte, then the full payload
		 * (length + 2-byte header) before dispatching.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		/* Two-byte message; wait for the residue count. */
		if (len == 1)
			return 1;

		/* Only a residue of one byte is meaningful here. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Give one byte back to the data pointers; step back a
		 * scatterlist entry if the current one was untouched.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Record the message; the FREE_BUS event finishes or
		 * suspends the command.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1634
1635static int esp_process_event(struct esp *esp)
1636{
1637 int write;
1638
1639again:
1640 write = 0;
1641 switch (esp->event) {
1642 case ESP_EVENT_CHECK_PHASE:
1643 switch (esp->sreg & ESP_STAT_PMASK) {
1644 case ESP_DOP:
1645 esp_event(esp, ESP_EVENT_DATA_OUT);
1646 break;
1647 case ESP_DIP:
1648 esp_event(esp, ESP_EVENT_DATA_IN);
1649 break;
1650 case ESP_STATP:
1651 esp_flush_fifo(esp);
1652 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1653 esp_event(esp, ESP_EVENT_STATUS);
1654 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1655 return 1;
1656
1657 case ESP_MOP:
1658 esp_event(esp, ESP_EVENT_MSGOUT);
1659 break;
1660
1661 case ESP_MIP:
1662 esp_event(esp, ESP_EVENT_MSGIN);
1663 break;
1664
1665 case ESP_CMDP:
1666 esp_event(esp, ESP_EVENT_CMD_START);
1667 break;
1668
1669 default:
a1a75b35
HR
1670 shost_printk(KERN_INFO, esp->host,
1671 "Unexpected phase, sreg=%02x\n",
1672 esp->sreg);
cd9ad58d
DM
1673 esp_schedule_reset(esp);
1674 return 0;
1675 }
1676 goto again;
1677 break;
1678
1679 case ESP_EVENT_DATA_IN:
1680 write = 1;
1681 /* fallthru */
1682
1683 case ESP_EVENT_DATA_OUT: {
1684 struct esp_cmd_entry *ent = esp->active_cmd;
1685 struct scsi_cmnd *cmd = ent->cmd;
1686 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1687 unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1688
1689 if (esp->rev == ESP100)
1690 scsi_esp_cmd(esp, ESP_CMD_NULL);
1691
1692 if (write)
1693 ent->flags |= ESP_CMD_FLAG_WRITE;
1694 else
1695 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1696
6fe07aaf
FT
1697 if (esp->ops->dma_length_limit)
1698 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1699 dma_len);
1700 else
1701 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1702
cd9ad58d
DM
1703 esp->data_dma_len = dma_len;
1704
1705 if (!dma_len) {
a1a75b35
HR
1706 shost_printk(KERN_ERR, esp->host,
1707 "DMA length is zero!\n");
1708 shost_printk(KERN_ERR, esp->host,
1709 "cur adr[%08llx] len[%08x]\n",
1710 (unsigned long long)esp_cur_dma_addr(ent, cmd),
1711 esp_cur_dma_len(ent, cmd));
cd9ad58d
DM
1712 esp_schedule_reset(esp);
1713 return 0;
1714 }
1715
a1a75b35 1716 esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
e1f2a094 1717 (unsigned long long)dma_addr, dma_len, write);
cd9ad58d
DM
1718
1719 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1720 write, ESP_CMD_DMA | ESP_CMD_TI);
1721 esp_event(esp, ESP_EVENT_DATA_DONE);
1722 break;
1723 }
1724 case ESP_EVENT_DATA_DONE: {
1725 struct esp_cmd_entry *ent = esp->active_cmd;
1726 struct scsi_cmnd *cmd = ent->cmd;
1727 int bytes_sent;
1728
1729 if (esp->ops->dma_error(esp)) {
a1a75b35
HR
1730 shost_printk(KERN_INFO, esp->host,
1731 "data done, DMA error, resetting\n");
cd9ad58d
DM
1732 esp_schedule_reset(esp);
1733 return 0;
1734 }
1735
1736 if (ent->flags & ESP_CMD_FLAG_WRITE) {
1737 /* XXX parity errors, etc. XXX */
1738
1739 esp->ops->dma_drain(esp);
1740 }
1741 esp->ops->dma_invalidate(esp);
1742
1743 if (esp->ireg != ESP_INTR_BSERV) {
1744 /* We should always see exactly a bus-service
1745 * interrupt at the end of a successful transfer.
1746 */
a1a75b35
HR
1747 shost_printk(KERN_INFO, esp->host,
1748 "data done, not BSERV, resetting\n");
cd9ad58d
DM
1749 esp_schedule_reset(esp);
1750 return 0;
1751 }
1752
1753 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1754
a1a75b35 1755 esp_log_datadone("data done flgs[%x] sent[%d]\n",
cd9ad58d
DM
1756 ent->flags, bytes_sent);
1757
1758 if (bytes_sent < 0) {
1759 /* XXX force sync mode for this target XXX */
1760 esp_schedule_reset(esp);
1761 return 0;
1762 }
1763
1764 esp_advance_dma(esp, ent, cmd, bytes_sent);
1765 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1766 goto again;
cd9ad58d
DM
1767 }
1768
1769 case ESP_EVENT_STATUS: {
1770 struct esp_cmd_entry *ent = esp->active_cmd;
1771
1772 if (esp->ireg & ESP_INTR_FDONE) {
1773 ent->status = esp_read8(ESP_FDATA);
1774 ent->message = esp_read8(ESP_FDATA);
1775 scsi_esp_cmd(esp, ESP_CMD_MOK);
1776 } else if (esp->ireg == ESP_INTR_BSERV) {
1777 ent->status = esp_read8(ESP_FDATA);
1778 ent->message = 0xff;
1779 esp_event(esp, ESP_EVENT_MSGIN);
1780 return 0;
1781 }
1782
1783 if (ent->message != COMMAND_COMPLETE) {
a1a75b35
HR
1784 shost_printk(KERN_INFO, esp->host,
1785 "Unexpected message %x in status\n",
1786 ent->message);
cd9ad58d
DM
1787 esp_schedule_reset(esp);
1788 return 0;
1789 }
1790
1791 esp_event(esp, ESP_EVENT_FREE_BUS);
1792 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1793 break;
1794 }
1795 case ESP_EVENT_FREE_BUS: {
1796 struct esp_cmd_entry *ent = esp->active_cmd;
1797 struct scsi_cmnd *cmd = ent->cmd;
1798
1799 if (ent->message == COMMAND_COMPLETE ||
1800 ent->message == DISCONNECT)
1801 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1802
1803 if (ent->message == COMMAND_COMPLETE) {
a1a75b35 1804 esp_log_cmddone("Command done status[%x] message[%x]\n",
cd9ad58d
DM
1805 ent->status, ent->message);
1806 if (ent->status == SAM_STAT_TASK_SET_FULL)
1807 esp_event_queue_full(esp, ent);
1808
1809 if (ent->status == SAM_STAT_CHECK_CONDITION &&
1810 !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1811 ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1812 esp_autosense(esp, ent);
1813 } else {
1814 esp_cmd_is_done(esp, ent, cmd,
1815 compose_result(ent->status,
1816 ent->message,
1817 DID_OK));
1818 }
1819 } else if (ent->message == DISCONNECT) {
a1a75b35 1820 esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
cd9ad58d
DM
1821 cmd->device->id,
1822 ent->tag[0], ent->tag[1]);
1823
1824 esp->active_cmd = NULL;
1825 esp_maybe_execute_command(esp);
1826 } else {
a1a75b35
HR
1827 shost_printk(KERN_INFO, esp->host,
1828 "Unexpected message %x in freebus\n",
1829 ent->message);
cd9ad58d
DM
1830 esp_schedule_reset(esp);
1831 return 0;
1832 }
1833 if (esp->active_cmd)
1834 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1835 break;
1836 }
1837 case ESP_EVENT_MSGOUT: {
1838 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1839
1840 if (esp_debug & ESP_DEBUG_MSGOUT) {
1841 int i;
1842 printk("ESP: Sending message [ ");
1843 for (i = 0; i < esp->msg_out_len; i++)
1844 printk("%02x ", esp->msg_out[i]);
1845 printk("]\n");
1846 }
1847
1848 if (esp->rev == FASHME) {
1849 int i;
1850
1851 /* Always use the fifo. */
1852 for (i = 0; i < esp->msg_out_len; i++) {
1853 esp_write8(esp->msg_out[i], ESP_FDATA);
1854 esp_write8(0, ESP_FDATA);
1855 }
1856 scsi_esp_cmd(esp, ESP_CMD_TI);
1857 } else {
1858 if (esp->msg_out_len == 1) {
1859 esp_write8(esp->msg_out[0], ESP_FDATA);
1860 scsi_esp_cmd(esp, ESP_CMD_TI);
1861 } else {
1862 /* Use DMA. */
1863 memcpy(esp->command_block,
1864 esp->msg_out,
1865 esp->msg_out_len);
1866
1867 esp->ops->send_dma_cmd(esp,
1868 esp->command_block_dma,
1869 esp->msg_out_len,
1870 esp->msg_out_len,
1871 0,
1872 ESP_CMD_DMA|ESP_CMD_TI);
1873 }
1874 }
1875 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1876 break;
1877 }
1878 case ESP_EVENT_MSGOUT_DONE:
1879 if (esp->rev == FASHME) {
1880 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1881 } else {
1882 if (esp->msg_out_len > 1)
1883 esp->ops->dma_invalidate(esp);
1884 }
1885
1886 if (!(esp->ireg & ESP_INTR_DC)) {
1887 if (esp->rev != FASHME)
1888 scsi_esp_cmd(esp, ESP_CMD_NULL);
1889 }
1890 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1891 goto again;
1892 case ESP_EVENT_MSGIN:
1893 if (esp->ireg & ESP_INTR_BSERV) {
1894 if (esp->rev == FASHME) {
1895 if (!(esp_read8(ESP_STATUS2) &
1896 ESP_STAT2_FEMPTY))
1897 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1898 } else {
1899 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1900 if (esp->rev == ESP100)
1901 scsi_esp_cmd(esp, ESP_CMD_NULL);
1902 }
1903 scsi_esp_cmd(esp, ESP_CMD_TI);
1904 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1905 return 1;
1906 }
1907 if (esp->ireg & ESP_INTR_FDONE) {
1908 u8 val;
1909
1910 if (esp->rev == FASHME)
1911 val = esp->fifo[0];
1912 else
1913 val = esp_read8(ESP_FDATA);
1914 esp->msg_in[esp->msg_in_len++] = val;
1915
a1a75b35 1916 esp_log_msgin("Got msgin byte %x\n", val);
cd9ad58d
DM
1917
1918 if (!esp_msgin_process(esp))
1919 esp->msg_in_len = 0;
1920
1921 if (esp->rev == FASHME)
1922 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1923
1924 scsi_esp_cmd(esp, ESP_CMD_MOK);
1925
1926 if (esp->event != ESP_EVENT_FREE_BUS)
1927 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1928 } else {
a1a75b35
HR
1929 shost_printk(KERN_INFO, esp->host,
1930 "MSGIN neither BSERV not FDON, resetting");
cd9ad58d
DM
1931 esp_schedule_reset(esp);
1932 return 0;
1933 }
1934 break;
1935 case ESP_EVENT_CMD_START:
1936 memcpy(esp->command_block, esp->cmd_bytes_ptr,
1937 esp->cmd_bytes_left);
1938 if (esp->rev == FASHME)
1939 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1940 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1941 esp->cmd_bytes_left, 16, 0,
1942 ESP_CMD_DMA | ESP_CMD_TI);
1943 esp_event(esp, ESP_EVENT_CMD_DONE);
1944 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1945 break;
1946 case ESP_EVENT_CMD_DONE:
1947 esp->ops->dma_invalidate(esp);
1948 if (esp->ireg & ESP_INTR_BSERV) {
1949 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1950 goto again;
1951 }
1952 esp_schedule_reset(esp);
1953 return 0;
1954 break;
1955
1956 case ESP_EVENT_RESET:
1957 scsi_esp_cmd(esp, ESP_CMD_RS);
1958 break;
1959
1960 default:
a1a75b35
HR
1961 shost_printk(KERN_INFO, esp->host,
1962 "Unexpected event %x, resetting\n", esp->event);
cd9ad58d
DM
1963 esp_schedule_reset(esp);
1964 return 0;
1965 break;
1966 }
1967 return 1;
1968}
1969
1970static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
1971{
1972 struct scsi_cmnd *cmd = ent->cmd;
1973
1974 esp_unmap_dma(esp, cmd);
1975 esp_free_lun_tag(ent, cmd->device->hostdata);
1976 cmd->result = DID_RESET << 16;
1977
1978 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
1979 esp->ops->unmap_single(esp, ent->sense_dma,
1980 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1981 ent->sense_ptr = NULL;
1982 }
1983
1984 cmd->scsi_done(cmd);
1985 list_del(&ent->list);
1986 esp_put_ent(esp, ent);
1987}
1988
1989static void esp_clear_hold(struct scsi_device *dev, void *data)
1990{
1991 struct esp_lun_data *lp = dev->hostdata;
1992
1993 BUG_ON(lp->num_tagged);
1994 lp->hold = 0;
1995}
1996
/* Tear down all driver state after a SCSI bus reset: fail every queued
 * and active command with DID_RESET, and clear per-target negotiation
 * state so sync/wide are renegotiated on next contact.  Runs under
 * host->lock.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands never issued to the bus can be failed directly. */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	/* Active/disconnected commands also need DMA/tag teardown. */
	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers. */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2037
/* Runs under host->lock */
/* Single interrupt pass: latch status/interrupt registers, finish a
 * pending bus reset if one is in flight, pre-drain the FASHME FIFO
 * where needed, then dispatch to selection-completion / reconnect
 * handling and run the event state machine until it reports done.
 */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	esp->sreg = esp_read8(ESP_STATUS);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		/* Negative means "spurious, ignore"; positive means a
		 * bus reset was detected.
		 */
		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	/* Reading INTRPT acknowledges the interrupt condition. */
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			/* Wake up the error-handler thread waiting on
			 * this reset.
			 */
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselection: back out of any selection in
			 * progress first.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2112
2113irqreturn_t scsi_esp_intr(int irq, void *dev_id)
2114{
2115 struct esp *esp = dev_id;
2116 unsigned long flags;
2117 irqreturn_t ret;
2118
2119 spin_lock_irqsave(esp->host->host_lock, flags);
2120 ret = IRQ_NONE;
2121 if (esp->ops->irq_pending(esp)) {
2122 ret = IRQ_HANDLED;
2123 for (;;) {
2124 int i;
2125
2126 __esp_interrupt(esp);
2127 if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2128 break;
2129 esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2130
2131 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
2132 if (esp->ops->irq_pending(esp))
2133 break;
2134 }
2135 if (i == ESP_QUICKIRQ_LIMIT)
2136 break;
2137 }
2138 }
2139 spin_unlock_irqrestore(esp->host->host_lock, flags);
2140
2141 return ret;
2142}
2143EXPORT_SYMBOL(scsi_esp_intr);
2144
/* Probe which member of the ESP/FAS chip family we are driving by
 * writing to the CFG2/CFG3 registers and seeing which writes stick.
 * Sets esp->rev (and esp->sync_defp for FAST parts); ESP100 has no
 * CFG2, ESP100A has CFG2 but no CFG3, FAS parts have all three.
 */
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write8(esp->config2, ESP_CFG2);

	val = esp_read8(ESP_CFG2);
	val &= ~ESP_CONFIG2_MAGIC;
	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		/* If what we write to cfg2 does not come back, cfg2 is not
		 * implemented, therefore this must be a plain esp100.
		 */
		esp->rev = ESP100;
	} else {
		esp->config2 = 0;
		esp_set_all_config3(esp, 5);
		esp->prev_cfg3 = 5;
		esp_write8(esp->config2, ESP_CFG2);
		esp_write8(0, ESP_CFG3);
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		val = esp_read8(ESP_CFG3);
		if (val != 5) {
			/* The cfg2 register is implemented, however
			 * cfg3 is not, must be esp100a.
			 */
			esp->rev = ESP100A;
		} else {
			esp_set_all_config3(esp, 0);
			esp->prev_cfg3 = 0;
			esp_write8(esp->prev_cfg3, ESP_CFG3);

			/* All of cfg{1,2,3} implemented, must be one of
			 * the fas variants, figure out which one.
			 */
			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
				esp->rev = FAST;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				esp->rev = ESP236;
			}
			esp->config2 = 0;
			esp_write8(esp->config2, ESP_CFG2);
		}
	}
}
2193
76246808 2194static void esp_init_swstate(struct esp *esp)
cd9ad58d
DM
2195{
2196 int i;
2197
2198 INIT_LIST_HEAD(&esp->queued_cmds);
2199 INIT_LIST_HEAD(&esp->active_cmds);
2200 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2201
2202 /* Start with a clear state, domain validation (via ->slave_configure,
2203 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2204 * commands.
2205 */
2206 for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2207 esp->target[i].flags = 0;
2208 esp->target[i].nego_goal_period = 0;
2209 esp->target[i].nego_goal_offset = 0;
2210 esp->target[i].nego_goal_width = 0;
2211 esp->target[i].nego_goal_tags = 0;
2212 }
2213}
2214
2215/* This places the ESP into a known state at boot time. */
d679f805 2216static void esp_bootup_reset(struct esp *esp)
cd9ad58d
DM
2217{
2218 u8 val;
2219
2220 /* Reset the DMA */
2221 esp->ops->reset_dma(esp);
2222
2223 /* Reset the ESP */
2224 esp_reset_esp(esp);
2225
2226 /* Reset the SCSI bus, but tell ESP not to generate an irq */
2227 val = esp_read8(ESP_CFG1);
2228 val |= ESP_CONFIG1_SRRDISAB;
2229 esp_write8(val, ESP_CFG1);
2230
2231 scsi_esp_cmd(esp, ESP_CMD_RS);
2232 udelay(400);
2233
2234 esp_write8(esp->config1, ESP_CFG1);
2235
2236 /* Eat any bitrot in the chip and we are done... */
2237 esp_read8(ESP_INTRPT);
2238}
2239
/* Derive the chip's clock-dependent parameters (clock conversion
 * factor, clock cycle/tick times, default negotiation period) from the
 * input crystal frequency in esp->cfreq.  Falls back to 20MHz when the
 * reported frequency is implausible.
 */
static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) * (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Imperical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fhz = esp->cfreq;

	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	/* The register encoding uses 0 to mean a CCF of 8. */
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}
2300
/* Human-readable chip names, indexed by esp->rev. */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

/* SPI transport template shared by every registered ESP host. */
static struct scsi_transport_template *esp_transport_template;
2312
/* Register a fully set-up ESP instance with the SCSI midlayer.
 *
 * The front-end driver must have filled in esp->host, esp->ops, the
 * register mappings and esp->cfreq before calling.  Clamps the tag
 * count, probes the chip revision, resets the chip and bus, waits for
 * the bus to settle, then adds and scans the Scsi_Host.
 *
 * Returns 0 on success or the error from scsi_add_host().
 */
int scsi_esp_register(struct esp *esp, struct device *dev)
{
	/* Monotonic instance counter across all ESP hosts. */
	static int instance;
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	else if (esp->num_tags >= ESP_MAX_TAG)
		esp->num_tags = ESP_MAX_TAG - 1;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	/* Only consume an instance number once registration succeeded. */
	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
2357
/**
 * scsi_esp_unregister - detach an ESP host from the SCSI midlayer
 * @esp: instance previously registered with scsi_esp_register()
 *
 * Note: the caller remains responsible for freeing the host and any
 * bus-specific resources.
 */
void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
2363
ec5e69f6
JB
2364static int esp_target_alloc(struct scsi_target *starget)
2365{
2366 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2367 struct esp_target_data *tp = &esp->target[starget->id];
2368
2369 tp->starget = starget;
2370
2371 return 0;
2372}
2373
2374static void esp_target_destroy(struct scsi_target *starget)
2375{
2376 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2377 struct esp_target_data *tp = &esp->target[starget->id];
2378
2379 tp->starget = NULL;
2380}
2381
cd9ad58d
DM
2382static int esp_slave_alloc(struct scsi_device *dev)
2383{
2b14ec78 2384 struct esp *esp = shost_priv(dev->host);
cd9ad58d
DM
2385 struct esp_target_data *tp = &esp->target[dev->id];
2386 struct esp_lun_data *lp;
2387
2388 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2389 if (!lp)
2390 return -ENOMEM;
2391 dev->hostdata = lp;
2392
cd9ad58d
DM
2393 spi_min_period(tp->starget) = esp->min_period;
2394 spi_max_offset(tp->starget) = 15;
2395
2396 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2397 spi_max_width(tp->starget) = 1;
2398 else
2399 spi_max_width(tp->starget) = 0;
2400
2401 return 0;
2402}
2403
static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];

	/* Use the driver-wide tag count chosen in scsi_esp_register(). */
	if (dev->tagged_supported)
		scsi_change_queue_depth(dev, esp->num_tags);

	/* Permit the target to disconnect from the bus mid-command. */
	tp->flags |= ESP_TGT_DISCONNECT;

	/* Run domain validation once per target to settle sync/wide
	 * transfer parameters.
	 */
	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}
2419
2420static void esp_slave_destroy(struct scsi_device *dev)
2421{
2422 struct esp_lun_data *lp = dev->hostdata;
2423
2424 kfree(lp);
2425 dev->hostdata = NULL;
2426}
2427
/* scsi_host_template .eh_abort_handler: try to abort a single command.
 *
 * Three cases, in increasing difficulty:
 *   1. still on queued_cmds  -> just unlink it and complete with DID_ABORT;
 *   2. currently active      -> send ABORT_TASK_SET via MSGOUT and wait;
 *   3. disconnected          -> give up and let the midlayer escalate.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	/* Drop and retake the lock between the diagnostic dump and the
	 * actual abort work.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Case 1: look for the command on the not-yet-issued queue. */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	/* Case 2: the command currently owns the bus. */
	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus. If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		/* The interrupt path signals eh_done once the abort
		 * message has been acted on.
		 */
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected. This is not easy to
		 * abort. For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target. Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		/* Timed out: detach the on-stack completion under the
		 * lock so the interrupt path cannot reference it after
		 * we return.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2544
/* scsi_host_template .eh_bus_reset_handler: issue a SCSI bus reset and
 * wait for the reset-interrupt path to acknowledge it.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Published under the host lock; the reset handling path is
	 * expected to complete() this.  NOTE(review): that path is not
	 * visible in this chunk -- confirm it also clears eh_reset on
	 * the success path.
	 */
	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices on the bus time to recover from the reset. */
	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		/* Timed out: detach the on-stack completion before
		 * returning so the interrupt path cannot touch it.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2579
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	/* Hard-reset the chip and fail back every outstanding command
	 * under the host lock.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Let the bus settle before the midlayer retries anything. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2595
/* scsi_host_template .info hook: fixed short driver name. */
static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}
2600
/* Host template shared by all ESP front-end drivers; exported so each
 * bus glue (sbus, pci, ...) can register hosts against it.
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	/* The driver performs its own settle delay after resets. */
	.skip_settle_delay	= 1,
	.use_blk_tags		= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2623
2624static void esp_get_signalling(struct Scsi_Host *host)
2625{
2b14ec78 2626 struct esp *esp = shost_priv(host);
cd9ad58d
DM
2627 enum spi_signal_type type;
2628
2629 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2630 type = SPI_SIGNAL_HVD;
2631 else
2632 type = SPI_SIGNAL_SE;
2633
2634 spi_signalling(host) = type;
2635}
2636
2637static void esp_set_offset(struct scsi_target *target, int offset)
2638{
2639 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2b14ec78 2640 struct esp *esp = shost_priv(host);
cd9ad58d
DM
2641 struct esp_target_data *tp = &esp->target[target->id];
2642
02507a80
FT
2643 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2644 tp->nego_goal_offset = 0;
2645 else
2646 tp->nego_goal_offset = offset;
cd9ad58d
DM
2647 tp->flags |= ESP_TGT_CHECK_NEGO;
2648}
2649
2650static void esp_set_period(struct scsi_target *target, int period)
2651{
2652 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2b14ec78 2653 struct esp *esp = shost_priv(host);
cd9ad58d
DM
2654 struct esp_target_data *tp = &esp->target[target->id];
2655
2656 tp->nego_goal_period = period;
2657 tp->flags |= ESP_TGT_CHECK_NEGO;
2658}
2659
2660static void esp_set_width(struct scsi_target *target, int width)
2661{
2662 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2b14ec78 2663 struct esp *esp = shost_priv(host);
cd9ad58d
DM
2664 struct esp_target_data *tp = &esp->target[target->id];
2665
2666 tp->nego_goal_width = (width ? 1 : 0);
2667 tp->flags |= ESP_TGT_CHECK_NEGO;
2668}
2669
/* SPI transport attributes we implement; attached in esp_init(). */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2679
/* Module init: verify size assumptions and attach the SPI transport
 * class template.
 */
static int __init esp_init(void)
{
	/* Compile-time check that esp_cmd_priv fits inside scsi_pointer --
	 * presumably because the driver stores it there; confirm in
	 * esp_scsi.h.
	 */
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}
2691
/* Module unload: release the SPI transport class reference taken in
 * esp_init().
 */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2696
MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Seconds to sleep after issuing a SCSI bus reset. */
module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

/* Bitmask enabling the esp_log_*() debug categories defined near the
 * top of this file.
 */
module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
);

module_init(esp_init);
module_exit(esp_exit);