/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		printk(f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		printk(f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		printk(f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		printk(f, ## a); \
} while (0)

#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

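/* Issue a command byte to the ESP, first recording it in the driver's
 * circular event log so esp_dump_cmd_log() can reconstruct the recent
 * register history after an error.
 */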
void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	printk(KERN_INFO PFX "esp%d: Dumping command log\n",
	       esp->host->unique_id);
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
		       esp->host->unique_id, idx,
		       p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");

		printk("val[%02x] sreg[%02x] seqreg[%02x] "
		       "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
		       p->val, p->sreg, p->seqreg,
		       p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: ESP_FF_FBYTES "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}
	}
}

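/* On FASHME each unit of the fifo flags count covers two bytes, so
 * the fifo is drained with paired ESP_FDATA reads.  A trailing odd
 * byte is flagged by ESP_STAT2_F1BYTE and has to be pushed out with
 * a dummy write before it can be read.
 */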
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* This is the only point at which it is reliable to read
	 * the ID code for the fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

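/* Map the command's scatterlist for DMA and prime the per-command
 * bookkeeping: cur_sg/cur_residue track progress within the current
 * segment, tot_residue the bytes left in the whole transfer.
 */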
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = cmd->request_buffer;
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	BUG_ON(cmd->use_sg == 0);

	spriv->u.num_sg = esp->ops->map_sg(esp, sg,
					   cmd->use_sg, dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
		       esp->host->unique_id);
		printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
		       "len[%u]\n",
		       esp->host->unique_id,
		       p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	int dir = cmd->sc_data_direction;

	if (dir == DMA_NONE)
		return;

	esp->ops->unmap_sg(esp, cmd->request_buffer,
			   spriv->u.num_sg, dir);
}

static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (cmd->cmd_len == 6 ||
	    cmd->cmd_len == 10 ||
	    cmd->cmd_len == 12) {
		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
	} else {
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts. */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* ESP chip limits other variants by 16-bits of transfer
		 * count.  Actually on FAS100A and FAS236 we could get
		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care". */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

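/* Tagged and untagged commands are never mixed on a LUN: an untagged
 * command "plugs" the queue (lp->hold) until outstanding tagged
 * commands drain, and a tagged command is refused while an untagged
 * one is active.  Returns -EBUSY when the command must wait.
 */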
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	BUG_ON(lp->tagged_cmds[ent->tag[1]]);

	lp->tagged_cmds[ent->tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
		lp->tagged_cmds[ent->tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("esp%d: Doing auto-sense for "
				  "tgt[%d] lun[%d]\n",
				  esp->host->unique_id, tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}

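/* Scan the queued commands for one that can be issued now.  Autosense
 * requests always win; ordinary commands must first get a LUN tag
 * slot via esp_alloc_lun_tag().
 */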
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

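/* If the chip is idle, pick the next issuable command, build the
 * identify/tag/CDB bytes in the command block, and start selection.
 * Negotiation messages and CDBs that are not 6, 10 or 12 bytes long
 * go through the slower select-with-ATN-and-stop path.
 */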
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_del(&ent->list);
	list_add(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, start_cmd);
}

static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

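/* Complete a command: unmap DMA, release the LUN tag, report the
 * result (substituted with a CHECK CONDITION plus sense-valid result
 * when this was the autosense pass), then try to start the next
 * command.
 */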
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return (status | (message << 8) | (driver_code << 16));
}

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
		       esp->host->unique_id, esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				printk(KERN_ERR PFX "esp%d: Spurious irq, "
				       "sreg=%x.\n",
				       esp->host->unique_id, esp->sreg);
				return -1;
			}

			printk(KERN_ERR PFX "esp%d: DMA error\n",
			       esp->host->unique_id);

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("ESP: esp_schedule_reset() from %p\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp_log_reconnect("ESP: reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
		       esp->host->unique_id);
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
		       esp->host->unique_id, esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
		       "type %02x.\n",
		       esp->host->unique_id, esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
		       "tag %02x.\n",
		       esp->host->unique_id, esp->command_block[1]);
		return NULL;
	}

	return ent;
}

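/* Handle reselection by a target: recover the target and lun from the
 * fifo (the method is chip-revision specific), look up the
 * disconnected command, and make it the active command again.
 */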
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
		       "tgt[%u] lun[%u]\n",
		       esp->host->unique_id, target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

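/* Interrupt-time completion of a selection attempt.  Distinguishes
 * losing out to a reselecting target, selection timeout (completed
 * as DID_BAD_TARGET), and successful selection, unwinding driver
 * state as needed.
 */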
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;
	u8 orig_select_state;

	orig_select_state = esp->select_state;

	/* No longer selecting. */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_del(&ent->list);
		list_add(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	printk("ESP: Unexpected selection completion ireg[%x].\n",
	       esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

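/* Work out how many bytes actually reached the target by subtracting
 * the chip's remaining transfer count (and, for data-out transfers,
 * bytes still sitting in the fifo) from the length programmed into
 * the DMA.
 */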
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}

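/* Commit a negotiated sync agreement: record it with the SPI
 * transport class, program the chip's offset/period registers, and
 * (on FAS parts) set or clear the fast-SCSI config3 bit for period
 * codes below 50.
 */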
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int rounded_up, one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		rounded_up = (period << 2);
		rounded_up = (rounded_up + one_clock - 1) / one_clock;
		stp = rounded_up;
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	printk("ESP: Unexpected extended msg type %x\n",
	       esp->msg_in[2]);

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		printk("ESP: Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

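/* The heart of the driver: dispatch on esp->event and advance the
 * SCSI state machine.  A zero return tells __esp_interrupt() to keep
 * calling us; a non-zero return means this interrupt has been fully
 * handled.
 */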
static int esp_process_event(struct esp *esp)
{
	int write;

again:
	write = 0;
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			printk("ESP: Unexpected phase, sreg=%02x\n",
			       esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;
		break;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthru */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
		esp->data_dma_len = dma_len;

		if (!dma_len) {
			printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
			       esp->host->unique_id);
			printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
			       esp->host->unique_id,
			       (unsigned long long)esp_cur_dma_addr(ent, cmd),
			       esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
				  "write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			printk("ESP: data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			printk("ESP: data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
		break;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			printk("ESP: Unexpected message %x in status\n",
			       ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("ESP: Command done status[%x] "
					"message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("ESP: Disconnecting tgt[%d] "
					   "tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			printk("ESP: Unexpected message %x in freebus\n",
			       ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo. */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);
		}

		if (!(esp->ireg & ESP_INTR_DC)) {
			if (esp->rev != FASHME)
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("ESP: Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			printk("ESP: MSGIN neither BSERV nor FDONE, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       esp->cmd_bytes_left, 16, 0,
				       ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;
		break;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		printk("ESP: Unexpected event %x, resetting\n",
		       esp->event);
		esp_schedule_reset(esp);
		return 0;
		break;
	}
	return 1;
}

static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}

static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}

static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers. */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			starget_for_each_device(tp->starget, NULL,
						esp_clear_hold);
	}
}

/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	esp->sreg = esp_read8(ESP_STATUS);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		printk("ESP: unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}

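/* Shared IRQ handler.  When a pass through __esp_interrupt() sets
 * ESP_FLAG_QUICKIRQ_CHECK we briefly poll for another pending
 * interrupt instead of returning, saving a trip back through the
 * interrupt path on quick back-to-back phase changes.
 */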
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);

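/* Identify the chip by probing which config registers are
 * implemented: no CFG2 means ESP100, CFG2 without CFG3 means ESP100A,
 * and a full set is either an ESP236 or one of the FAS parts, told
 * apart by the clock conversion factor.
 */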
static void __devinit esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write8(esp->config2, ESP_CFG2);

	val = esp_read8(ESP_CFG2);
	val &= ~ESP_CONFIG2_MAGIC;
	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		/* If what we write to cfg2 does not come back, cfg2 is not
		 * implemented, therefore this must be a plain esp100.
		 */
		esp->rev = ESP100;
	} else {
		esp->config2 = 0;
		esp_set_all_config3(esp, 5);
		esp->prev_cfg3 = 5;
		esp_write8(esp->config2, ESP_CFG2);
		esp_write8(0, ESP_CFG3);
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		val = esp_read8(ESP_CFG3);
		if (val != 5) {
			/* The cfg2 register is implemented, however
			 * cfg3 is not, must be esp100a.
			 */
			esp->rev = ESP100A;
		} else {
			esp_set_all_config3(esp, 0);
			esp->prev_cfg3 = 0;
			esp_write8(esp->prev_cfg3, ESP_CFG3);

			/* All of cfg{1,2,3} implemented, must be one of
			 * the fas variants, figure out which one.
			 */
			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
				esp->rev = FAST;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				esp->rev = ESP236;
			}
			esp->config2 = 0;
			esp_write8(esp->config2, ESP_CFG2);
		}
	}
}

static void __devinit esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	/* Start with a clear state, domain validation (via ->slave_configure,
	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
	 * commands.
	 */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}

static void __devinit esp_set_clock_params(struct esp *esp)
{
	int fmhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) * (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
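	/* Worked example (a sketch, assuming the 250ms ESP_BUS_TIMEOUT
	 * above): a 20MHz crystal gives ccf = (20 + 4) / 5 = 4, so
	 * STO = (0.250 * 20,000,000) / (8192 * 4) ~= 153, the value
	 * that would be programmed as the selection time-out.
	 */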
	fmhz = esp->cfreq;

	ccf = ((fmhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHz.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
		fmhz = 20000000;
		ccf = 4;
	}

	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fmhz;
	esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}

static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

static struct scsi_transport_template *esp_transport_template;

int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
	       esp->host->unique_id, esp->regs, esp->dma_regs,
	       esp->host->irq);
	printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
	       esp->host->unique_id, esp_chip_names[esp->rev],
	       esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	/* unique_id was assigned before the printks above; only advance
	 * the instance counter once the host has been added successfully.
	 */
	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);

void __devexit scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);

static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

	tp->starget = dev->sdev_target;

	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}

static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	int goal_tags, queue_depth;

	goal_tags = 0;

	if (dev->tagged_supported) {
		/* XXX make this configurable somehow XXX */
		goal_tags = ESP_DEFAULT_TAGS;

		if (goal_tags > ESP_MAX_TAG)
			goal_tags = ESP_MAX_TAG;
	}

	queue_depth = goal_tags;
	if (queue_depth < dev->host->cmd_per_lun)
		queue_depth = dev->host->cmd_per_lun;

	if (goal_tags) {
		scsi_set_tag_type(dev, MSG_ORDERED_TAG);
		scsi_activate_tcq(dev, queue_depth);
	} else {
		scsi_deactivate_tcq(dev, queue_depth);
	}
	tp->flags |= ESP_TGT_DISCONNECT;

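	/* Kick off domain validation once per target; spi_dv_device()
	 * will exercise the negotiation goals recorded above and settle
	 * on transfer settings the device can actually sustain.
	 */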
	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}

static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}

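/* Abort strategy, in order of preference: a command still on the
 * queued list is simply unlinked and completed with DID_ABORT; the
 * currently active command gets an ABORT_TASK_SET message pushed out
 * via ATN; a disconnected command cannot be aborted here and is left
 * to the midlayer's bus/host reset escalation.
 */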
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
	       esp->host->unique_id, cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyway, this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}

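/* Issue a SCSI bus reset and wait for __esp_interrupt() to observe
 * the reset interrupt (ESP_INTR_SR) and complete esp->eh_reset; if
 * that never happens within the timeout, report failure so the
 * midlayer can escalate to a host reset.
 */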
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}

/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}

struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
	struct esp *esp = shost_priv(host);
	enum spi_signal_type type;

	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
		type = SPI_SIGNAL_HVD;
	else
		type = SPI_SIGNAL_SE;

	spi_signalling(host) = type;
}

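/* The spi transport set_* callbacks below only record negotiation
 * goals; ESP_TGT_CHECK_NEGO makes the driver renegotiate with the
 * target the next time a command is issued to it.
 */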
static void esp_set_offset(struct scsi_target *target, int offset)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_width = (width ? 1 : 0);
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};

static int __init esp_init(void)
{
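	/* The driver overlays struct esp_cmd_priv on the scsi_pointer
	 * (SCp) area of each scsi_cmnd (see ESP_CMD_PRIV in esp_scsi.h),
	 * so refuse to build if it no longer fits there.
	 */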
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}

static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
);

module_init(esp_init);
module_exit(esp_exit);