drivers/scsi/dec_esp.c
/*
 * dec_esp.c: Driver for SCSI chips on IOASIC based TURBOchannel DECstations
 *            and TURBOchannel PMAZ-A cards
 *
 * TURBOchannel changes by Harald Koerfgen
 * PMAZ-A support by David Airlie
 *
 * based on jazz_esp.c:
 * Copyright (C) 1997 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * jazz_esp is based on David S. Miller's ESP driver and cyber_esp
 *
 * 20000819 - Small PMAZ-AA fixes by Florian Lohoff <flo@rfc822.org>
 *            Be warned that the PMAZ-AA currently works only as a single
 *            card.  Don't try to put multiple cards in one machine - they
 *            are both detected, but it may crash under high load, garbling
 *            your data.
 * 20001005 - Initialization fixes for 2.4.0-test9
 *            Florian Lohoff <flo@rfc822.org>
 *
 * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/stat.h>

#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#include <asm/dec/interrupts.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>
#include <asm/dec/machtype.h>
#include <asm/dec/system.h>
#include <asm/dec/tc.h>

#define DEC_SCSI_SREG 0
#define DEC_SCSI_DMAREG 0x40000
#define DEC_SCSI_SRAM 0x80000
#define DEC_SCSI_DIAG 0xC0000

#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"

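/*
 * DMA hooks for the IOASIC onboard SCSI.  Several of these are generic
 * enough that the PMAZ-A setup below reuses them as well.
 */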
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static void dma_drain(struct NCR_ESP *esp);
static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_advance_sg(struct scsi_cmnd *sp);

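/*
 * DMA hooks specific to TURBOchannel PMAZ-A cards, which stage data in
 * the card's on-board SRAM buffer.
 */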
static void pmaz_dma_drain(struct NCR_ESP *esp);
static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
static void pmaz_dma_ints_off(struct NCR_ESP *esp);
static void pmaz_dma_ints_on(struct NCR_ESP *esp);
static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd *sp);

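/*
 * The PMAZ-A carries 0x20000 bytes (128KB) of buffer SRAM; ESP_TGT_DMA_SIZE
 * is a word-aligned slice of it (one seventh, matching ESP_NCMD) used as
 * the DMA staging area.
 */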
#define TC_ESP_RAM_SIZE 0x20000
#define ESP_TGT_DMA_SIZE ((TC_ESP_RAM_SIZE/7) & ~(sizeof(int)-1))
#define ESP_NCMD 7

#define TC_ESP_DMAR_MASK 0x1ffff
#define TC_ESP_DMAR_WRITE 0x80000000
#define TC_ESP_DMA_ADDR(x) ((unsigned)(x) & TC_ESP_DMAR_MASK)

u32 esp_virt_buffer;
int scsi_current_length;

/*
 * This is where all commands are put before they are transferred
 * to the ESP chip via PIO.
 */
volatile unsigned char cmd_buffer[16];
volatile unsigned char pmaz_cmd_buffer[16];

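/*
 * IOASIC SCSI DMA interrupt handlers: DMA memory read errors, overruns
 * and the page-boundary (buffer pointer) interrupt.
 */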
static irqreturn_t scsi_dma_merr_int(int, void *, struct pt_regs *);
static irqreturn_t scsi_dma_err_int(int, void *, struct pt_regs *);
static irqreturn_t scsi_dma_int(int, void *, struct pt_regs *);

static int dec_esp_detect(struct scsi_host_template *tpnt);

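/*
 * Release a host set up by dec_esp_detect(): free its IRQ and I/O region
 * (if any) and unregister it from the SCSI midlayer.
 */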
static int dec_esp_release(struct Scsi_Host *shost)
{
        if (shost->irq)
                free_irq(shost->irq, NULL);
        if (shost->io_port && shost->n_io_port)
                release_region(shost->io_port, shost->n_io_port);
        scsi_unregister(shost);
        return 0;
}

static struct scsi_host_template driver_template = {
        .proc_name              = "dec_esp",
        .proc_info              = esp_proc_info,
        .name                   = "NCR53C94",
        .detect                 = dec_esp_detect,
        .slave_alloc            = esp_slave_alloc,
        .slave_destroy          = esp_slave_destroy,
        .release                = dec_esp_release,
        .info                   = esp_info,
        .queuecommand           = esp_queue,
        .eh_abort_handler       = esp_abort,
        .eh_bus_reset_handler   = esp_reset,
        .can_queue              = 7,
        .this_id                = 7,
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = 1,
        .use_clustering         = DISABLE_CLUSTERING,
};


#include "scsi_module.c"

/***************************************************************** Detection */
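/*
 * Probe for the IOASIC onboard ASC and for PMAZ-A TURBOchannel cards,
 * hook up the appropriate DMA callbacks and register each host found.
 */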
static int dec_esp_detect(struct scsi_host_template *tpnt)
{
        struct NCR_ESP *esp;
        struct ConfigDev *esp_dev;
        int slot;
        unsigned long mem_start;

        if (IOASIC) {
                esp_dev = 0;
                esp = esp_allocate(tpnt, (void *) esp_dev);

                /* Do command transfer with programmed I/O */
                esp->do_pio_cmds = 1;

                /* Required functions */
                esp->dma_bytes_sent = &dma_bytes_sent;
                esp->dma_can_transfer = &dma_can_transfer;
                esp->dma_dump_state = &dma_dump_state;
                esp->dma_init_read = &dma_init_read;
                esp->dma_init_write = &dma_init_write;
                esp->dma_ints_off = &dma_ints_off;
                esp->dma_ints_on = &dma_ints_on;
                esp->dma_irq_p = &dma_irq_p;
                esp->dma_ports_p = &dma_ports_p;
                esp->dma_setup = &dma_setup;

                /* Optional functions */
                esp->dma_barrier = 0;
                esp->dma_drain = &dma_drain;
                esp->dma_invalidate = 0;
                esp->dma_irq_entry = 0;
                esp->dma_irq_exit = 0;
                esp->dma_poll = 0;
                esp->dma_reset = 0;
                esp->dma_led_off = 0;
                esp->dma_led_on = 0;

                /* Virtual DMA functions */
                esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
                esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
                esp->dma_mmu_release_scsi_one = 0;
                esp->dma_mmu_release_scsi_sgl = 0;
                esp->dma_advance_sg = &dma_advance_sg;

                /* SCSI chip speed */
                esp->cfreq = 25000000;

                esp->dregs = 0;

                /* ESP register base */
                esp->eregs = (void *)CKSEG1ADDR(dec_kn_slot_base +
                                                IOASIC_SCSI);

                /* Set the command buffer */
                esp->esp_command = (volatile unsigned char *) cmd_buffer;

                /* Get the virtual DMA address for the command buffer */
                esp->esp_command_dvma = virt_to_phys(cmd_buffer);

                esp->irq = dec_interrupt[DEC_IRQ_ASC];

                esp->scsi_id = 7;

                /* Check for differential SCSI bus */
                esp->diff = 0;

                esp_initialize(esp);

                if (request_irq(esp->irq, esp_intr, IRQF_DISABLED,
                                "ncr53c94", esp->ehost))
                        goto err_dealloc;
                if (request_irq(dec_interrupt[DEC_IRQ_ASC_MERR],
                                scsi_dma_merr_int, IRQF_DISABLED,
                                "ncr53c94 error", esp->ehost))
                        goto err_free_irq;
                if (request_irq(dec_interrupt[DEC_IRQ_ASC_ERR],
                                scsi_dma_err_int, IRQF_DISABLED,
                                "ncr53c94 overrun", esp->ehost))
                        goto err_free_irq_merr;
                if (request_irq(dec_interrupt[DEC_IRQ_ASC_DMA],
                                scsi_dma_int, IRQF_DISABLED,
                                "ncr53c94 dma", esp->ehost))
                        goto err_free_irq_err;
        }

        if (TURBOCHANNEL) {
                while ((slot = search_tc_card("PMAZ-AA")) >= 0) {
                        claim_tc_card(slot);

                        esp_dev = 0;
                        esp = esp_allocate(tpnt, (void *) esp_dev);

                        mem_start = get_tc_base_addr(slot);

                        /* Store the base address in the esp struct */
                        esp->slot = CPHYSADDR(mem_start);

                        esp->dregs = 0;
                        esp->eregs = (void *)CKSEG1ADDR(mem_start +
                                                        DEC_SCSI_SREG);
                        esp->do_pio_cmds = 1;

                        /* Set the command buffer */
                        esp->esp_command = (volatile unsigned char *) pmaz_cmd_buffer;

                        /* Get the virtual DMA address for the command buffer */
                        esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer);

                        esp->cfreq = get_tc_speed();

                        esp->irq = get_tc_irq_nr(slot);

                        /* Required functions */
                        esp->dma_bytes_sent = &dma_bytes_sent;
                        esp->dma_can_transfer = &dma_can_transfer;
                        esp->dma_dump_state = &dma_dump_state;
                        esp->dma_init_read = &pmaz_dma_init_read;
                        esp->dma_init_write = &pmaz_dma_init_write;
                        esp->dma_ints_off = &pmaz_dma_ints_off;
                        esp->dma_ints_on = &pmaz_dma_ints_on;
                        esp->dma_irq_p = &dma_irq_p;
                        esp->dma_ports_p = &dma_ports_p;
                        esp->dma_setup = &pmaz_dma_setup;

                        /* Optional functions */
                        esp->dma_barrier = 0;
                        esp->dma_drain = &pmaz_dma_drain;
                        esp->dma_invalidate = 0;
                        esp->dma_irq_entry = 0;
                        esp->dma_irq_exit = 0;
                        esp->dma_poll = 0;
                        esp->dma_reset = 0;
                        esp->dma_led_off = 0;
                        esp->dma_led_on = 0;

                        esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one;
                        esp->dma_mmu_get_scsi_sgl = 0;
                        esp->dma_mmu_release_scsi_one = 0;
                        esp->dma_mmu_release_scsi_sgl = 0;
                        esp->dma_advance_sg = 0;

                        if (request_irq(esp->irq, esp_intr, IRQF_DISABLED,
                                        "PMAZ_AA", esp->ehost)) {
                                esp_deallocate(esp);
                                release_tc_card(slot);
                                continue;
                        }
                        esp->scsi_id = 7;
                        esp->diff = 0;
                        esp_initialize(esp);
                }
        }

        if (nesps) {
                printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
                esps_running = esps_in_use;
                return esps_in_use;
        }
        return 0;

err_free_irq_err:
        free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], esp->ehost);
err_free_irq_merr:
        free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], esp->ehost);
err_free_irq:
        free_irq(esp->irq, esp->ehost);
err_dealloc:
        esp_deallocate(esp);
        return 0;
}

/************************************************************* DMA Functions */
static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id, struct pt_regs *regs)
{
        printk("Got unexpected SCSI DMA Interrupt! < ");
        printk("SCSI_DMA_MEMRDERR ");
        printk(">\n");

        return IRQ_HANDLED;
}

static irqreturn_t scsi_dma_err_int(int irq, void *dev_id, struct pt_regs *regs)
{
        /* empty */

        return IRQ_HANDLED;
}

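/*
 * SCSI DMA interrupt: move IO_REG_SCSI_DMA_BP one page beyond wherever
 * the DMA pointer has currently got to.
 */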
static irqreturn_t scsi_dma_int(int irq, void *dev_id, struct pt_regs *regs)
{
        u32 scsi_next_ptr;

        scsi_next_ptr = ioasic_read(IO_REG_SCSI_DMA_P);

        /* next page */
        scsi_next_ptr = (((scsi_next_ptr >> 3) + PAGE_SIZE) & PAGE_MASK) << 3;
        ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
        fast_iob();

        return IRQ_HANDLED;
}

static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
        return fifo_count;
}

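/*
 * If the IOASIC still holds partial data (up to three 16-bit words in the
 * SDR registers), copy it out to the address the DMA pointer has reached.
 */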
static void dma_drain(struct NCR_ESP *esp)
{
        u32 nw, data0, data1, scsi_data_ptr;
        u16 *p;

        nw = ioasic_read(IO_REG_SCSI_SCR);

        /*
         * Is there anything left in the DMA buffers?
         */
        if (nw) {
                scsi_data_ptr = ioasic_read(IO_REG_SCSI_DMA_P) >> 3;
                p = phys_to_virt(scsi_data_ptr);
                switch (nw) {
                case 1:
                        data0 = ioasic_read(IO_REG_SCSI_SDR0);
                        p[0] = data0 & 0xffff;
                        break;
                case 2:
                        data0 = ioasic_read(IO_REG_SCSI_SDR0);
                        p[0] = data0 & 0xffff;
                        p[1] = (data0 >> 16) & 0xffff;
                        break;
                case 3:
                        data0 = ioasic_read(IO_REG_SCSI_SDR0);
                        data1 = ioasic_read(IO_REG_SCSI_SDR1);
                        p[0] = data0 & 0xffff;
                        p[1] = (data0 >> 16) & 0xffff;
                        p[2] = data1 & 0xffff;
                        break;
                default:
                        printk("Strange: %d words in dma buffer left\n", nw);
                        break;
                }
        }
}

static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
        return sp->SCp.this_residual;
}

static void dma_dump_state(struct NCR_ESP *esp)
{
}

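/*
 * Program an IOASIC DMA transfer from the SCSI chip into memory: disable
 * SCSI DMA, load the current and next-page pointers, then re-enable it
 * with the direction bit set.
 */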
static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
{
        u32 scsi_next_ptr, ioasic_ssr;
        unsigned long flags;

        if (vaddress & 3)
                panic("dec_esp.c: unable to handle partial word transfers, yet...");

        dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);

        spin_lock_irqsave(&ioasic_ssr_lock, flags);

        fast_mb();
        ioasic_ssr = ioasic_read(IO_REG_SSR);

        ioasic_ssr &= ~IO_SSR_SCSI_DMA_EN;
        ioasic_write(IO_REG_SSR, ioasic_ssr);

        fast_wmb();
        ioasic_write(IO_REG_SCSI_SCR, 0);
        ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);

        /* prepare for next page */
        scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
        ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);

        ioasic_ssr |= (IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
        fast_wmb();
        ioasic_write(IO_REG_SSR, ioasic_ssr);

        fast_iob();
        spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
}

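/*
 * As dma_init_read(), but for memory-to-chip transfers: the direction bit
 * is cleared before DMA is re-enabled.
 */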
static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
{
        u32 scsi_next_ptr, ioasic_ssr;
        unsigned long flags;

        if (vaddress & 3)
                panic("dec_esp.c: unable to handle partial word transfers, yet...");

        dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);

        spin_lock_irqsave(&ioasic_ssr_lock, flags);

        fast_mb();
        ioasic_ssr = ioasic_read(IO_REG_SSR);

        ioasic_ssr &= ~(IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
        ioasic_write(IO_REG_SSR, ioasic_ssr);

        fast_wmb();
        ioasic_write(IO_REG_SCSI_SCR, 0);
        ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);

        /* prepare for next page */
        scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
        ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);

        ioasic_ssr |= IO_SSR_SCSI_DMA_EN;
        fast_wmb();
        ioasic_write(IO_REG_SSR, ioasic_ssr);

        fast_iob();
        spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
}

static void dma_ints_off(struct NCR_ESP *esp)
{
        disable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
}

static void dma_ints_on(struct NCR_ESP *esp)
{
        enable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
}

static int dma_irq_p(struct NCR_ESP *esp)
{
        return (esp->eregs->esp_status & ESP_STAT_INTR);
}

static int dma_ports_p(struct NCR_ESP *esp)
{
        /*
         * FIXME: what's this good for?
         */
        return 1;
}

static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
{
        /*
         * DMA_ST_WRITE means "move data from device to memory"
         * so when (write) is true, it actually means READ!
         */
        if (write)
                dma_init_read(esp, addr, count);
        else
                dma_init_write(esp, addr, count);
}

static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
        sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
}

static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
        int sz = sp->SCp.buffers_residual;
        struct scatterlist *sg = sp->SCp.buffer;

        while (sz >= 0) {
                sg[sz].dma_address = page_to_phys(sg[sz].page) + sg[sz].offset;
                sz--;
        }
        sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
}

static void dma_advance_sg(struct scsi_cmnd *sp)
{
        sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
}

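/*
 * Copy data the DMA engine left in the on-board SRAM staging area back to
 * the destination buffer recorded by pmaz_dma_init_read().
 */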
static void pmaz_dma_drain(struct NCR_ESP *esp)
{
        memcpy(phys_to_virt(esp_virt_buffer),
               (void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM +
                                  ESP_TGT_DMA_SIZE),
               scsi_current_length);
}

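/*
 * Point the card's DMA address register at the SRAM staging area and
 * remember where the data ultimately has to go; the copy out to memory
 * happens later in pmaz_dma_drain().
 */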
static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
{
        volatile u32 *dmareg =
                (volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);

        if (length > ESP_TGT_DMA_SIZE)
                length = ESP_TGT_DMA_SIZE;

        *dmareg = TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);

        iob();

        esp_virt_buffer = vaddress;
        scsi_current_length = length;
}

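/*
 * Copy the outgoing data into the SRAM staging area, then start the
 * transfer with the DMAR write bit set.
 */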
static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
{
        volatile u32 *dmareg =
                (volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);

        memcpy((void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM +
                                  ESP_TGT_DMA_SIZE),
               phys_to_virt(vaddress), length);

        wmb();
        *dmareg = TC_ESP_DMAR_WRITE | TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);

        iob();
}

static void pmaz_dma_ints_off(struct NCR_ESP *esp)
{
}

static void pmaz_dma_ints_on(struct NCR_ESP *esp)
{
}

static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
{
        /*
         * DMA_ST_WRITE means "move data from device to memory"
         * so when (write) is true, it actually means READ!
         */
        if (write)
                pmaz_dma_init_read(esp, addr, count);
        else
                pmaz_dma_init_write(esp, addr, count);
}

static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
        sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
}