// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 */


#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"


/* Number of buffers per channel */

#define NUM_TX_BUF      2       /* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF      6       /* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE        1576    /* BUF_SIZE >= mtu + hard_header_len */


/* Cards supported */

#define HW_PI           { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
                          0, 8, 1843200, 3686400 }
#define HW_PI2          { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
                          0, 8, 3686400, 7372800 }
#define HW_TWIN         { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
                          0, 4, 6144000, 6144000 }
#define HW_S5           { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
                          0, 8, 4915200, 9830400 }

#define HARDWARE        { HW_PI, HW_PI2, HW_TWIN, HW_S5 }

#define TMR_0_HZ        25600   /* Frequency of timer 0 */

#define TYPE_PI         0
#define TYPE_PI2        1
#define TYPE_TWIN       2
#define TYPE_S5         3
#define NUM_TYPES       4

#define MAX_NUM_DEVS    32


/* SCC chips supported */

#define Z8530           0
#define Z85C30          1
#define Z85230          2

#define CHIPNAMES       { "Z8530", "Z85C30", "Z85230" }


/* I/O registers */

/* 8530 registers relative to card base */
#define SCCB_CMD        0x00
#define SCCB_DATA       0x01
#define SCCA_CMD        0x02
#define SCCA_DATA       0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0        0x00
#define TMR_CNT1        0x01
#define TMR_CNT2        0x02
#define TMR_CTRL        0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK    0x04

/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG    0x08
#define TWIN_CLR_TMR1   0x09
#define TWIN_CLR_TMR2   0x0a
#define TWIN_SPARE_1    0x0b
#define TWIN_DMA_CFG    0x08
#define TWIN_SERIAL_CFG 0x09
#define TWIN_DMA_CLR_FF 0x0a
#define TWIN_SPARE_2    0x0b


/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK    0x01
#define TWIN_TMR1_MSK   0x02
#define TWIN_TMR2_MSK   0x04
#define TWIN_INT_MSK    0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON    0x01
#define TWIN_DTRB_ON    0x02
#define TWIN_EXTCLKA    0x04
#define TWIN_EXTCLKB    0x08
#define TWIN_LOOPA_ON   0x10
#define TWIN_LOOPB_ON   0x20
#define TWIN_EI         0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1 0x08
#define TWIN_DMA_HDX_R1 0x0a
#define TWIN_DMA_HDX_T3 0x14
#define TWIN_DMA_HDX_R3 0x16
#define TWIN_DMA_FDX_T3R1 0x1b
#define TWIN_DMA_FDX_T1R3 0x1d


/* Status values */

#define IDLE            0
#define TX_HEAD         1
#define TX_DATA         2
#define TX_PAUSE        3
#define TX_TAIL         4
#define RTS_OFF         5
#define WAIT            6
#define DCD_ON          7
#define RX_ON           8
#define DCD_OFF         9


/* Ioctls */

#define SIOCGSCCPARAM   SIOCDEVPRIVATE
#define SIOCSSCCPARAM   (SIOCDEVPRIVATE+1)


/* Data types */

struct scc_param {
        int pclk_hz;            /* frequency of BRG input (don't change) */
        int brg_tc;             /* BRG terminal count; BRG disabled if < 0 */
        int nrzi;               /* 0 (nrz), 1 (nrzi) */
        int clocks;             /* see dmascc_cfg documentation */
        int txdelay;            /* [1/TMR_0_HZ] */
        int txtimeout;          /* [1/HZ] */
        int txtail;             /* [1/TMR_0_HZ] */
        int waittime;           /* [1/TMR_0_HZ] */
        int slottime;           /* [1/TMR_0_HZ] */
        int persist;            /* 1 ... 256 */
        int dma;                /* -1 (disable), 0, 1, 3 */
        int txpause;            /* [1/TMR_0_HZ] */
        int rtsoff;             /* [1/TMR_0_HZ] */
        int dcdon;              /* [1/TMR_0_HZ] */
        int dcdoff;             /* [1/TMR_0_HZ] */
};
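
/* Note: struct scc_param is copied to and from userspace as a whole by the
 * SIOCGSCCPARAM / SIOCSSCCPARAM private ioctls (see scc_ioctl() below), so
 * its layout is effectively part of the driver's user-space interface. */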

struct scc_hardware {
        char *name;
        int io_region;
        int io_delta;
        int io_size;
        int num_devs;
        int scc_offset;
        int tmr_offset;
        int tmr_hz;
        int pclk_hz;
};

struct scc_priv {
        int type;
        int chip;
        struct net_device *dev;
        struct scc_info *info;

        int channel;
        int card_base, scc_cmd, scc_data;
        int tmr_cnt, tmr_ctrl, tmr_mode;
        struct scc_param param;
        char rx_buf[NUM_RX_BUF][BUF_SIZE];
        int rx_len[NUM_RX_BUF];
        int rx_ptr;
        struct work_struct rx_work;
        int rx_head, rx_tail, rx_count;
        int rx_over;
        char tx_buf[NUM_TX_BUF][BUF_SIZE];
        int tx_len[NUM_TX_BUF];
        int tx_ptr;
        int tx_head, tx_tail, tx_count;
        int state;
        unsigned long tx_start;
        int rr0;
        spinlock_t *register_lock;      /* Per scc_info */
        spinlock_t ring_lock;
};

struct scc_info {
        int irq_used;
        int twin_serial_cfg;
        struct net_device *dev[2];
        struct scc_priv priv[2];
        struct scc_info *next;
        spinlock_t register_lock;       /* Per device register lock */
};


/* Function declarations */
static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(struct work_struct *);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);


/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in dmascc_exit(). */
static struct scc_hardware hw[NUM_TYPES] = HARDWARE;


/* Global variables */

static struct scc_info *first;
static unsigned long rand;


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_hw_array(io, int, ioport, NULL, 0);
MODULE_LICENSE("GPL");

static void __exit dmascc_exit(void)
{
        int i;
        struct scc_info *info;

        while (first) {
                info = first;

                /* Unregister devices */
                for (i = 0; i < 2; i++)
                        unregister_netdev(info->dev[i]);

                /* Reset board */
                if (info->priv[0].type == TYPE_TWIN)
                        outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
                write_scc(&info->priv[0], R9, FHWRES);
                release_region(info->dev[0]->base_addr,
                               hw[info->priv[0].type].io_size);

                for (i = 0; i < 2; i++)
                        free_netdev(info->dev[i]);

                /* Free memory */
                first = info->next;
                kfree(info);
        }
}

static int __init dmascc_init(void)
{
        int h, i, j, n;
        int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
            t1[MAX_NUM_DEVS];
        unsigned t_val;
        unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
            counting[MAX_NUM_DEVS];

        /* Initialize random number generator */
        rand = jiffies;
        /* Cards found = 0 */
        n = 0;
        /* Warning message */
        if (!io[0])
                printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

        /* Run autodetection for each card type */
        for (h = 0; h < NUM_TYPES; h++) {

                if (io[0]) {
                        /* User-specified I/O address regions */
                        for (i = 0; i < hw[h].num_devs; i++)
                                base[i] = 0;
                        for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
                                j = (io[i] - hw[h].io_region) / hw[h].io_delta;
                                if (j >= 0 && j < hw[h].num_devs &&
                                    hw[h].io_region +
                                    j * hw[h].io_delta == io[i]) {
                                        base[j] = io[i];
                                }
                        }
                } else {
                        /* Default I/O address regions */
                        for (i = 0; i < hw[h].num_devs; i++) {
                                base[i] =
                                    hw[h].io_region + i * hw[h].io_delta;
                        }
                }

                /* Check valid I/O address regions */
                for (i = 0; i < hw[h].num_devs; i++)
                        if (base[i]) {
                                if (!request_region
                                    (base[i], hw[h].io_size, "dmascc"))
                                        base[i] = 0;
                                else {
                                        tcmd[i] = base[i] +
                                            hw[h].tmr_offset + TMR_CTRL;
                                        t0[i] = base[i] +
                                            hw[h].tmr_offset + TMR_CNT0;
                                        t1[i] = base[i] +
                                            hw[h].tmr_offset + TMR_CNT1;
                                }
                        }

                /* Start timers */
                for (i = 0; i < hw[h].num_devs; i++)
                        if (base[i]) {
                                /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
                                outb(0x36, tcmd[i]);
                                outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
                                     t0[i]);
                                outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
                                     t0[i]);
                                /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
                                outb(0x70, tcmd[i]);
                                outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
                                outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
                                start[i] = jiffies;
                                delay[i] = 0;
                                counting[i] = 1;
                                /* Timer 2: LSB+MSB, Mode 0 */
                                outb(0xb0, tcmd[i]);
                        }
                time = jiffies;
                /* Wait until counter registers are loaded */
                udelay(2000000 / TMR_0_HZ);

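                /* Detection heuristic (a sketch of the intent, inferred from
                 * the code above): timer 0 runs at TMR_0_HZ and, on these
                 * boards, presumably clocks timer 1, which was loaded with
                 * TMR_0_HZ / HZ * 10 counts, i.e. about 10 jiffies worth.
                 * A card is assumed present if timer 1 is seen to expire
                 * after roughly 10 jiffies (the 9..11 window checked below). */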
                /* Timing loop */
                while (jiffies - time < 13) {
                        for (i = 0; i < hw[h].num_devs; i++)
                                if (base[i] && counting[i]) {
                                        /* Read back Timer 1: latch; read LSB; read MSB */
                                        outb(0x40, tcmd[i]);
                                        t_val =
                                            inb(t1[i]) + (inb(t1[i]) << 8);
                                        /* Also check whether counter did wrap */
                                        if (t_val == 0 ||
                                            t_val > TMR_0_HZ / HZ * 10)
                                                counting[i] = 0;
                                        delay[i] = jiffies - start[i];
                                }
                }

                /* Evaluate measurements */
                for (i = 0; i < hw[h].num_devs; i++)
                        if (base[i]) {
                                if ((delay[i] >= 9 && delay[i] <= 11) &&
                                    /* Ok, we have found an adapter */
                                    (setup_adapter(base[i], h, n) == 0))
                                        n++;
                                else
                                        release_region(base[i],
                                                       hw[h].io_size);
                        }

        }                       /* NUM_TYPES */

        /* If any adapter was successfully initialized, return ok */
        if (n)
                return 0;

        /* If no adapter found, return error */
        printk(KERN_INFO "dmascc: no adapters found\n");
        return -EIO;
}

module_init(dmascc_init);
module_exit(dmascc_exit);

static void __init dev_setup(struct net_device *dev)
{
        dev->type = ARPHRD_AX25;
        dev->hard_header_len = AX25_MAX_HEADER_LEN;
        dev->mtu = 1500;
        dev->addr_len = AX25_ADDR_LEN;
        dev->tx_queue_len = 64;
        memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
        memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}

static const struct net_device_ops scc_netdev_ops = {
        .ndo_open = scc_open,
        .ndo_stop = scc_close,
        .ndo_start_xmit = scc_send_packet,
        .ndo_do_ioctl = scc_ioctl,
        .ndo_set_mac_address = scc_set_mac_address,
};

static int __init setup_adapter(int card_base, int type, int n)
{
        int i, irq, chip, err;
        struct scc_info *info;
        struct net_device *dev;
        struct scc_priv *priv;
        unsigned long time;
        unsigned int irqs;
        int tmr_base = card_base + hw[type].tmr_offset;
        int scc_base = card_base + hw[type].scc_offset;
        char *chipnames[] = CHIPNAMES;

        /* Initialize what is necessary for write_scc and write_scc_data */
        info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
        if (!info) {
                err = -ENOMEM;
                goto out;
        }

        info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
        if (!info->dev[0]) {
                printk(KERN_ERR "dmascc: "
                       "could not allocate memory for %s at %#3x\n",
                       hw[type].name, card_base);
                err = -ENOMEM;
                goto out1;
        }

        info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
        if (!info->dev[1]) {
                printk(KERN_ERR "dmascc: "
                       "could not allocate memory for %s at %#3x\n",
                       hw[type].name, card_base);
                err = -ENOMEM;
                goto out2;
        }
        spin_lock_init(&info->register_lock);

        priv = &info->priv[0];
        priv->type = type;
        priv->card_base = card_base;
        priv->scc_cmd = scc_base + SCCA_CMD;
        priv->scc_data = scc_base + SCCA_DATA;
        priv->register_lock = &info->register_lock;

        /* Reset SCC */
        write_scc(priv, R9, FHWRES | MIE | NV);

        /* Determine type of chip by enabling SDLC/HDLC enhancements */
        write_scc(priv, R15, SHDLCE);
        if (!read_scc(priv, R15)) {
                /* WR7' not present. This is an ordinary Z8530 SCC. */
                chip = Z8530;
        } else {
                /* Put one character in TX FIFO */
                write_scc_data(priv, 0, 0);
                if (read_scc(priv, R0) & Tx_BUF_EMP) {
                        /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
                        chip = Z85230;
                } else {
                        /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
                        chip = Z85C30;
                }
        }
        write_scc(priv, R15, 0);

        /* Start IRQ auto-detection */
        irqs = probe_irq_on();

        /* Enable interrupts */
        if (type == TYPE_TWIN) {
                outb(0, card_base + TWIN_DMA_CFG);
                inb(card_base + TWIN_CLR_TMR1);
                inb(card_base + TWIN_CLR_TMR2);
                info->twin_serial_cfg = TWIN_EI;
                outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
        } else {
                write_scc(priv, R15, CTSIE);
                write_scc(priv, R0, RES_EXT_INT);
                write_scc(priv, R1, EXT_INT_ENAB);
        }

        /* Start timer */
        outb(1, tmr_base + TMR_CNT1);
        outb(0, tmr_base + TMR_CNT1);

        /* Wait and detect IRQ */
        time = jiffies;
        while (jiffies - time < 2 + HZ / TMR_0_HZ);
        irq = probe_irq_off(irqs);

        /* Clear pending interrupt, disable interrupts */
        if (type == TYPE_TWIN) {
                inb(card_base + TWIN_CLR_TMR1);
        } else {
                write_scc(priv, R1, 0);
                write_scc(priv, R15, 0);
                write_scc(priv, R0, RES_EXT_INT);
        }

        if (irq <= 0) {
                printk(KERN_ERR
                       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
                       hw[type].name, card_base, irq);
                err = -ENODEV;
                goto out3;
        }

        /* Set up data structures */
        for (i = 0; i < 2; i++) {
                dev = info->dev[i];
                priv = &info->priv[i];
                priv->type = type;
                priv->chip = chip;
                priv->dev = dev;
                priv->info = info;
                priv->channel = i;
                spin_lock_init(&priv->ring_lock);
                priv->register_lock = &info->register_lock;
                priv->card_base = card_base;
                priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
                priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
                priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
                priv->tmr_ctrl = tmr_base + TMR_CTRL;
                priv->tmr_mode = i ? 0xb0 : 0x70;
                priv->param.pclk_hz = hw[type].pclk_hz;
                priv->param.brg_tc = -1;
                priv->param.clocks = TCTRxCP | RCRTxCP;
                priv->param.persist = 256;
                priv->param.dma = -1;
                INIT_WORK(&priv->rx_work, rx_bh);
                dev->ml_priv = priv;
                snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i);
                dev->base_addr = card_base;
                dev->irq = irq;
                dev->netdev_ops = &scc_netdev_ops;
                dev->header_ops = &ax25_header_ops;
        }
        if (register_netdev(info->dev[0])) {
                printk(KERN_ERR "dmascc: could not register %s\n",
                       info->dev[0]->name);
                err = -ENODEV;
                goto out3;
        }
        if (register_netdev(info->dev[1])) {
                printk(KERN_ERR "dmascc: could not register %s\n",
                       info->dev[1]->name);
                err = -ENODEV;
                goto out4;
        }


        info->next = first;
        first = info;
        printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
               hw[type].name, chipnames[chip], card_base, irq);
        return 0;

      out4:
        unregister_netdev(info->dev[0]);
      out3:
        if (info->priv[0].type == TYPE_TWIN)
                outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
        write_scc(&info->priv[0], R9, FHWRES);
        free_netdev(info->dev[1]);
      out2:
        free_netdev(info->dev[0]);
      out1:
        kfree(info);
      out:
        return err;
}


/* Driver functions */

static void write_scc(struct scc_priv *priv, int reg, int val)
{
        unsigned long flags;
        switch (priv->type) {
        case TYPE_S5:
                if (reg)
                        outb(reg, priv->scc_cmd);
                outb(val, priv->scc_cmd);
                return;
        case TYPE_TWIN:
                if (reg)
                        outb_p(reg, priv->scc_cmd);
                outb_p(val, priv->scc_cmd);
                return;
        default:
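                /* PI/PI2 path (an assumption about the hardware intent): DMA
                 * requests are masked via PI_DREQ_MASK while the SCC
                 * registers are accessed, and re-enabled afterwards. */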
                spin_lock_irqsave(priv->register_lock, flags);
                outb_p(0, priv->card_base + PI_DREQ_MASK);
                if (reg)
                        outb_p(reg, priv->scc_cmd);
                outb_p(val, priv->scc_cmd);
                outb(1, priv->card_base + PI_DREQ_MASK);
                spin_unlock_irqrestore(priv->register_lock, flags);
                return;
        }
}


static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
        unsigned long flags;
        switch (priv->type) {
        case TYPE_S5:
                outb(val, priv->scc_data);
                return;
        case TYPE_TWIN:
                outb_p(val, priv->scc_data);
                return;
        default:
                if (fast)
                        outb_p(val, priv->scc_data);
                else {
                        spin_lock_irqsave(priv->register_lock, flags);
                        outb_p(0, priv->card_base + PI_DREQ_MASK);
                        outb_p(val, priv->scc_data);
                        outb(1, priv->card_base + PI_DREQ_MASK);
                        spin_unlock_irqrestore(priv->register_lock, flags);
                }
                return;
        }
}


static int read_scc(struct scc_priv *priv, int reg)
{
        int rc;
        unsigned long flags;
        switch (priv->type) {
        case TYPE_S5:
                if (reg)
                        outb(reg, priv->scc_cmd);
                return inb(priv->scc_cmd);
        case TYPE_TWIN:
                if (reg)
                        outb_p(reg, priv->scc_cmd);
                return inb_p(priv->scc_cmd);
        default:
                spin_lock_irqsave(priv->register_lock, flags);
                outb_p(0, priv->card_base + PI_DREQ_MASK);
                if (reg)
                        outb_p(reg, priv->scc_cmd);
                rc = inb_p(priv->scc_cmd);
                outb(1, priv->card_base + PI_DREQ_MASK);
                spin_unlock_irqrestore(priv->register_lock, flags);
                return rc;
        }
}


static int read_scc_data(struct scc_priv *priv)
{
        int rc;
        unsigned long flags;
        switch (priv->type) {
        case TYPE_S5:
                return inb(priv->scc_data);
        case TYPE_TWIN:
                return inb_p(priv->scc_data);
        default:
                spin_lock_irqsave(priv->register_lock, flags);
                outb_p(0, priv->card_base + PI_DREQ_MASK);
                rc = inb_p(priv->scc_data);
                outb(1, priv->card_base + PI_DREQ_MASK);
                spin_unlock_irqrestore(priv->register_lock, flags);
                return rc;
        }
}


static int scc_open(struct net_device *dev)
{
        struct scc_priv *priv = dev->ml_priv;
        struct scc_info *info = priv->info;
        int card_base = priv->card_base;

        /* Request IRQ if not already used by other channel */
        if (!info->irq_used) {
                if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
                        return -EAGAIN;
                }
        }
        info->irq_used++;

        /* Request DMA if required */
        if (priv->param.dma >= 0) {
                if (request_dma(priv->param.dma, "dmascc")) {
                        if (--info->irq_used == 0)
                                free_irq(dev->irq, info);
                        return -EAGAIN;
                } else {
                        unsigned long flags = claim_dma_lock();
                        clear_dma_ff(priv->param.dma);
                        release_dma_lock(flags);
                }
        }

        /* Initialize local variables */
        priv->rx_ptr = 0;
        priv->rx_over = 0;
        priv->rx_head = priv->rx_tail = priv->rx_count = 0;
        priv->state = IDLE;
        priv->tx_head = priv->tx_tail = priv->tx_count = 0;
        priv->tx_ptr = 0;

        /* Reset channel */
        write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
        /* X1 clock, SDLC mode */
        write_scc(priv, R4, SDLC | X1CLK);
        /* DMA */
        write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
        /* 8 bit RX char, RX disable */
        write_scc(priv, R3, Rx8);
        /* 8 bit TX char, TX disable */
        write_scc(priv, R5, Tx8);
        /* SDLC address field */
        write_scc(priv, R6, 0);
        /* SDLC flag */
        write_scc(priv, R7, FLAG);
        switch (priv->chip) {
        case Z85C30:
                /* Select WR7' */
                write_scc(priv, R15, SHDLCE);
                /* Auto EOM reset */
                write_scc(priv, R7, AUTOEOM);
                write_scc(priv, R15, 0);
                break;
        case Z85230:
                /* Select WR7' */
                write_scc(priv, R15, SHDLCE);
                /* The following bits are set (see 2.5.2.1):
                   - Automatic EOM reset
                   - Interrupt request if RX FIFO is half full
                     This bit should be ignored in DMA mode (according to the
                     documentation), but actually isn't. The receiver doesn't
                     work if it is set. Thus, we have to clear it in DMA mode.
                   - Interrupt/DMA request if TX FIFO is completely empty
                     a) If set, the ESCC behaves as if it had no TX FIFO
                        (Z85C30 compatibility).
                     b) If cleared, DMA requests may follow each other very
                        quickly, filling up the TX FIFO.
                        Advantage: TX works even in case of high bus latency.
                        Disadvantage: Edge-triggered DMA request circuitry may
                                      miss a request. No more data is
                                      delivered, resulting in a TX FIFO
                                      underrun.
                   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE
                   cleared. The PackeTwin doesn't. I don't know about the PI,
                   but let's assume it behaves like the PI2.
                 */
                if (priv->param.dma >= 0) {
                        if (priv->type == TYPE_TWIN)
                                write_scc(priv, R7, AUTOEOM | TXFIFOE);
                        else
                                write_scc(priv, R7, AUTOEOM);
                } else {
                        write_scc(priv, R7, AUTOEOM | RXFIFOH);
                }
                write_scc(priv, R15, 0);
                break;
        }
        /* Preset CRC, NRZ(I) encoding */
        write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

        /* Configure baud rate generator */
        if (priv->param.brg_tc >= 0) {
                /* Program BR generator */
                write_scc(priv, R12, priv->param.brg_tc & 0xFF);
                write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
                /* BRG source = SYS CLK; enable BRG; DTR REQ function (required
                   by PackeTwin, not connected on the PI2); set DPLL source to
                   BRG */
                write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
                /* Enable DPLL */
                write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
        } else {
                /* Disable BR generator */
                write_scc(priv, R14, DTRREQ | BRSRC);
        }

        /* Configure clocks */
        if (priv->type == TYPE_TWIN) {
                /* Disable external TX clock receiver */
                outb((info->twin_serial_cfg &=
                      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
                     card_base + TWIN_SERIAL_CFG);
        }
        write_scc(priv, R11, priv->param.clocks);
        if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
                /* Enable external TX clock receiver */
                outb((info->twin_serial_cfg |=
                      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
                     card_base + TWIN_SERIAL_CFG);
        }

        /* Configure PackeTwin */
        if (priv->type == TYPE_TWIN) {
                /* Assert DTR, enable interrupts */
                outb((info->twin_serial_cfg |= TWIN_EI |
                      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
                     card_base + TWIN_SERIAL_CFG);
        }

        /* Read current status */
        priv->rr0 = read_scc(priv, R0);
        /* Enable DCD interrupt */
        write_scc(priv, R15, DCDIE);

        netif_start_queue(dev);

        return 0;
}


static int scc_close(struct net_device *dev)
{
        struct scc_priv *priv = dev->ml_priv;
        struct scc_info *info = priv->info;
        int card_base = priv->card_base;

        netif_stop_queue(dev);

        if (priv->type == TYPE_TWIN) {
                /* Drop DTR */
                outb((info->twin_serial_cfg &=
                      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
                     card_base + TWIN_SERIAL_CFG);
        }

        /* Reset channel, free DMA and IRQ */
        write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
        if (priv->param.dma >= 0) {
                if (priv->type == TYPE_TWIN)
                        outb(0, card_base + TWIN_DMA_CFG);
                free_dma(priv->param.dma);
        }
        if (--info->irq_used == 0)
                free_irq(dev->irq, info);

        return 0;
}


static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct scc_priv *priv = dev->ml_priv;

        switch (cmd) {
        case SIOCGSCCPARAM:
                if (copy_to_user(ifr->ifr_data, &priv->param,
                                 sizeof(struct scc_param)))
                        return -EFAULT;
                return 0;
        case SIOCSSCCPARAM:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (netif_running(dev))
                        return -EAGAIN;
                if (copy_from_user(&priv->param, ifr->ifr_data,
                                   sizeof(struct scc_param)))
                        return -EFAULT;
                return 0;
        default:
                return -EINVAL;
        }
}


static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct scc_priv *priv = dev->ml_priv;
        unsigned long flags;
        int i;

        if (skb->protocol == htons(ETH_P_IP))
                return ax25_ip_xmit(skb);

        /* Temporarily stop the scheduler feeding us packets */
        netif_stop_queue(dev);

        /* Transfer data to DMA buffer */
        i = priv->tx_head;
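        /* The first byte of an AX.25 skb is the KISS command byte (0x00 for
         * a data frame) and is not sent on air, so the copy below starts at
         * offset 1 (this reading of the framing is an assumption). */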
        skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
        priv->tx_len[i] = skb->len - 1;

        /* Clear interrupts while we touch our circular buffers */

        spin_lock_irqsave(&priv->ring_lock, flags);
        /* Move the ring buffer's head */
        priv->tx_head = (i + 1) % NUM_TX_BUF;
        priv->tx_count++;

        /* If we just filled up the last buffer, leave queue stopped.
           The higher layers must wait until we have a DMA buffer
           to accept the data. */
        if (priv->tx_count < NUM_TX_BUF)
                netif_wake_queue(dev);

        /* Set new TX state */
        if (priv->state == IDLE) {
                /* Assert RTS, start timer */
                priv->state = TX_HEAD;
                priv->tx_start = jiffies;
                write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
                write_scc(priv, R15, 0);
                start_timer(priv, priv->param.txdelay, 0);
        }

        /* Turn interrupts back on and free buffer */
        spin_unlock_irqrestore(&priv->ring_lock, flags);
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}


static int scc_set_mac_address(struct net_device *dev, void *sa)
{
        memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
               dev->addr_len);
        return 0;
}


static inline void tx_on(struct scc_priv *priv)
{
        int i, n;
        unsigned long flags;

        if (priv->param.dma >= 0) {
                n = (priv->chip == Z85230) ? 3 : 1;
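                /* Prime the transmitter by PIO with the first byte(s) below
                 * and let DMA feed the rest of the frame; n = 3 for the
                 * Z85230 presumably because its deeper ESCC TX FIFO can take
                 * more than one byte up front (an assumption, not spelled
                 * out here). */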
                /* Program DMA controller */
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
                set_dma_addr(priv->param.dma,
                             (int) priv->tx_buf[priv->tx_tail] + n);
                set_dma_count(priv->param.dma,
                              priv->tx_len[priv->tx_tail] - n);
                release_dma_lock(flags);
                /* Enable TX underrun interrupt */
                write_scc(priv, R15, TxUIE);
                /* Configure DREQ */
                if (priv->type == TYPE_TWIN)
                        outb((priv->param.dma ==
                              1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
                             priv->card_base + TWIN_DMA_CFG);
                else
                        write_scc(priv, R1,
                                  EXT_INT_ENAB | WT_FN_RDYFN |
                                  WT_RDY_ENAB);
                /* Write first byte(s) */
                spin_lock_irqsave(priv->register_lock, flags);
                for (i = 0; i < n; i++)
                        write_scc_data(priv,
                                       priv->tx_buf[priv->tx_tail][i], 1);
                enable_dma(priv->param.dma);
                spin_unlock_irqrestore(priv->register_lock, flags);
        } else {
                write_scc(priv, R15, TxUIE);
                write_scc(priv, R1,
                          EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
                tx_isr(priv);
        }
        /* Reset EOM latch if we do not have the AUTOEOM feature */
        if (priv->chip == Z8530)
                write_scc(priv, R0, RES_EOM_L);
}


static inline void rx_on(struct scc_priv *priv)
{
        unsigned long flags;

        /* Clear RX FIFO */
        while (read_scc(priv, R0) & Rx_CH_AV)
                read_scc_data(priv);
        priv->rx_over = 0;
        if (priv->param.dma >= 0) {
                /* Program DMA controller */
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_READ);
                set_dma_addr(priv->param.dma,
                             (int) priv->rx_buf[priv->rx_head]);
                set_dma_count(priv->param.dma, BUF_SIZE);
                release_dma_lock(flags);
                enable_dma(priv->param.dma);
                /* Configure PackeTwin DMA */
                if (priv->type == TYPE_TWIN) {
                        outb((priv->param.dma ==
                              1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
                             priv->card_base + TWIN_DMA_CFG);
                }
                /* Sp. cond. intr. only, ext int enable, RX DMA enable */
                write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
                          WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
        } else {
                /* Reset current frame */
                priv->rx_ptr = 0;
                /* Intr. on all Rx characters and Sp. cond., ext int enable */
                write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
                          WT_FN_RDYFN);
        }
        write_scc(priv, R0, ERR_RES);
        write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}


static inline void rx_off(struct scc_priv *priv)
{
        /* Disable receiver */
        write_scc(priv, R3, Rx8);
        /* Disable DREQ / RX interrupt */
        if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
                outb(0, priv->card_base + TWIN_DMA_CFG);
        else
                write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
        /* Disable DMA */
        if (priv->param.dma >= 0)
                disable_dma(priv->param.dma);
}


static void start_timer(struct scc_priv *priv, int t, int r15)
{
        outb(priv->tmr_mode, priv->tmr_ctrl);
        if (t == 0) {
                tm_isr(priv);
        } else if (t > 0) {
                outb(t & 0xFF, priv->tmr_cnt);
                outb((t >> 8) & 0xFF, priv->tmr_cnt);
                if (priv->type != TYPE_TWIN) {
                        write_scc(priv, R15, r15 | CTSIE);
                        priv->rr0 |= CTS;
                }
        }
}


static inline unsigned char random(void)
{
        /* See "Numerical Recipes in C", second edition, p. 284 */
        rand = rand * 1664525L + 1013904223L;
        return (unsigned char) (rand >> 24);
}

static inline void z8530_isr(struct scc_info *info)
{
        int is, i = 100;

        while ((is = read_scc(&info->priv[0], R3)) && i--) {
                if (is & CHARxIP) {
                        rx_isr(&info->priv[0]);
                } else if (is & CHATxIP) {
                        tx_isr(&info->priv[0]);
                } else if (is & CHAEXT) {
                        es_isr(&info->priv[0]);
                } else if (is & CHBRxIP) {
                        rx_isr(&info->priv[1]);
                } else if (is & CHBTxIP) {
                        tx_isr(&info->priv[1]);
                } else {
                        es_isr(&info->priv[1]);
                }
                write_scc(&info->priv[0], R0, RES_H_IUS);
                i++;
        }
        if (i < 0) {
                printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
                       is);
        }
        /* Ok, no interrupts pending from this 8530. The INT line should
           be inactive now. */
}


static irqreturn_t scc_isr(int irq, void *dev_id)
{
        struct scc_info *info = dev_id;

        spin_lock(info->priv[0].register_lock);
        /* At this point interrupts are enabled, and the interrupt under
           service is already acknowledged, but masked off.

           Interrupt processing: We loop until we know that the IRQ line is
           low. If another positive edge occurs afterwards during the ISR,
           another interrupt will be triggered by the interrupt controller
           as soon as the IRQ level is enabled again (see asm/irq.h).

           Bottom-half handlers will be processed after scc_isr(). This is
           important, since we only have small ringbuffers and want new data
           to be fetched/delivered immediately. */

        if (info->priv[0].type == TYPE_TWIN) {
                int is, card_base = info->priv[0].card_base;
                while ((is = ~inb(card_base + TWIN_INT_REG)) &
                       TWIN_INT_MSK) {
                        if (is & TWIN_SCC_MSK) {
                                z8530_isr(info);
                        } else if (is & TWIN_TMR1_MSK) {
                                inb(card_base + TWIN_CLR_TMR1);
                                tm_isr(&info->priv[0]);
                        } else {
                                inb(card_base + TWIN_CLR_TMR2);
                                tm_isr(&info->priv[1]);
                        }
                }
        } else
                z8530_isr(info);
        spin_unlock(info->priv[0].register_lock);
        return IRQ_HANDLED;
}


static void rx_isr(struct scc_priv *priv)
{
        if (priv->param.dma >= 0) {
                /* Check special condition and perform error reset. See 2.4.7.5. */
                special_condition(priv, read_scc(priv, R1));
                write_scc(priv, R0, ERR_RES);
        } else {
                /* Check special condition for each character. Error reset not
                   necessary. Same algorithm for SCC and ESCC. See 2.4.7.1 and
                   2.4.7.4. */
                int rc;
                while (read_scc(priv, R0) & Rx_CH_AV) {
                        rc = read_scc(priv, R1);
                        if (priv->rx_ptr < BUF_SIZE)
                                priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
                                    read_scc_data(priv);
                        else {
                                priv->rx_over = 2;
                                read_scc_data(priv);
                        }
                        special_condition(priv, rc);
                }
        }
}


static void special_condition(struct scc_priv *priv, int rc)
{
        int cb;
        unsigned long flags;

        /* See Figure 2-15. Only overrun and EOF need to be checked. */

        if (rc & Rx_OVR) {
                /* Receiver overrun */
                priv->rx_over = 1;
                if (priv->param.dma < 0)
                        write_scc(priv, R0, ERR_RES);
        } else if (rc & END_FR) {
                /* End of frame. Get byte count */
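                /* The count excludes the two SDLC CRC bytes that the SCC
                 * appends to each received frame (hence the "- 2" below). */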
                if (priv->param.dma >= 0) {
                        flags = claim_dma_lock();
                        cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
                        release_dma_lock(flags);
                } else {
                        cb = priv->rx_ptr - 2;
                }
                if (priv->rx_over) {
                        /* We had an overrun */
                        priv->dev->stats.rx_errors++;
                        if (priv->rx_over == 2)
                                priv->dev->stats.rx_length_errors++;
                        else
                                priv->dev->stats.rx_fifo_errors++;
                        priv->rx_over = 0;
                } else if (rc & CRC_ERR) {
                        /* Count invalid CRC only if packet length >= minimum */
                        if (cb >= 15) {
                                priv->dev->stats.rx_errors++;
                                priv->dev->stats.rx_crc_errors++;
                        }
                } else {
                        if (cb >= 15) {
                                if (priv->rx_count < NUM_RX_BUF - 1) {
                                        /* Put good frame in FIFO */
                                        priv->rx_len[priv->rx_head] = cb;
                                        priv->rx_head =
                                            (priv->rx_head + 1) % NUM_RX_BUF;
                                        priv->rx_count++;
                                        schedule_work(&priv->rx_work);
                                } else {
                                        priv->dev->stats.rx_errors++;
                                        priv->dev->stats.rx_over_errors++;
                                }
                        }
                }
                /* Get ready for new frame */
                if (priv->param.dma >= 0) {
                        flags = claim_dma_lock();
                        set_dma_addr(priv->param.dma,
                                     (int) priv->rx_buf[priv->rx_head]);
                        set_dma_count(priv->param.dma, BUF_SIZE);
                        release_dma_lock(flags);
                } else {
                        priv->rx_ptr = 0;
                }
        }
}


static void rx_bh(struct work_struct *ugli_api)
{
        struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
        int i = priv->rx_tail;
        int cb;
        unsigned long flags;
        struct sk_buff *skb;
        unsigned char *data;

        spin_lock_irqsave(&priv->ring_lock, flags);
        while (priv->rx_count) {
                spin_unlock_irqrestore(&priv->ring_lock, flags);
                cb = priv->rx_len[i];
                /* Allocate buffer */
                skb = dev_alloc_skb(cb + 1);
                if (skb == NULL) {
                        /* Drop packet */
                        priv->dev->stats.rx_dropped++;
                } else {
                        /* Fill buffer */
                        data = skb_put(skb, cb + 1);
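                        /* The leading zero byte is the KISS "data frame"
                         * marker that the AX.25 stack expects in front of the
                         * received frame. */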
                        data[0] = 0;
                        memcpy(&data[1], priv->rx_buf[i], cb);
                        skb->protocol = ax25_type_trans(skb, priv->dev);
                        netif_rx(skb);
                        priv->dev->stats.rx_packets++;
                        priv->dev->stats.rx_bytes += cb;
                }
                spin_lock_irqsave(&priv->ring_lock, flags);
                /* Move tail */
                priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
                priv->rx_count--;
        }
        spin_unlock_irqrestore(&priv->ring_lock, flags);
}


static void tx_isr(struct scc_priv *priv)
{
        int i = priv->tx_tail, p = priv->tx_ptr;

        /* Suspend TX interrupts if we don't want to send anything.
           See Figure 2-22. */
        if (p == priv->tx_len[i]) {
                write_scc(priv, R0, RES_Tx_P);
                return;
        }

        /* Write characters */
        while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
                write_scc_data(priv, priv->tx_buf[i][p++], 0);
        }

        /* Reset EOM latch of Z8530 */
        if (!priv->tx_ptr && p && priv->chip == Z8530)
                write_scc(priv, R0, RES_EOM_L);

        priv->tx_ptr = p;
}


static void es_isr(struct scc_priv *priv)
{
        int i, rr0, drr0, res;
        unsigned long flags;

        /* Read status, reset interrupt bit (open latches) */
        rr0 = read_scc(priv, R0);
        write_scc(priv, R0, RES_EXT_INT);
        drr0 = priv->rr0 ^ rr0;
        priv->rr0 = rr0;

        /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
           it might have already been cleared again by AUTOEOM. */
        if (priv->state == TX_DATA) {
                /* Get remaining bytes */
                i = priv->tx_tail;
                if (priv->param.dma >= 0) {
                        disable_dma(priv->param.dma);
                        flags = claim_dma_lock();
                        res = get_dma_residue(priv->param.dma);
                        release_dma_lock(flags);
                } else {
                        res = priv->tx_len[i] - priv->tx_ptr;
                        priv->tx_ptr = 0;
                }
                /* Disable DREQ / TX interrupt */
                if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
                        outb(0, priv->card_base + TWIN_DMA_CFG);
                else
                        write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
                if (res) {
                        /* Update packet statistics */
                        priv->dev->stats.tx_errors++;
                        priv->dev->stats.tx_fifo_errors++;
                        /* Other underrun interrupts may already be waiting */
                        write_scc(priv, R0, RES_EXT_INT);
                        write_scc(priv, R0, RES_EXT_INT);
                } else {
                        /* Update packet statistics */
                        priv->dev->stats.tx_packets++;
                        priv->dev->stats.tx_bytes += priv->tx_len[i];
                        /* Remove frame from FIFO */
                        priv->tx_tail = (i + 1) % NUM_TX_BUF;
                        priv->tx_count--;
                        /* Inform upper layers */
                        netif_wake_queue(priv->dev);
                }
                /* Switch state */
                write_scc(priv, R15, 0);
                if (priv->tx_count &&
                    (jiffies - priv->tx_start) < priv->param.txtimeout) {
                        priv->state = TX_PAUSE;
                        start_timer(priv, priv->param.txpause, 0);
                } else {
                        priv->state = TX_TAIL;
                        start_timer(priv, priv->param.txtail, 0);
                }
        }

        /* DCD transition */
        if (drr0 & DCD) {
                if (rr0 & DCD) {
                        switch (priv->state) {
                        case IDLE:
                        case WAIT:
                                priv->state = DCD_ON;
                                write_scc(priv, R15, 0);
                                start_timer(priv, priv->param.dcdon, 0);
                        }
                } else {
                        switch (priv->state) {
                        case RX_ON:
                                rx_off(priv);
                                priv->state = DCD_OFF;
                                write_scc(priv, R15, 0);
                                start_timer(priv, priv->param.dcdoff, 0);
                        }
                }
        }

        /* CTS transition */
        if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
                tm_isr(priv);

}


static void tm_isr(struct scc_priv *priv)
{
        switch (priv->state) {
        case TX_HEAD:
        case TX_PAUSE:
                tx_on(priv);
                priv->state = TX_DATA;
                break;
        case TX_TAIL:
                write_scc(priv, R5, TxCRC_ENAB | Tx8);
                priv->state = RTS_OFF;
                if (priv->type != TYPE_TWIN)
                        write_scc(priv, R15, 0);
                start_timer(priv, priv->param.rtsoff, 0);
                break;
        case RTS_OFF:
                write_scc(priv, R15, DCDIE);
                priv->rr0 = read_scc(priv, R0);
                if (priv->rr0 & DCD) {
                        priv->dev->stats.collisions++;
                        rx_on(priv);
                        priv->state = RX_ON;
                } else {
                        priv->state = WAIT;
                        start_timer(priv, priv->param.waittime, DCDIE);
                }
                break;
        case WAIT:
                if (priv->tx_count) {
                        priv->state = TX_HEAD;
                        priv->tx_start = jiffies;
                        write_scc(priv, R5,
                                  TxCRC_ENAB | RTS | TxENAB | Tx8);
                        write_scc(priv, R15, 0);
                        start_timer(priv, priv->param.txdelay, 0);
                } else {
                        priv->state = IDLE;
                        if (priv->type != TYPE_TWIN)
                                write_scc(priv, R15, DCDIE);
                }
                break;
        case DCD_ON:
        case DCD_OFF:
                write_scc(priv, R15, DCDIE);
                priv->rr0 = read_scc(priv, R0);
                if (priv->rr0 & DCD) {
                        rx_on(priv);
                        priv->state = RX_ON;
                } else {
                        priv->state = WAIT;
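                        /* Persistence back-off (CSMA p-persistence): random()
                         * returns 0..255 and persist is 1..256, so the integer
                         * division below yields a random number of slot times
                         * (possibly zero) to wait before trying again. */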
                        start_timer(priv,
                                    random() / priv->param.persist *
                                    priv->param.slottime, DCDIE);
                }
                break;
        }
}