1 /*
2 * linux/drivers/char/synclink.c
3 *
4 * $Id: synclink.c,v 4.37 2005/09/07 13:13:19 paulkf Exp $
5 *
6 * Device driver for Microgate SyncLink ISA and PCI
7 * high speed multiprotocol serial adapters.
8 *
9 * written by Paul Fulghum for Microgate Corporation
10 * paulkf@microgate.com
11 *
12 * Microgate and SyncLink are trademarks of Microgate Corporation
13 *
14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
15 *
16 * Original release 01/11/99
17 *
18 * This code is released under the GNU General Public License (GPL)
19 *
20 * This driver is primarily intended for use in synchronous
21 * HDLC mode. Asynchronous mode is also provided.
22 *
23 * When operating in synchronous mode, each call to mgsl_write()
24 * contains exactly one complete HDLC frame. Calling mgsl_put_char
25 * will start assembling an HDLC frame that will not be sent until
26 * mgsl_flush_chars or mgsl_write is called.
27 *
28 * Synchronous receive data is reported as complete frames. To accomplish
29 * this, the TTY flip buffer is bypassed (too small to hold largest
30 * frame and may fragment frames) and the line discipline
31 * receive entry point is called directly.
32 *
33 * This driver has been tested with a slightly modified ppp.c driver
34 * for synchronous PPP.
35 *
36 * 2000/02/16
37 * Added interface for syncppp.c driver (an alternate synchronous PPP
38 * implementation that also supports Cisco HDLC). Each device instance
39 * registers as a tty device AND a network device (if dosyncppp option
40 * is set for the device). The functionality is determined by which
41 * device interface is opened.
42 *
43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
46 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
53 * OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
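/*
 * Usage sketch (illustrative only; the device node name is hypothetical):
 * in synchronous HDLC mode a user-space program passes one complete
 * frame per write() call, as described above.
 *
 *	int fd = open("/dev/ttySL0", O_RDWR);
 *	write(fd, frame_buf, frame_len);	// one call == one HDLC frame
 */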
55
56 #if defined(__i386__)
57 # define BREAKPOINT() asm(" int $3");
58 #else
59 # define BREAKPOINT() { }
60 #endif
61
62 #define MAX_ISA_DEVICES 10
63 #define MAX_PCI_DEVICES 10
64 #define MAX_TOTAL_DEVICES 20
65
66 #include <linux/config.h>
67 #include <linux/module.h>
68 #include <linux/errno.h>
69 #include <linux/signal.h>
70 #include <linux/sched.h>
71 #include <linux/timer.h>
72 #include <linux/interrupt.h>
73 #include <linux/pci.h>
74 #include <linux/tty.h>
75 #include <linux/tty_flip.h>
76 #include <linux/serial.h>
77 #include <linux/major.h>
78 #include <linux/string.h>
79 #include <linux/fcntl.h>
80 #include <linux/ptrace.h>
81 #include <linux/ioport.h>
82 #include <linux/mm.h>
83 #include <linux/slab.h>
84 #include <linux/delay.h>
85
86 #include <linux/netdevice.h>
87
88 #include <linux/vmalloc.h>
89 #include <linux/init.h>
90 #include <asm/serial.h>
91
92 #include <linux/delay.h>
93 #include <linux/ioctl.h>
94
95 #include <asm/system.h>
96 #include <asm/io.h>
97 #include <asm/irq.h>
98 #include <asm/dma.h>
99 #include <linux/bitops.h>
100 #include <asm/types.h>
101 #include <linux/termios.h>
102 #include <linux/workqueue.h>
103 #include <linux/hdlc.h>
104
105 #ifdef CONFIG_HDLC_MODULE
106 #define CONFIG_HDLC 1
107 #endif
108
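/* convenience wrappers below: get_user()/put_user() already return 0 or
 * -EFAULT; copy_from_user()/copy_to_user() return the number of bytes NOT
 * copied, which is mapped to -EFAULT or 0 in the 'error' variable
 */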
109 #define GET_USER(error,value,addr) error = get_user(value,addr)
110 #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
111 #define PUT_USER(error,value,addr) error = put_user(value,addr)
112 #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
113
114 #include <asm/uaccess.h>
115
116 #include "linux/synclink.h"
117
118 #define RCLRVALUE 0xffff
119
120 static MGSL_PARAMS default_params = {
121 MGSL_MODE_HDLC, /* unsigned long mode */
122 0, /* unsigned char loopback; */
123 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
124 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
125 0, /* unsigned long clock_speed; */
126 0xff, /* unsigned char addr_filter; */
127 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
128 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
129 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
130 9600, /* unsigned long data_rate; */
131 8, /* unsigned char data_bits; */
132 1, /* unsigned char stop_bits; */
133 ASYNC_PARITY_NONE /* unsigned char parity; */
134 };
135
136 #define SHARED_MEM_ADDRESS_SIZE 0x40000
137 #define BUFFERLISTSIZE (PAGE_SIZE)
138 #define DMABUFFERSIZE (PAGE_SIZE)
139 #define MAXRXFRAMES 7
140
141 typedef struct _DMABUFFERENTRY
142 {
143 u32 phys_addr; /* 32-bit flat physical address of data buffer */
144 volatile u16 count; /* buffer size/data count */
145 volatile u16 status; /* Control/status field */
146 volatile u16 rcc; /* character count field */
147 u16 reserved; /* padding required by 16C32 */
148 u32 link; /* 32-bit flat link to next buffer entry */
149 char *virt_addr; /* virtual address of data buffer */
150 u32 phys_entry; /* physical address of this buffer entry */
151 } DMABUFFERENTRY, *DMAPBUFFERENTRY;
152
153 /* The queue of BH actions to be performed */
154
155 #define BH_RECEIVE 1
156 #define BH_TRANSMIT 2
157 #define BH_STATUS 4
158
159 #define IO_PIN_SHUTDOWN_LIMIT 100
160
161 #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
162
163 struct _input_signal_events {
164 int ri_up;
165 int ri_down;
166 int dsr_up;
167 int dsr_down;
168 int dcd_up;
169 int dcd_down;
170 int cts_up;
171 int cts_down;
172 };
173
174 /* transmit holding buffer definitions*/
175 #define MAX_TX_HOLDING_BUFFERS 5
176 struct tx_holding_buffer {
177 int buffer_size;
178 unsigned char * buffer;
179 };
180
181
182 /*
183 * Device instance data structure
184 */
185
186 struct mgsl_struct {
187 int magic;
188 int flags;
189 int count; /* count of opens */
190 int line;
191 int hw_version;
192 unsigned short close_delay;
193 unsigned short closing_wait; /* time to wait before closing */
194
195 struct mgsl_icount icount;
196
197 struct tty_struct *tty;
198 int timeout;
199 int x_char; /* xon/xoff character */
200 int blocked_open; /* # of blocked opens */
201 u16 read_status_mask;
202 u16 ignore_status_mask;
203 unsigned char *xmit_buf;
204 int xmit_head;
205 int xmit_tail;
206 int xmit_cnt;
207
208 wait_queue_head_t open_wait;
209 wait_queue_head_t close_wait;
210
211 wait_queue_head_t status_event_wait_q;
212 wait_queue_head_t event_wait_q;
213 struct timer_list tx_timer; /* HDLC transmit timeout timer */
214 struct mgsl_struct *next_device; /* device list link */
215
216 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
217 struct work_struct task; /* task structure for scheduling bh */
218
219 u32 EventMask; /* event trigger mask */
220 u32 RecordedEvents; /* pending events */
221
222 u32 max_frame_size; /* as set by device config */
223
224 u32 pending_bh;
225
226 int bh_running; /* Protection from multiple */
227 int isr_overflow;
228 int bh_requested;
229
230 int dcd_chkcount; /* check counts to prevent */
231 int cts_chkcount; /* too many IRQs if a signal */
232 int dsr_chkcount; /* is floating */
233 int ri_chkcount;
234
235 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
236 unsigned long buffer_list_phys;
237
238 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
239 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
240 unsigned int current_rx_buffer;
241
242 int num_tx_dma_buffers; /* number of tx dma frames required */
243 int tx_dma_buffers_used;
244 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
245 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
246 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
247 int current_tx_buffer; /* next tx dma buffer to be loaded */
248
249 unsigned char *intermediate_rxbuffer;
250
251 int num_tx_holding_buffers; /* number of tx holding buffers allocated */
252 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
253 int put_tx_holding_index; /* next tx holding buffer to store user request */
254 int tx_holding_count; /* number of tx holding buffers waiting */
255 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
256
257 int rx_enabled;
258 int rx_overflow;
259 int rx_rcc_underrun;
260
261 int tx_enabled;
262 int tx_active;
263 u32 idle_mode;
264
265 u16 cmr_value;
266 u16 tcsr_value;
267
268 char device_name[25]; /* device instance name */
269
270 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
271 unsigned char bus; /* expansion bus number (zero based) */
272 unsigned char function; /* PCI device number */
273
274 unsigned int io_base; /* base I/O address of adapter */
275 unsigned int io_addr_size; /* size of the I/O address range */
276 int io_addr_requested; /* nonzero if I/O address requested */
277
278 unsigned int irq_level; /* interrupt level */
279 unsigned long irq_flags;
280 int irq_requested; /* nonzero if IRQ requested */
281
282 unsigned int dma_level; /* DMA channel */
283 int dma_requested; /* nonzero if dma channel requested */
284
285 u16 mbre_bit;
286 u16 loopback_bits;
287 u16 usc_idle_mode;
288
289 MGSL_PARAMS params; /* communications parameters */
290
291 unsigned char serial_signals; /* current serial signal states */
292
293 int irq_occurred; /* for diagnostics use */
294 unsigned int init_error; /* Initialization startup error (DIAGS) */
295 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
296
297 u32 last_mem_alloc;
298 unsigned char* memory_base; /* shared memory address (PCI only) */
299 u32 phys_memory_base;
300 int shared_mem_requested;
301
302 unsigned char* lcr_base; /* local config registers (PCI only) */
303 u32 phys_lcr_base;
304 u32 lcr_offset;
305 int lcr_mem_requested;
306
307 u32 misc_ctrl_value;
308 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
309 char char_buf[MAX_ASYNC_BUFFER_SIZE];
310 BOOLEAN drop_rts_on_tx_done;
311
312 BOOLEAN loopmode_insert_requested;
313 BOOLEAN loopmode_send_done_requested;
314
315 struct _input_signal_events input_signal_events;
316
317 /* generic HDLC device parts */
318 int netcount;
319 int dosyncppp;
320 spinlock_t netlock;
321
322 #ifdef CONFIG_HDLC
323 struct net_device *netdev;
324 #endif
325 };
326
327 #define MGSL_MAGIC 0x5401
328
329 /*
330 * The size of the serial xmit buffer is 1 page, or 4096 bytes
331 */
332 #ifndef SERIAL_XMIT_SIZE
333 #define SERIAL_XMIT_SIZE 4096
334 #endif
335
336 /*
337 * These macros define the offsets used in calculating the
338 * I/O address of the specified USC registers.
339 */
340
341
342 #define DCPIN 2 /* Bit 1 of I/O address */
343 #define SDPIN 4 /* Bit 2 of I/O address */
344
345 #define DCAR 0 /* DMA command/address register */
346 #define CCAR SDPIN /* channel command/address register */
347 #define DATAREG DCPIN + SDPIN /* serial data register */
348 #define MSBONLY 0x41
349 #define LSBONLY 0x40
350
351 /*
352 * These macros define the register address (ordinal number)
353 * used for writing address/value pairs to the USC.
354 */
355
356 #define CMR 0x02 /* Channel mode Register */
357 #define CCSR 0x04 /* Channel Command/status Register */
358 #define CCR 0x06 /* Channel Control Register */
359 #define PSR 0x08 /* Port status Register */
360 #define PCR 0x0a /* Port Control Register */
361 #define TMDR 0x0c /* Test mode Data Register */
362 #define TMCR 0x0e /* Test mode Control Register */
363 #define CMCR 0x10 /* Clock mode Control Register */
364 #define HCR 0x12 /* Hardware Configuration Register */
365 #define IVR 0x14 /* Interrupt Vector Register */
366 #define IOCR 0x16 /* Input/Output Control Register */
367 #define ICR 0x18 /* Interrupt Control Register */
368 #define DCCR 0x1a /* Daisy Chain Control Register */
369 #define MISR 0x1c /* Misc Interrupt status Register */
370 #define SICR 0x1e /* status Interrupt Control Register */
371 #define RDR 0x20 /* Receive Data Register */
372 #define RMR 0x22 /* Receive mode Register */
373 #define RCSR 0x24 /* Receive Command/status Register */
374 #define RICR 0x26 /* Receive Interrupt Control Register */
375 #define RSR 0x28 /* Receive Sync Register */
376 #define RCLR 0x2a /* Receive count Limit Register */
377 #define RCCR 0x2c /* Receive Character count Register */
378 #define TC0R 0x2e /* Time Constant 0 Register */
379 #define TDR 0x30 /* Transmit Data Register */
380 #define TMR 0x32 /* Transmit mode Register */
381 #define TCSR 0x34 /* Transmit Command/status Register */
382 #define TICR 0x36 /* Transmit Interrupt Control Register */
383 #define TSR 0x38 /* Transmit Sync Register */
384 #define TCLR 0x3a /* Transmit count Limit Register */
385 #define TCCR 0x3c /* Transmit Character count Register */
386 #define TC1R 0x3e /* Time Constant 1 Register */
387
388
389 /*
390 * MACRO DEFINITIONS FOR DMA REGISTERS
391 */
392
393 #define DCR 0x06 /* DMA Control Register (shared) */
394 #define DACR 0x08 /* DMA Array count Register (shared) */
395 #define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
396 #define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
397 #define DICR 0x18 /* DMA Interrupt Control Register (shared) */
398 #define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
399 #define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
400
401 #define TDMR 0x02 /* Transmit DMA mode Register */
402 #define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
403 #define TBCR 0x2a /* Transmit Byte count Register */
404 #define TARL 0x2c /* Transmit Address Register (low) */
405 #define TARU 0x2e /* Transmit Address Register (high) */
406 #define NTBCR 0x3a /* Next Transmit Byte count Register */
407 #define NTARL 0x3c /* Next Transmit Address Register (low) */
408 #define NTARU 0x3e /* Next Transmit Address Register (high) */
409
410 #define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
411 #define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
412 #define RBCR 0xaa /* Receive Byte count Register */
413 #define RARL 0xac /* Receive Address Register (low) */
414 #define RARU 0xae /* Receive Address Register (high) */
415 #define NRBCR 0xba /* Next Receive Byte count Register */
416 #define NRARL 0xbc /* Next Receive Address Register (low) */
417 #define NRARU 0xbe /* Next Receive Address Register (high) */
418
419
420 /*
421 * MACRO DEFINITIONS FOR MODEM STATUS BITS
422 */
423
424 #define MODEMSTATUS_DTR 0x80
425 #define MODEMSTATUS_DSR 0x40
426 #define MODEMSTATUS_RTS 0x20
427 #define MODEMSTATUS_CTS 0x10
428 #define MODEMSTATUS_RI 0x04
429 #define MODEMSTATUS_DCD 0x01
430
431
432 /*
433 * Channel Command/Address Register (CCAR) Command Codes
434 */
435
436 #define RTCmd_Null 0x0000
437 #define RTCmd_ResetHighestIus 0x1000
438 #define RTCmd_TriggerChannelLoadDma 0x2000
439 #define RTCmd_TriggerRxDma 0x2800
440 #define RTCmd_TriggerTxDma 0x3000
441 #define RTCmd_TriggerRxAndTxDma 0x3800
442 #define RTCmd_PurgeRxFifo 0x4800
443 #define RTCmd_PurgeTxFifo 0x5000
444 #define RTCmd_PurgeRxAndTxFifo 0x5800
445 #define RTCmd_LoadRcc 0x6800
446 #define RTCmd_LoadTcc 0x7000
447 #define RTCmd_LoadRccAndTcc 0x7800
448 #define RTCmd_LoadTC0 0x8800
449 #define RTCmd_LoadTC1 0x9000
450 #define RTCmd_LoadTC0AndTC1 0x9800
451 #define RTCmd_SerialDataLSBFirst 0xa000
452 #define RTCmd_SerialDataMSBFirst 0xa800
453 #define RTCmd_SelectBigEndian 0xb000
454 #define RTCmd_SelectLittleEndian 0xb800
455
456
457 /*
458 * DMA Command/Address Register (DCAR) Command Codes
459 */
460
461 #define DmaCmd_Null 0x0000
462 #define DmaCmd_ResetTxChannel 0x1000
463 #define DmaCmd_ResetRxChannel 0x1200
464 #define DmaCmd_StartTxChannel 0x2000
465 #define DmaCmd_StartRxChannel 0x2200
466 #define DmaCmd_ContinueTxChannel 0x3000
467 #define DmaCmd_ContinueRxChannel 0x3200
468 #define DmaCmd_PauseTxChannel 0x4000
469 #define DmaCmd_PauseRxChannel 0x4200
470 #define DmaCmd_AbortTxChannel 0x5000
471 #define DmaCmd_AbortRxChannel 0x5200
472 #define DmaCmd_InitTxChannel 0x7000
473 #define DmaCmd_InitRxChannel 0x7200
474 #define DmaCmd_ResetHighestDmaIus 0x8000
475 #define DmaCmd_ResetAllChannels 0x9000
476 #define DmaCmd_StartAllChannels 0xa000
477 #define DmaCmd_ContinueAllChannels 0xb000
478 #define DmaCmd_PauseAllChannels 0xc000
479 #define DmaCmd_AbortAllChannels 0xd000
480 #define DmaCmd_InitAllChannels 0xf000
481
482 #define TCmd_Null 0x0000
483 #define TCmd_ClearTxCRC 0x2000
484 #define TCmd_SelectTicrTtsaData 0x4000
485 #define TCmd_SelectTicrTxFifostatus 0x5000
486 #define TCmd_SelectTicrIntLevel 0x6000
487 #define TCmd_SelectTicrdma_level 0x7000
488 #define TCmd_SendFrame 0x8000
489 #define TCmd_SendAbort 0x9000
490 #define TCmd_EnableDleInsertion 0xc000
491 #define TCmd_DisableDleInsertion 0xd000
492 #define TCmd_ClearEofEom 0xe000
493 #define TCmd_SetEofEom 0xf000
494
495 #define RCmd_Null 0x0000
496 #define RCmd_ClearRxCRC 0x2000
497 #define RCmd_EnterHuntmode 0x3000
498 #define RCmd_SelectRicrRtsaData 0x4000
499 #define RCmd_SelectRicrRxFifostatus 0x5000
500 #define RCmd_SelectRicrIntLevel 0x6000
501 #define RCmd_SelectRicrdma_level 0x7000
502
503 /*
504 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
505 */
506
507 #define RECEIVE_STATUS BIT5
508 #define RECEIVE_DATA BIT4
509 #define TRANSMIT_STATUS BIT3
510 #define TRANSMIT_DATA BIT2
511 #define IO_PIN BIT1
512 #define MISC BIT0
513
514
515 /*
516 * Receive status Bits in Receive Command/status Register RCSR
517 */
518
519 #define RXSTATUS_SHORT_FRAME BIT8
520 #define RXSTATUS_CODE_VIOLATION BIT8
521 #define RXSTATUS_EXITED_HUNT BIT7
522 #define RXSTATUS_IDLE_RECEIVED BIT6
523 #define RXSTATUS_BREAK_RECEIVED BIT5
524 #define RXSTATUS_ABORT_RECEIVED BIT5
525 #define RXSTATUS_RXBOUND BIT4
526 #define RXSTATUS_CRC_ERROR BIT3
527 #define RXSTATUS_FRAMING_ERROR BIT3
528 #define RXSTATUS_ABORT BIT2
529 #define RXSTATUS_PARITY_ERROR BIT2
530 #define RXSTATUS_OVERRUN BIT1
531 #define RXSTATUS_DATA_AVAILABLE BIT0
532 #define RXSTATUS_ALL 0x01f6
533 #define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
534
535 /*
536 * Values for setting transmit idle mode in
537 * Transmit Control/status Register (TCSR)
538 */
539 #define IDLEMODE_FLAGS 0x0000
540 #define IDLEMODE_ALT_ONE_ZERO 0x0100
541 #define IDLEMODE_ZERO 0x0200
542 #define IDLEMODE_ONE 0x0300
543 #define IDLEMODE_ALT_MARK_SPACE 0x0500
544 #define IDLEMODE_SPACE 0x0600
545 #define IDLEMODE_MARK 0x0700
546 #define IDLEMODE_MASK 0x0700
547
548 /*
549 * IUSC revision identifiers
550 */
551 #define IUSC_SL1660 0x4d44
552 #define IUSC_PRE_SL1660 0x4553
553
554 /*
555 * Transmit status Bits in Transmit Command/status Register (TCSR)
556 */
557
558 #define TCSR_PRESERVE 0x0F00
559
560 #define TCSR_UNDERWAIT BIT11
561 #define TXSTATUS_PREAMBLE_SENT BIT7
562 #define TXSTATUS_IDLE_SENT BIT6
563 #define TXSTATUS_ABORT_SENT BIT5
564 #define TXSTATUS_EOF_SENT BIT4
565 #define TXSTATUS_EOM_SENT BIT4
566 #define TXSTATUS_CRC_SENT BIT3
567 #define TXSTATUS_ALL_SENT BIT2
568 #define TXSTATUS_UNDERRUN BIT1
569 #define TXSTATUS_FIFO_EMPTY BIT0
570 #define TXSTATUS_ALL 0x00fa
571 #define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
572
573
574 #define MISCSTATUS_RXC_LATCHED BIT15
575 #define MISCSTATUS_RXC BIT14
576 #define MISCSTATUS_TXC_LATCHED BIT13
577 #define MISCSTATUS_TXC BIT12
578 #define MISCSTATUS_RI_LATCHED BIT11
579 #define MISCSTATUS_RI BIT10
580 #define MISCSTATUS_DSR_LATCHED BIT9
581 #define MISCSTATUS_DSR BIT8
582 #define MISCSTATUS_DCD_LATCHED BIT7
583 #define MISCSTATUS_DCD BIT6
584 #define MISCSTATUS_CTS_LATCHED BIT5
585 #define MISCSTATUS_CTS BIT4
586 #define MISCSTATUS_RCC_UNDERRUN BIT3
587 #define MISCSTATUS_DPLL_NO_SYNC BIT2
588 #define MISCSTATUS_BRG1_ZERO BIT1
589 #define MISCSTATUS_BRG0_ZERO BIT0
590
591 #define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
592 #define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
593
594 #define SICR_RXC_ACTIVE BIT15
595 #define SICR_RXC_INACTIVE BIT14
596 #define SICR_RXC (BIT15+BIT14)
597 #define SICR_TXC_ACTIVE BIT13
598 #define SICR_TXC_INACTIVE BIT12
599 #define SICR_TXC (BIT13+BIT12)
600 #define SICR_RI_ACTIVE BIT11
601 #define SICR_RI_INACTIVE BIT10
602 #define SICR_RI (BIT11+BIT10)
603 #define SICR_DSR_ACTIVE BIT9
604 #define SICR_DSR_INACTIVE BIT8
605 #define SICR_DSR (BIT9+BIT8)
606 #define SICR_DCD_ACTIVE BIT7
607 #define SICR_DCD_INACTIVE BIT6
608 #define SICR_DCD (BIT7+BIT6)
609 #define SICR_CTS_ACTIVE BIT5
610 #define SICR_CTS_INACTIVE BIT4
611 #define SICR_CTS (BIT5+BIT4)
612 #define SICR_RCC_UNDERFLOW BIT3
613 #define SICR_DPLL_NO_SYNC BIT2
614 #define SICR_BRG1_ZERO BIT1
615 #define SICR_BRG0_ZERO BIT0
616
617 void usc_DisableMasterIrqBit( struct mgsl_struct *info );
618 void usc_EnableMasterIrqBit( struct mgsl_struct *info );
619 void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
620 void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
621 void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
622
623 #define usc_EnableInterrupts( a, b ) \
624 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
625
626 #define usc_DisableInterrupts( a, b ) \
627 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
628
629 #define usc_EnableMasterIrqBit(a) \
630 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
631
632 #define usc_DisableMasterIrqBit(a) \
633 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
634
635 #define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
636
637 /*
638 * Transmit status Bits in Transmit Control status Register (TCSR)
639 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
640 */
641
642 #define TXSTATUS_PREAMBLE_SENT BIT7
643 #define TXSTATUS_IDLE_SENT BIT6
644 #define TXSTATUS_ABORT_SENT BIT5
645 #define TXSTATUS_EOF BIT4
646 #define TXSTATUS_CRC_SENT BIT3
647 #define TXSTATUS_ALL_SENT BIT2
648 #define TXSTATUS_UNDERRUN BIT1
649 #define TXSTATUS_FIFO_EMPTY BIT0
650
651 #define DICR_MASTER BIT15
652 #define DICR_TRANSMIT BIT0
653 #define DICR_RECEIVE BIT1
654
655 #define usc_EnableDmaInterrupts(a,b) \
656 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
657
658 #define usc_DisableDmaInterrupts(a,b) \
659 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
660
661 #define usc_EnableStatusIrqs(a,b) \
662 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
663
664 #define usc_DisablestatusIrqs(a,b) \
665 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
666
667 /* Transmit status Bits in Transmit Control status Register (TCSR) */
668 /* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
669
670
671 #define DISABLE_UNCONDITIONAL 0
672 #define DISABLE_END_OF_FRAME 1
673 #define ENABLE_UNCONDITIONAL 2
674 #define ENABLE_AUTO_CTS 3
675 #define ENABLE_AUTO_DCD 3
676 #define usc_EnableTransmitter(a,b) \
677 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
678 #define usc_EnableReceiver(a,b) \
679 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
680
681 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
682 static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
683 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
684
685 static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
686 static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
687 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
688 void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
689 void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
690
691 #define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
692 #define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
693
694 #define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
695
696 static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
697 static void usc_start_receiver( struct mgsl_struct *info );
698 static void usc_stop_receiver( struct mgsl_struct *info );
699
700 static void usc_start_transmitter( struct mgsl_struct *info );
701 static void usc_stop_transmitter( struct mgsl_struct *info );
702 static void usc_set_txidle( struct mgsl_struct *info );
703 static void usc_load_txfifo( struct mgsl_struct *info );
704
705 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
706 static void usc_enable_loopback( struct mgsl_struct *info, int enable );
707
708 static void usc_get_serial_signals( struct mgsl_struct *info );
709 static void usc_set_serial_signals( struct mgsl_struct *info );
710
711 static void usc_reset( struct mgsl_struct *info );
712
713 static void usc_set_sync_mode( struct mgsl_struct *info );
714 static void usc_set_sdlc_mode( struct mgsl_struct *info );
715 static void usc_set_async_mode( struct mgsl_struct *info );
716 static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
717
718 static void usc_loopback_frame( struct mgsl_struct *info );
719
720 static void mgsl_tx_timeout(unsigned long context);
721
722
723 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
724 static void usc_loopmode_insert_request( struct mgsl_struct * info );
725 static int usc_loopmode_active( struct mgsl_struct * info);
726 static void usc_loopmode_send_done( struct mgsl_struct * info );
727
728 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
729
730 #ifdef CONFIG_HDLC
731 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
732 static void hdlcdev_tx_done(struct mgsl_struct *info);
733 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
734 static int hdlcdev_init(struct mgsl_struct *info);
735 static void hdlcdev_exit(struct mgsl_struct *info);
736 #endif
737
738 /*
739 * Defines a BUS descriptor value for the PCI adapter
740 * local bus address ranges.
741 */
742
743 #define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
744 (0x00400020 + \
745 ((WrHold) << 30) + \
746 ((WrDly) << 28) + \
747 ((RdDly) << 26) + \
748 ((Nwdd) << 20) + \
749 ((Nwad) << 15) + \
750 ((Nxda) << 13) + \
751 ((Nrdd) << 11) + \
752 ((Nrad) << 6) )
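/* Example (illustrative): BUS_DESCRIPTOR(1,0,0,0,0,0,0,0) sets only the
 * write hold field, giving 0x00400020 + (1 << 30) = 0x40400020
 */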
753
754 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
755
756 /*
757 * Adapter diagnostic routines
758 */
759 static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
760 static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
761 static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
762 static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
763 static int mgsl_adapter_test( struct mgsl_struct *info );
764
765 /*
766 * device and resource management routines
767 */
768 static int mgsl_claim_resources(struct mgsl_struct *info);
769 static void mgsl_release_resources(struct mgsl_struct *info);
770 static void mgsl_add_device(struct mgsl_struct *info);
771 static struct mgsl_struct* mgsl_allocate_device(void);
772
773 /*
774 * DMA buffer manipulation functions.
775 */
776 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
777 static int mgsl_get_rx_frame( struct mgsl_struct *info );
778 static int mgsl_get_raw_rx_frame( struct mgsl_struct *info );
779 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
780 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
781 static int num_free_tx_dma_buffers(struct mgsl_struct *info);
782 static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
783 static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
784
785 /*
786 * DMA and Shared Memory buffer allocation and formatting
787 */
788 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
789 static void mgsl_free_dma_buffers(struct mgsl_struct *info);
790 static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
791 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
792 static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
793 static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
794 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
795 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
796 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
797 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
798 static int load_next_tx_holding_buffer(struct mgsl_struct *info);
799 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
800
801 /*
802 * Bottom half interrupt handlers
803 */
804 static void mgsl_bh_handler(void* Context);
805 static void mgsl_bh_receive(struct mgsl_struct *info);
806 static void mgsl_bh_transmit(struct mgsl_struct *info);
807 static void mgsl_bh_status(struct mgsl_struct *info);
808
809 /*
810 * Interrupt handler routines and dispatch table.
811 */
812 static void mgsl_isr_null( struct mgsl_struct *info );
813 static void mgsl_isr_transmit_data( struct mgsl_struct *info );
814 static void mgsl_isr_receive_data( struct mgsl_struct *info );
815 static void mgsl_isr_receive_status( struct mgsl_struct *info );
816 static void mgsl_isr_transmit_status( struct mgsl_struct *info );
817 static void mgsl_isr_io_pin( struct mgsl_struct *info );
818 static void mgsl_isr_misc( struct mgsl_struct *info );
819 static void mgsl_isr_receive_dma( struct mgsl_struct *info );
820 static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
821
822 typedef void (*isr_dispatch_func)(struct mgsl_struct *);
823
824 static isr_dispatch_func UscIsrTable[7] =
825 {
826 mgsl_isr_null,
827 mgsl_isr_misc,
828 mgsl_isr_io_pin,
829 mgsl_isr_transmit_data,
830 mgsl_isr_transmit_status,
831 mgsl_isr_receive_data,
832 mgsl_isr_receive_status
833 };
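/* indexed with the interrupt vector read back from IVR, shifted right
 * 9 bits; see mgsl_interrupt() below
 */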
834
835 /*
836 * ioctl call handlers
837 */
838 static int tiocmget(struct tty_struct *tty, struct file *file);
839 static int tiocmset(struct tty_struct *tty, struct file *file,
840 unsigned int set, unsigned int clear);
841 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
842 __user *user_icount);
843 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
844 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
845 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
846 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
847 static int mgsl_txenable(struct mgsl_struct * info, int enable);
848 static int mgsl_txabort(struct mgsl_struct * info);
849 static int mgsl_rxenable(struct mgsl_struct * info, int enable);
850 static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
851 static int mgsl_loopmode_send_done( struct mgsl_struct * info );
852
853 /* set non-zero on successful registration with PCI subsystem */
854 static int pci_registered;
855
856 /*
857 * Global linked list of SyncLink devices
858 */
859 static struct mgsl_struct *mgsl_device_list;
860 static int mgsl_device_count;
861
862 /*
863 * Set this param to non-zero to load eax with the
864 * .text section address and breakpoint on module load.
865 * This is useful with gdb and the add-symbol-file command.
866 */
867 static int break_on_load;
868
869 /*
870 * Driver major number, defaults to zero to get auto
871 * assigned major number. May be forced as module parameter.
872 */
873 static int ttymajor;
874
875 /*
876 * Array of user specified options for ISA adapters.
877 */
878 static int io[MAX_ISA_DEVICES];
879 static int irq[MAX_ISA_DEVICES];
880 static int dma[MAX_ISA_DEVICES];
881 static int debug_level;
882 static int maxframe[MAX_TOTAL_DEVICES];
883 static int dosyncppp[MAX_TOTAL_DEVICES];
884 static int txdmabufs[MAX_TOTAL_DEVICES];
885 static int txholdbufs[MAX_TOTAL_DEVICES];
886
887 module_param(break_on_load, bool, 0);
888 module_param(ttymajor, int, 0);
889 module_param_array(io, int, NULL, 0);
890 module_param_array(irq, int, NULL, 0);
891 module_param_array(dma, int, NULL, 0);
892 module_param(debug_level, int, 0);
893 module_param_array(maxframe, int, NULL, 0);
894 module_param_array(dosyncppp, int, NULL, 0);
895 module_param_array(txdmabufs, int, NULL, 0);
896 module_param_array(txholdbufs, int, NULL, 0);
897
898 static char *driver_name = "SyncLink serial driver";
899 static char *driver_version = "$Revision: 4.37 $";
900
901 static int synclink_init_one (struct pci_dev *dev,
902 const struct pci_device_id *ent);
903 static void synclink_remove_one (struct pci_dev *dev);
904
905 static struct pci_device_id synclink_pci_tbl[] = {
906 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
907 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
908 { 0, }, /* terminate list */
909 };
910 MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
911
912 MODULE_LICENSE("GPL");
913
914 static struct pci_driver synclink_pci_driver = {
915 .name = "synclink",
916 .id_table = synclink_pci_tbl,
917 .probe = synclink_init_one,
918 .remove = __devexit_p(synclink_remove_one),
919 };
920
921 static struct tty_driver *serial_driver;
922
923 /* number of characters left in xmit buffer before we ask for more */
924 #define WAKEUP_CHARS 256
925
926
927 static void mgsl_change_params(struct mgsl_struct *info);
928 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
929
930 /*
931 * 1st function defined in .text section. Calling this function in
932 * init_module() followed by a breakpoint allows a remote debugger
933 * (gdb) to get the .text address for the add-symbol-file command.
934 * This allows remote debugging of dynamically loadable modules.
935 */
936 static void* mgsl_get_text_ptr(void)
937 {
938 return mgsl_get_text_ptr;
939 }
940
941 /*
942 * tmp_buf is used as a temporary buffer by mgsl_write. We need to
943 * lock it in case the COPY_FROM_USER blocks while swapping in a page,
944 * and some other program tries to do a serial write at the same time.
945 * Since the lock will only come under contention when the system is
946 * swapping and available memory is low, it makes sense to share one
947 * buffer across all the serial ports, since it significantly saves
948 * memory if large numbers of serial ports are open.
949 */
950 static unsigned char *tmp_buf;
951 static DECLARE_MUTEX(tmp_buf_sem);
952
953 static inline int mgsl_paranoia_check(struct mgsl_struct *info,
954 char *name, const char *routine)
955 {
956 #ifdef MGSL_PARANOIA_CHECK
957 static const char *badmagic =
958 "Warning: bad magic number for mgsl struct (%s) in %s\n";
959 static const char *badinfo =
960 "Warning: null mgsl_struct for (%s) in %s\n";
961
962 if (!info) {
963 printk(badinfo, name, routine);
964 return 1;
965 }
966 if (info->magic != MGSL_MAGIC) {
967 printk(badmagic, name, routine);
968 return 1;
969 }
970 #else
971 if (!info)
972 return 1;
973 #endif
974 return 0;
975 }
976
977 /**
978 * line discipline callback wrappers
979 *
980 * The wrappers maintain line discipline references
981 * while calling into the line discipline.
982 *
983 * ldisc_receive_buf - pass receive data to line discipline
984 */
985
986 static void ldisc_receive_buf(struct tty_struct *tty,
987 const __u8 *data, char *flags, int count)
988 {
989 struct tty_ldisc *ld;
990 if (!tty)
991 return;
992 ld = tty_ldisc_ref(tty);
993 if (ld) {
994 if (ld->receive_buf)
995 ld->receive_buf(tty, data, flags, count);
996 tty_ldisc_deref(ld);
997 }
998 }
999
1000 /* mgsl_stop() throttle (stop) transmitter
1001 *
1002 * Arguments: tty pointer to tty info structure
1003 * Return Value: None
1004 */
1005 static void mgsl_stop(struct tty_struct *tty)
1006 {
1007 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1008 unsigned long flags;
1009
1010 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
1011 return;
1012
1013 if ( debug_level >= DEBUG_LEVEL_INFO )
1014 printk("mgsl_stop(%s)\n",info->device_name);
1015
1016 spin_lock_irqsave(&info->irq_spinlock,flags);
1017 if (info->tx_enabled)
1018 usc_stop_transmitter(info);
1019 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1020
1021 } /* end of mgsl_stop() */
1022
1023 /* mgsl_start() release (start) transmitter
1024 *
1025 * Arguments: tty pointer to tty info structure
1026 * Return Value: None
1027 */
1028 static void mgsl_start(struct tty_struct *tty)
1029 {
1030 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1031 unsigned long flags;
1032
1033 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1034 return;
1035
1036 if ( debug_level >= DEBUG_LEVEL_INFO )
1037 printk("mgsl_start(%s)\n",info->device_name);
1038
1039 spin_lock_irqsave(&info->irq_spinlock,flags);
1040 if (!info->tx_enabled)
1041 usc_start_transmitter(info);
1042 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1043
1044 } /* end of mgsl_start() */
1045
1046 /*
1047 * Bottom half work queue access functions
1048 */
1049
1050 /* mgsl_bh_action() Return next bottom half action to perform.
1051 * Return Value: BH action code or 0 if nothing to do.
1052 */
1053 static int mgsl_bh_action(struct mgsl_struct *info)
1054 {
1055 unsigned long flags;
1056 int rc = 0;
1057
1058 spin_lock_irqsave(&info->irq_spinlock,flags);
1059
1060 if (info->pending_bh & BH_RECEIVE) {
1061 info->pending_bh &= ~BH_RECEIVE;
1062 rc = BH_RECEIVE;
1063 } else if (info->pending_bh & BH_TRANSMIT) {
1064 info->pending_bh &= ~BH_TRANSMIT;
1065 rc = BH_TRANSMIT;
1066 } else if (info->pending_bh & BH_STATUS) {
1067 info->pending_bh &= ~BH_STATUS;
1068 rc = BH_STATUS;
1069 }
1070
1071 if (!rc) {
1072 /* Mark BH routine as complete */
1073 info->bh_running = 0;
1074 info->bh_requested = 0;
1075 }
1076
1077 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1078
1079 return rc;
1080 }
1081
1082 /*
1083 * Perform bottom half processing of work items queued by ISR.
1084 */
1085 static void mgsl_bh_handler(void* Context)
1086 {
1087 struct mgsl_struct *info = (struct mgsl_struct*)Context;
1088 int action;
1089
1090 if (!info)
1091 return;
1092
1093 if ( debug_level >= DEBUG_LEVEL_BH )
1094 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1095 __FILE__,__LINE__,info->device_name);
1096
1097 info->bh_running = 1;
1098
1099 while((action = mgsl_bh_action(info)) != 0) {
1100
1101 /* Process work item */
1102 if ( debug_level >= DEBUG_LEVEL_BH )
1103 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1104 __FILE__,__LINE__,action);
1105
1106 switch (action) {
1107
1108 case BH_RECEIVE:
1109 mgsl_bh_receive(info);
1110 break;
1111 case BH_TRANSMIT:
1112 mgsl_bh_transmit(info);
1113 break;
1114 case BH_STATUS:
1115 mgsl_bh_status(info);
1116 break;
1117 default:
1118 /* unknown work item ID */
1119 printk("Unknown work item ID=%08X!\n", action);
1120 break;
1121 }
1122 }
1123
1124 if ( debug_level >= DEBUG_LEVEL_BH )
1125 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1126 __FILE__,__LINE__,info->device_name);
1127 }
1128
1129 static void mgsl_bh_receive(struct mgsl_struct *info)
1130 {
1131 int (*get_rx_frame)(struct mgsl_struct *info) =
1132 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1133
1134 if ( debug_level >= DEBUG_LEVEL_BH )
1135 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1136 __FILE__,__LINE__,info->device_name);
1137
1138 do
1139 {
1140 if (info->rx_rcc_underrun) {
1141 unsigned long flags;
1142 spin_lock_irqsave(&info->irq_spinlock,flags);
1143 usc_start_receiver(info);
1144 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1145 return;
1146 }
1147 } while(get_rx_frame(info));
1148 }
1149
1150 static void mgsl_bh_transmit(struct mgsl_struct *info)
1151 {
1152 struct tty_struct *tty = info->tty;
1153 unsigned long flags;
1154
1155 if ( debug_level >= DEBUG_LEVEL_BH )
1156 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1157 __FILE__,__LINE__,info->device_name);
1158
1159 if (tty) {
1160 tty_wakeup(tty);
1161 wake_up_interruptible(&tty->write_wait);
1162 }
1163
1164 /* if transmitter idle and loopmode_send_done_requested
1165 * then start echoing RxD to TxD
1166 */
1167 spin_lock_irqsave(&info->irq_spinlock,flags);
1168 if ( !info->tx_active && info->loopmode_send_done_requested )
1169 usc_loopmode_send_done( info );
1170 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1171 }
1172
1173 static void mgsl_bh_status(struct mgsl_struct *info)
1174 {
1175 if ( debug_level >= DEBUG_LEVEL_BH )
1176 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1177 __FILE__,__LINE__,info->device_name);
1178
1179 info->ri_chkcount = 0;
1180 info->dsr_chkcount = 0;
1181 info->dcd_chkcount = 0;
1182 info->cts_chkcount = 0;
1183 }
1184
1185 /* mgsl_isr_receive_status()
1186 *
1187 * Service a receive status interrupt. The type of status
1188 * interrupt is indicated by the state of the RCSR.
1189 * This is only used for HDLC mode.
1190 *
1191 * Arguments: info pointer to device instance data
1192 * Return Value: None
1193 */
1194 static void mgsl_isr_receive_status( struct mgsl_struct *info )
1195 {
1196 u16 status = usc_InReg( info, RCSR );
1197
1198 if ( debug_level >= DEBUG_LEVEL_ISR )
1199 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1200 __FILE__,__LINE__,status);
1201
1202 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1203 info->loopmode_insert_requested &&
1204 usc_loopmode_active(info) )
1205 {
1206 ++info->icount.rxabort;
1207 info->loopmode_insert_requested = FALSE;
1208
1209 /* clear CMR:13 to start echoing RxD to TxD */
1210 info->cmr_value &= ~BIT13;
1211 usc_OutReg(info, CMR, info->cmr_value);
1212
1213 /* disable received abort irq (no longer required) */
1214 usc_OutReg(info, RICR,
1215 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1216 }
1217
1218 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1219 if (status & RXSTATUS_EXITED_HUNT)
1220 info->icount.exithunt++;
1221 if (status & RXSTATUS_IDLE_RECEIVED)
1222 info->icount.rxidle++;
1223 wake_up_interruptible(&info->event_wait_q);
1224 }
1225
1226 if (status & RXSTATUS_OVERRUN){
1227 info->icount.rxover++;
1228 usc_process_rxoverrun_sync( info );
1229 }
1230
1231 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1232 usc_UnlatchRxstatusBits( info, status );
1233
1234 } /* end of mgsl_isr_receive_status() */
1235
1236 /* mgsl_isr_transmit_status()
1237 *
1238 * Service a transmit status interrupt
1239 * HDLC mode: end of transmit frame
1240 * Async mode: all data is sent
1241 * transmit status is indicated by bits in the TCSR.
1242 *
1243 * Arguments: info pointer to device instance data
1244 * Return Value: None
1245 */
1246 static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1247 {
1248 u16 status = usc_InReg( info, TCSR );
1249
1250 if ( debug_level >= DEBUG_LEVEL_ISR )
1251 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1252 __FILE__,__LINE__,status);
1253
1254 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1255 usc_UnlatchTxstatusBits( info, status );
1256
1257 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1258 {
1259 /* finished sending HDLC abort. This may leave */
1260 /* the TxFifo with data from the aborted frame */
1261 /* so purge the TxFifo. Also shutdown the DMA */
1262 /* channel in case there is data remaining in */
1263 /* the DMA buffer */
1264 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1265 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1266 }
1267
1268 if ( status & TXSTATUS_EOF_SENT )
1269 info->icount.txok++;
1270 else if ( status & TXSTATUS_UNDERRUN )
1271 info->icount.txunder++;
1272 else if ( status & TXSTATUS_ABORT_SENT )
1273 info->icount.txabort++;
1274 else
1275 info->icount.txunder++;
1276
1277 info->tx_active = 0;
1278 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1279 del_timer(&info->tx_timer);
1280
1281 if ( info->drop_rts_on_tx_done ) {
1282 usc_get_serial_signals( info );
1283 if ( info->serial_signals & SerialSignal_RTS ) {
1284 info->serial_signals &= ~SerialSignal_RTS;
1285 usc_set_serial_signals( info );
1286 }
1287 info->drop_rts_on_tx_done = 0;
1288 }
1289
1290 #ifdef CONFIG_HDLC
1291 if (info->netcount)
1292 hdlcdev_tx_done(info);
1293 else
1294 #endif
1295 {
1296 if (info->tty->stopped || info->tty->hw_stopped) {
1297 usc_stop_transmitter(info);
1298 return;
1299 }
1300 info->pending_bh |= BH_TRANSMIT;
1301 }
1302
1303 } /* end of mgsl_isr_transmit_status() */
1304
1305 /* mgsl_isr_io_pin()
1306 *
1307 * Service an Input/Output pin interrupt. The type of
1308 * interrupt is indicated by bits in the MISR
1309 *
1310 * Arguments: info pointer to device instance data
1311 * Return Value: None
1312 */
1313 static void mgsl_isr_io_pin( struct mgsl_struct *info )
1314 {
1315 struct mgsl_icount *icount;
1316 u16 status = usc_InReg( info, MISR );
1317
1318 if ( debug_level >= DEBUG_LEVEL_ISR )
1319 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1320 __FILE__,__LINE__,status);
1321
1322 usc_ClearIrqPendingBits( info, IO_PIN );
1323 usc_UnlatchIostatusBits( info, status );
1324
1325 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1326 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1327 icount = &info->icount;
1328 /* update input line counters */
1329 if (status & MISCSTATUS_RI_LATCHED) {
1330 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1331 usc_DisablestatusIrqs(info,SICR_RI);
1332 icount->rng++;
1333 if ( status & MISCSTATUS_RI )
1334 info->input_signal_events.ri_up++;
1335 else
1336 info->input_signal_events.ri_down++;
1337 }
1338 if (status & MISCSTATUS_DSR_LATCHED) {
1339 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1340 usc_DisablestatusIrqs(info,SICR_DSR);
1341 icount->dsr++;
1342 if ( status & MISCSTATUS_DSR )
1343 info->input_signal_events.dsr_up++;
1344 else
1345 info->input_signal_events.dsr_down++;
1346 }
1347 if (status & MISCSTATUS_DCD_LATCHED) {
1348 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1349 usc_DisablestatusIrqs(info,SICR_DCD);
1350 icount->dcd++;
1351 if (status & MISCSTATUS_DCD) {
1352 info->input_signal_events.dcd_up++;
1353 } else
1354 info->input_signal_events.dcd_down++;
1355 #ifdef CONFIG_HDLC
1356 if (info->netcount)
1357 hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev);
1358 #endif
1359 }
1360 if (status & MISCSTATUS_CTS_LATCHED)
1361 {
1362 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1363 usc_DisablestatusIrqs(info,SICR_CTS);
1364 icount->cts++;
1365 if ( status & MISCSTATUS_CTS )
1366 info->input_signal_events.cts_up++;
1367 else
1368 info->input_signal_events.cts_down++;
1369 }
1370 wake_up_interruptible(&info->status_event_wait_q);
1371 wake_up_interruptible(&info->event_wait_q);
1372
1373 if ( (info->flags & ASYNC_CHECK_CD) &&
1374 (status & MISCSTATUS_DCD_LATCHED) ) {
1375 if ( debug_level >= DEBUG_LEVEL_ISR )
1376 printk("%s CD now %s...", info->device_name,
1377 (status & MISCSTATUS_DCD) ? "on" : "off");
1378 if (status & MISCSTATUS_DCD)
1379 wake_up_interruptible(&info->open_wait);
1380 else {
1381 if ( debug_level >= DEBUG_LEVEL_ISR )
1382 printk("doing serial hangup...");
1383 if (info->tty)
1384 tty_hangup(info->tty);
1385 }
1386 }
1387
1388 if ( (info->flags & ASYNC_CTS_FLOW) &&
1389 (status & MISCSTATUS_CTS_LATCHED) ) {
1390 if (info->tty->hw_stopped) {
1391 if (status & MISCSTATUS_CTS) {
1392 if ( debug_level >= DEBUG_LEVEL_ISR )
1393 printk("CTS tx start...");
1394 if (info->tty)
1395 info->tty->hw_stopped = 0;
1396 usc_start_transmitter(info);
1397 info->pending_bh |= BH_TRANSMIT;
1398 return;
1399 }
1400 } else {
1401 if (!(status & MISCSTATUS_CTS)) {
1402 if ( debug_level >= DEBUG_LEVEL_ISR )
1403 printk("CTS tx stop...");
1404 if (info->tty)
1405 info->tty->hw_stopped = 1;
1406 usc_stop_transmitter(info);
1407 }
1408 }
1409 }
1410 }
1411
1412 info->pending_bh |= BH_STATUS;
1413
1414 /* for diagnostics set IRQ flag */
1415 if ( status & MISCSTATUS_TXC_LATCHED ){
1416 usc_OutReg( info, SICR,
1417 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1418 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1419 info->irq_occurred = 1;
1420 }
1421
1422 } /* end of mgsl_isr_io_pin() */
1423
1424 /* mgsl_isr_transmit_data()
1425 *
1426 * Service a transmit data interrupt (async mode only).
1427 *
1428 * Arguments: info pointer to device instance data
1429 * Return Value: None
1430 */
1431 static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1432 {
1433 if ( debug_level >= DEBUG_LEVEL_ISR )
1434 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1435 __FILE__,__LINE__,info->xmit_cnt);
1436
1437 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1438
1439 if (info->tty->stopped || info->tty->hw_stopped) {
1440 usc_stop_transmitter(info);
1441 return;
1442 }
1443
1444 if ( info->xmit_cnt )
1445 usc_load_txfifo( info );
1446 else
1447 info->tx_active = 0;
1448
1449 if (info->xmit_cnt < WAKEUP_CHARS)
1450 info->pending_bh |= BH_TRANSMIT;
1451
1452 } /* end of mgsl_isr_transmit_data() */
1453
1454 /* mgsl_isr_receive_data()
1455 *
1456 * Service a receive data interrupt. This occurs
1457 * when operating in asynchronous interrupt transfer mode.
1458 * The receive data FIFO is flushed to the receive data buffers.
1459 *
1460 * Arguments: info pointer to device instance data
1461 * Return Value: None
1462 */
1463 static void mgsl_isr_receive_data( struct mgsl_struct *info )
1464 {
1465 int Fifocount;
1466 u16 status;
1467 unsigned char DataByte;
1468 struct tty_struct *tty = info->tty;
1469 struct mgsl_icount *icount = &info->icount;
1470
1471 if ( debug_level >= DEBUG_LEVEL_ISR )
1472 printk("%s(%d):mgsl_isr_receive_data\n",
1473 __FILE__,__LINE__);
1474
1475 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1476
1477 /* select FIFO status for RICR readback */
1478 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1479
1480 /* clear the Wordstatus bit so that status readback */
1481 /* only reflects the status of this byte */
1482 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1483
1484 /* flush the receive FIFO */
1485
1486 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1487 /* read one byte from RxFIFO */
1488 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1489 info->io_base + CCAR );
1490 DataByte = inb( info->io_base + CCAR );
1491
1492 /* get the status of the received byte */
1493 status = usc_InReg(info, RCSR);
1494 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1495 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1496 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1497
1498 if (tty->flip.count >= TTY_FLIPBUF_SIZE)
1499 continue;
1500
1501 *tty->flip.char_buf_ptr = DataByte;
1502 icount->rx++;
1503
1504 *tty->flip.flag_buf_ptr = 0;
1505 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1506 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1507 printk("rxerr=%04X\n",status);
1508 /* update error statistics */
1509 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1510 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1511 icount->brk++;
1512 } else if (status & RXSTATUS_PARITY_ERROR)
1513 icount->parity++;
1514 else if (status & RXSTATUS_FRAMING_ERROR)
1515 icount->frame++;
1516 else if (status & RXSTATUS_OVERRUN) {
1517 /* must issue purge fifo cmd before */
1518 /* 16C32 accepts more receive chars */
1519 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1520 icount->overrun++;
1521 }
1522
1523 /* discard char if tty control flags say so */
1524 if (status & info->ignore_status_mask)
1525 continue;
1526
1527 status &= info->read_status_mask;
1528
1529 if (status & RXSTATUS_BREAK_RECEIVED) {
1530 *tty->flip.flag_buf_ptr = TTY_BREAK;
1531 if (info->flags & ASYNC_SAK)
1532 do_SAK(tty);
1533 } else if (status & RXSTATUS_PARITY_ERROR)
1534 *tty->flip.flag_buf_ptr = TTY_PARITY;
1535 else if (status & RXSTATUS_FRAMING_ERROR)
1536 *tty->flip.flag_buf_ptr = TTY_FRAME;
1537 if (status & RXSTATUS_OVERRUN) {
1538 /* Overrun is special, since it's
1539 * reported immediately, and doesn't
1540 * affect the current character
1541 */
1542 if (tty->flip.count < TTY_FLIPBUF_SIZE) {
1543 tty->flip.count++;
1544 tty->flip.flag_buf_ptr++;
1545 tty->flip.char_buf_ptr++;
1546 *tty->flip.flag_buf_ptr = TTY_OVERRUN;
1547 }
1548 }
1549 } /* end of if (error) */
1550
1551 tty->flip.flag_buf_ptr++;
1552 tty->flip.char_buf_ptr++;
1553 tty->flip.count++;
1554 }
1555
1556 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1557 printk("%s(%d):mgsl_isr_receive_data flip count=%d\n",
1558 __FILE__,__LINE__,tty->flip.count);
1559 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1560 __FILE__,__LINE__,icount->rx,icount->brk,
1561 icount->parity,icount->frame,icount->overrun);
1562 }
1563
1564 if ( tty->flip.count )
1565 tty_flip_buffer_push(tty);
1566 }
1567
1568 /* mgsl_isr_misc()
1569 *
1570 * Service a miscellaneous interrupt source.
1571 *
1572 * Arguments: info pointer to device extension (instance data)
1573 * Return Value: None
1574 */
1575 static void mgsl_isr_misc( struct mgsl_struct *info )
1576 {
1577 u16 status = usc_InReg( info, MISR );
1578
1579 if ( debug_level >= DEBUG_LEVEL_ISR )
1580 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1581 __FILE__,__LINE__,status);
1582
1583 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1584 (info->params.mode == MGSL_MODE_HDLC)) {
1585
1586 /* turn off receiver and rx DMA */
1587 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1588 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1589 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1590 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1591 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1592
1593 /* schedule BH handler to restart receiver */
1594 info->pending_bh |= BH_RECEIVE;
1595 info->rx_rcc_underrun = 1;
1596 }
1597
1598 usc_ClearIrqPendingBits( info, MISC );
1599 usc_UnlatchMiscstatusBits( info, status );
1600
1601 } /* end of mgsl_isr_misc() */
1602
1603 /* mgsl_isr_null()
1604 *
1605 * Services undefined interrupt vectors from the
1606 * USC. (hence this function SHOULD never be called)
1607 *
1608 * Arguments: info pointer to device extension (instance data)
1609 * Return Value: None
1610 */
1611 static void mgsl_isr_null( struct mgsl_struct *info )
1612 {
1613
1614 } /* end of mgsl_isr_null() */
1615
1616 /* mgsl_isr_receive_dma()
1617 *
1618 * Service a receive DMA channel interrupt.
1619 * For this driver there are two sources of receive DMA interrupts
1620 * as identified in the Receive DMA mode Register (RDMR):
1621 *
1622 * BIT3 EOA/EOL End of List, all receive buffers in receive
1623 * buffer list have been filled (no more free buffers
1624 * available). The DMA controller has shut down.
1625 *
1626 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1627 * DMA buffer is terminated in response to completion
1628 * of a good frame or a frame with errors. The status
1629 * of the frame is stored in the buffer entry in the
1630 * list of receive buffer entries.
1631 *
1632 * Arguments: info pointer to device instance data
1633 * Return Value: None
1634 */
1635 static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1636 {
1637 u16 status;
1638
1639 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1640 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1641
1642 /* Read the receive DMA status to identify interrupt type. */
1643 /* This also clears the status bits. */
1644 status = usc_InDmaReg( info, RDMR );
1645
1646 if ( debug_level >= DEBUG_LEVEL_ISR )
1647 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1648 __FILE__,__LINE__,info->device_name,status);
1649
1650 info->pending_bh |= BH_RECEIVE;
1651
1652 if ( status & BIT3 ) {
1653 info->rx_overflow = 1;
1654 info->icount.buf_overrun++;
1655 }
1656
1657 } /* end of mgsl_isr_receive_dma() */
1658
1659 /* mgsl_isr_transmit_dma()
1660 *
1661 * This function services a transmit DMA channel interrupt.
1662 *
1663 * For this driver there is one source of transmit DMA interrupts
1664 * as identified in the Transmit DMA Mode Register (TDMR):
1665 *
1666 * BIT2 EOB End of Buffer. This interrupt occurs when a
1667 * transmit DMA buffer has been emptied.
1668 *
1669 * The driver maintains enough transmit DMA buffers to hold at least
1670 * one max frame size transmit frame. When operating in a buffered
1671 * transmit mode, there may be enough transmit DMA buffers to hold
1672 * two or more max frame size frames. On an EOB condition,
1673 * determine if there are any queued transmit buffers and copy them
1674 * into the transmit DMA buffers if room is available.
1675 *
1676 * Arguments: info pointer to device instance data
1677 * Return Value: None
1678 */
1679 static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1680 {
1681 u16 status;
1682
1683 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1684 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1685
1686 /* Read the transmit DMA status to identify interrupt type. */
1687 /* This also clears the status bits. */
1688
1689 status = usc_InDmaReg( info, TDMR );
1690
1691 if ( debug_level >= DEBUG_LEVEL_ISR )
1692 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1693 __FILE__,__LINE__,info->device_name,status);
1694
1695 if ( status & BIT2 ) {
1696 --info->tx_dma_buffers_used;
1697
1698 /* if there are transmit frames queued,
1699 * try to load the next one
1700 */
1701 if ( load_next_tx_holding_buffer(info) ) {
1702 /* if call returns non-zero value, we have
1703 * at least one free tx holding buffer
1704 */
1705 info->pending_bh |= BH_TRANSMIT;
1706 }
1707 }
1708
1709 } /* end of mgsl_isr_transmit_dma() */
1710
1711 /* mgsl_interrupt()
1712 *
1713 * Interrupt service routine entry point.
1714 *
1715 * Arguments:
1716 *
1717 * irq interrupt number that caused interrupt
1718 * dev_id device ID supplied during interrupt registration
1719 * regs interrupted processor context
1720 *
1721 * Return Value: IRQ_HANDLED if the interrupt was serviced, IRQ_NONE if not
1722 */
1723 static irqreturn_t mgsl_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1724 {
1725 struct mgsl_struct * info;
1726 u16 UscVector;
1727 u16 DmaVector;
1728
1729 if ( debug_level >= DEBUG_LEVEL_ISR )
1730 printk("%s(%d):mgsl_interrupt(%d)entry.\n",
1731 __FILE__,__LINE__,irq);
1732
1733 info = (struct mgsl_struct *)dev_id;
1734 if (!info)
1735 return IRQ_NONE;
1736
1737 spin_lock(&info->irq_spinlock);
1738
1739 for(;;) {
1740 /* Read the interrupt vectors from hardware. */
1741 UscVector = usc_InReg(info, IVR) >> 9;
1742 DmaVector = usc_InDmaReg(info, DIVR);
1743
1744 if ( debug_level >= DEBUG_LEVEL_ISR )
1745 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1746 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1747
1748 if ( !UscVector && !DmaVector )
1749 break;
1750
1751 /* Dispatch interrupt vector */
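		/* UscVector (IVR bits 15:9) indexes UscIsrTable to pick the
		 * serial controller service routine. DMA interrupts are
		 * decoded from DIVR bits 10:9; the value 10b selects the
		 * transmit DMA routine, otherwise receive DMA is serviced.
		 */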
1752 if ( UscVector )
1753 (*UscIsrTable[UscVector])(info);
1754 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1755 mgsl_isr_transmit_dma(info);
1756 else
1757 mgsl_isr_receive_dma(info);
1758
1759 if ( info->isr_overflow ) {
1760 printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
1761 __FILE__,__LINE__,info->device_name, irq);
1762 usc_DisableMasterIrqBit(info);
1763 usc_DisableDmaInterrupts(info,DICR_MASTER);
1764 break;
1765 }
1766 }
1767
1768 /* Request bottom half processing if there's something
1769 * for it to do and the bh is not already running
1770 */
1771
1772 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1773 if ( debug_level >= DEBUG_LEVEL_ISR )
1774 printk("%s(%d):%s queueing bh task.\n",
1775 __FILE__,__LINE__,info->device_name);
1776 schedule_work(&info->task);
1777 info->bh_requested = 1;
1778 }
1779
1780 spin_unlock(&info->irq_spinlock);
1781
1782 if ( debug_level >= DEBUG_LEVEL_ISR )
1783 printk("%s(%d):mgsl_interrupt(%d)exit.\n",
1784 __FILE__,__LINE__,irq);
1785 return IRQ_HANDLED;
1786 } /* end of mgsl_interrupt() */
1787
1788 /* startup()
1789 *
1790 * Initialize and start device.
1791 *
1792 * Arguments: info pointer to device instance data
1793 * Return Value: 0 if success, otherwise error code
1794 */
1795 static int startup(struct mgsl_struct * info)
1796 {
1797 int retval = 0;
1798
1799 if ( debug_level >= DEBUG_LEVEL_INFO )
1800 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1801
1802 if (info->flags & ASYNC_INITIALIZED)
1803 return 0;
1804
1805 if (!info->xmit_buf) {
1806 /* allocate a page of memory for a transmit buffer */
1807 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1808 if (!info->xmit_buf) {
1809 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1810 __FILE__,__LINE__,info->device_name);
1811 return -ENOMEM;
1812 }
1813 }
1814
1815 info->pending_bh = 0;
1816
1817 memset(&info->icount, 0, sizeof(info->icount));
1818
1819 init_timer(&info->tx_timer);
1820 info->tx_timer.data = (unsigned long)info;
1821 info->tx_timer.function = mgsl_tx_timeout;
1822
1823 /* Allocate and claim adapter resources */
1824 retval = mgsl_claim_resources(info);
1825
1826 /* perform existence check and diagnostics */
1827 if ( !retval )
1828 retval = mgsl_adapter_test(info);
1829
1830 if ( retval ) {
1831 if (capable(CAP_SYS_ADMIN) && info->tty)
1832 set_bit(TTY_IO_ERROR, &info->tty->flags);
1833 mgsl_release_resources(info);
1834 return retval;
1835 }
1836
1837 /* program hardware for current parameters */
1838 mgsl_change_params(info);
1839
1840 if (info->tty)
1841 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1842
1843 info->flags |= ASYNC_INITIALIZED;
1844
1845 return 0;
1846
1847 } /* end of startup() */
1848
1849 /* shutdown()
1850 *
1851 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1852 *
1853 * Arguments: info pointer to device instance data
1854 * Return Value: None
1855 */
1856 static void shutdown(struct mgsl_struct * info)
1857 {
1858 unsigned long flags;
1859
1860 if (!(info->flags & ASYNC_INITIALIZED))
1861 return;
1862
1863 if (debug_level >= DEBUG_LEVEL_INFO)
1864 printk("%s(%d):mgsl_shutdown(%s)\n",
1865 __FILE__,__LINE__, info->device_name );
1866
1867 /* clear status wait queue because status changes */
1868 /* can't happen after shutting down the hardware */
1869 wake_up_interruptible(&info->status_event_wait_q);
1870 wake_up_interruptible(&info->event_wait_q);
1871
1872 del_timer(&info->tx_timer);
1873
1874 if (info->xmit_buf) {
1875 free_page((unsigned long) info->xmit_buf);
1876 info->xmit_buf = NULL;
1877 }
1878
1879 spin_lock_irqsave(&info->irq_spinlock,flags);
1880 usc_DisableMasterIrqBit(info);
1881 usc_stop_receiver(info);
1882 usc_stop_transmitter(info);
1883 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1884 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1885 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1886
1887 /* Disable DMAEN (Port 7, Bit 14) */
1888 /* This disconnects the DMA request signal from the ISA bus */
1889 /* on the ISA adapter. This has no effect for the PCI adapter */
1890 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1891
1892 /* Disable INTEN (Port 6, Bit12) */
1893 /* This disconnects the IRQ request signal to the ISA bus */
1894 /* on the ISA adapter. This has no effect for the PCI adapter */
1895 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1896
1897 if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
1898 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1899 usc_set_serial_signals(info);
1900 }
1901
1902 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1903
1904 mgsl_release_resources(info);
1905
1906 if (info->tty)
1907 set_bit(TTY_IO_ERROR, &info->tty->flags);
1908
1909 info->flags &= ~ASYNC_INITIALIZED;
1910
1911 } /* end of shutdown() */
1912
1913 static void mgsl_program_hw(struct mgsl_struct *info)
1914 {
1915 unsigned long flags;
1916
1917 spin_lock_irqsave(&info->irq_spinlock,flags);
1918
1919 usc_stop_receiver(info);
1920 usc_stop_transmitter(info);
1921 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1922
1923 if (info->params.mode == MGSL_MODE_HDLC ||
1924 info->params.mode == MGSL_MODE_RAW ||
1925 info->netcount)
1926 usc_set_sync_mode(info);
1927 else
1928 usc_set_async_mode(info);
1929
1930 usc_set_serial_signals(info);
1931
1932 info->dcd_chkcount = 0;
1933 info->cts_chkcount = 0;
1934 info->ri_chkcount = 0;
1935 info->dsr_chkcount = 0;
1936
1937 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1938 usc_EnableInterrupts(info, IO_PIN);
1939 usc_get_serial_signals(info);
1940
1941 if (info->netcount || info->tty->termios->c_cflag & CREAD)
1942 usc_start_receiver(info);
1943
1944 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1945 }
1946
1947 /* Reconfigure adapter based on new parameters
1948 */
1949 static void mgsl_change_params(struct mgsl_struct *info)
1950 {
1951 unsigned cflag;
1952 int bits_per_char;
1953
1954 if (!info->tty || !info->tty->termios)
1955 return;
1956
1957 if (debug_level >= DEBUG_LEVEL_INFO)
1958 printk("%s(%d):mgsl_change_params(%s)\n",
1959 __FILE__,__LINE__, info->device_name );
1960
1961 cflag = info->tty->termios->c_cflag;
1962
1963 /* if B0 rate (hangup) specified then negate DTR and RTS */
1964 /* otherwise assert DTR and RTS */
1965 if (cflag & CBAUD)
1966 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1967 else
1968 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1969
1970 /* byte size and parity */
1971
1972 switch (cflag & CSIZE) {
1973 case CS5: info->params.data_bits = 5; break;
1974 case CS6: info->params.data_bits = 6; break;
1975 case CS7: info->params.data_bits = 7; break;
1976 case CS8: info->params.data_bits = 8; break;
1977 /* Never happens, but GCC is too dumb to figure it out */
1978 default: info->params.data_bits = 7; break;
1979 }
1980
1981 if (cflag & CSTOPB)
1982 info->params.stop_bits = 2;
1983 else
1984 info->params.stop_bits = 1;
1985
1986 info->params.parity = ASYNC_PARITY_NONE;
1987 if (cflag & PARENB) {
1988 if (cflag & PARODD)
1989 info->params.parity = ASYNC_PARITY_ODD;
1990 else
1991 info->params.parity = ASYNC_PARITY_EVEN;
1992 #ifdef CMSPAR
1993 if (cflag & CMSPAR)
1994 info->params.parity = ASYNC_PARITY_SPACE;
1995 #endif
1996 }
1997
1998 /* calculate number of jiffies to transmit a full
1999 * FIFO (32 bytes) at specified data rate
2000 */
2001 bits_per_char = info->params.data_bits +
2002 info->params.stop_bits + 1;
2003
2004 /* if port data rate is set to 460800 or less then
2005 * allow tty settings to override, otherwise keep the
2006 * current data rate.
2007 */
2008 if (info->params.data_rate <= 460800)
2009 info->params.data_rate = tty_get_baud_rate(info->tty);
2010
2011 if ( info->params.data_rate ) {
2012 info->timeout = (32*HZ*bits_per_char) /
2013 info->params.data_rate;
2014 }
2015 info->timeout += HZ/50; /* Add .02 seconds of slop */
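	/* Worked example (illustrative values): with 8 data bits and 1 stop
	 * bit, bits_per_char = 10 (start + data + stop). At 9600bps a full
	 * 32 byte FIFO drains in 32*10/9600 = ~33ms, so
	 * info->timeout = (32*HZ*10)/9600 = HZ/30 jiffies, plus HZ/50 slop.
	 */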
2016
2017 if (cflag & CRTSCTS)
2018 info->flags |= ASYNC_CTS_FLOW;
2019 else
2020 info->flags &= ~ASYNC_CTS_FLOW;
2021
2022 if (cflag & CLOCAL)
2023 info->flags &= ~ASYNC_CHECK_CD;
2024 else
2025 info->flags |= ASYNC_CHECK_CD;
2026
2027 /* process tty input control flags */
2028
2029 info->read_status_mask = RXSTATUS_OVERRUN;
2030 if (I_INPCK(info->tty))
2031 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2032 if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
2033 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
2034
2035 if (I_IGNPAR(info->tty))
2036 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2037 if (I_IGNBRK(info->tty)) {
2038 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
2039 /* If ignoring parity and break indicators, ignore
2040 * overruns too. (For real raw support).
2041 */
2042 if (I_IGNPAR(info->tty))
2043 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2044 }
2045
2046 mgsl_program_hw(info);
2047
2048 } /* end of mgsl_change_params() */
2049
2050 /* mgsl_put_char()
2051 *
2052 * Add a character to the transmit buffer.
2053 *
2054 * Arguments: tty pointer to tty information structure
2055 * ch character to add to transmit buffer
2056 *
2057 * Return Value: None
2058 */
2059 static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2060 {
2061 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2062 unsigned long flags;
2063
2064 if ( debug_level >= DEBUG_LEVEL_INFO ) {
2065 printk( "%s(%d):mgsl_put_char(%d) on %s\n",
2066 __FILE__,__LINE__,ch,info->device_name);
2067 }
2068
2069 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2070 return;
2071
2072 if (!tty || !info->xmit_buf)
2073 return;
2074
2075 spin_lock_irqsave(&info->irq_spinlock,flags);
2076
2077 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
2078
2079 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2080 info->xmit_buf[info->xmit_head++] = ch;
2081 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2082 info->xmit_cnt++;
2083 }
2084 }
2085
2086 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2087
2088 } /* end of mgsl_put_char() */
2089
2090 /* mgsl_flush_chars()
2091 *
2092 * Enable transmitter so remaining characters in the
2093 * transmit buffer are sent.
2094 *
2095 * Arguments: tty pointer to tty information structure
2096 * Return Value: None
2097 */
2098 static void mgsl_flush_chars(struct tty_struct *tty)
2099 {
2100 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2101 unsigned long flags;
2102
2103 if ( debug_level >= DEBUG_LEVEL_INFO )
2104 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2105 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2106
2107 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2108 return;
2109
2110 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2111 !info->xmit_buf)
2112 return;
2113
2114 if ( debug_level >= DEBUG_LEVEL_INFO )
2115 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2116 __FILE__,__LINE__,info->device_name );
2117
2118 spin_lock_irqsave(&info->irq_spinlock,flags);
2119
2120 if (!info->tx_active) {
2121 if ( (info->params.mode == MGSL_MODE_HDLC ||
2122 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2123 /* operating in synchronous (frame oriented) mode */
2124 /* copy data from circular xmit_buf to */
2125 /* transmit DMA buffer. */
2126 mgsl_load_tx_dma_buffer(info,
2127 info->xmit_buf,info->xmit_cnt);
2128 }
2129 usc_start_transmitter(info);
2130 }
2131
2132 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2133
2134 } /* end of mgsl_flush_chars() */
2135
2136 /* mgsl_write()
2137 *
2138 * Send a block of data
2139 *
2140 * Arguments:
2141 *
2142 * tty pointer to tty information structure
2143 * buf pointer to buffer containing send data
2144 * count size of send data in bytes
2145 *
2146 * Return Value: number of characters written
2147 */
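/*
 * Illustrative user-space usage (the device node name is hypothetical).
 * In the synchronous modes handled below, the entire buffer passed to a
 * single write() is loaded into the transmit DMA buffers as one frame;
 * if a frame is already being transmitted in HDLC mode the call returns 0
 * and the data must be resubmitted later.
 *
 *	int fd = open("/dev/ttySL0", O_RDWR);
 *	unsigned char frame[256];
 *	// ... build frame contents ...
 *	int sent = write(fd, frame, sizeof(frame));
 */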
2148 static int mgsl_write(struct tty_struct * tty,
2149 const unsigned char *buf, int count)
2150 {
2151 int c, ret = 0;
2152 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2153 unsigned long flags;
2154
2155 if ( debug_level >= DEBUG_LEVEL_INFO )
2156 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2157 __FILE__,__LINE__,info->device_name,count);
2158
2159 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2160 goto cleanup;
2161
2162 if (!tty || !info->xmit_buf || !tmp_buf)
2163 goto cleanup;
2164
2165 if ( info->params.mode == MGSL_MODE_HDLC ||
2166 info->params.mode == MGSL_MODE_RAW ) {
2167 /* operating in synchronous (frame oriented) mode */
2169 if (info->tx_active) {
2170
2171 if ( info->params.mode == MGSL_MODE_HDLC ) {
2172 ret = 0;
2173 goto cleanup;
2174 }
2175 /* transmitter is actively sending data -
2176 * if we have multiple transmit dma and
2177 * holding buffers, attempt to queue this
2178 * frame for transmission at a later time.
2179 */
2180 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2181 /* no tx holding buffers available */
2182 ret = 0;
2183 goto cleanup;
2184 }
2185
2186 /* queue transmit frame request */
2187 ret = count;
2188 save_tx_buffer_request(info,buf,count);
2189
2190 /* if we have sufficient tx dma buffers,
2191 * load the next buffered tx request
2192 */
2193 spin_lock_irqsave(&info->irq_spinlock,flags);
2194 load_next_tx_holding_buffer(info);
2195 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2196 goto cleanup;
2197 }
2198
2199 /* if operating in HDLC LoopMode and the adapter */
2200 /* has yet to be inserted into the loop, we can't */
2201 /* transmit */
2202
2203 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2204 !usc_loopmode_active(info) )
2205 {
2206 ret = 0;
2207 goto cleanup;
2208 }
2209
2210 if ( info->xmit_cnt ) {
2211 /* Send data accumulated from mgsl_put_char() calls */
2212 /* as a frame and wait before accepting more data. */
2213 ret = 0;
2214
2215 /* copy data from circular xmit_buf to */
2216 /* transmit DMA buffer. */
2217 mgsl_load_tx_dma_buffer(info,
2218 info->xmit_buf,info->xmit_cnt);
2219 if ( debug_level >= DEBUG_LEVEL_INFO )
2220 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2221 __FILE__,__LINE__,info->device_name);
2222 } else {
2223 if ( debug_level >= DEBUG_LEVEL_INFO )
2224 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2225 __FILE__,__LINE__,info->device_name);
2226 ret = count;
2227 info->xmit_cnt = count;
2228 mgsl_load_tx_dma_buffer(info,buf,count);
2229 }
2230 } else {
2231 while (1) {
2232 spin_lock_irqsave(&info->irq_spinlock,flags);
2233 c = min_t(int, count,
2234 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2235 SERIAL_XMIT_SIZE - info->xmit_head));
2236 if (c <= 0) {
2237 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2238 break;
2239 }
2240 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2241 info->xmit_head = ((info->xmit_head + c) &
2242 (SERIAL_XMIT_SIZE-1));
2243 info->xmit_cnt += c;
2244 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2245 buf += c;
2246 count -= c;
2247 ret += c;
2248 }
2249 }
2250
2251 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2252 spin_lock_irqsave(&info->irq_spinlock,flags);
2253 if (!info->tx_active)
2254 usc_start_transmitter(info);
2255 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2256 }
2257 cleanup:
2258 if ( debug_level >= DEBUG_LEVEL_INFO )
2259 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2260 __FILE__,__LINE__,info->device_name,ret);
2261
2262 return ret;
2263
2264 } /* end of mgsl_write() */
2265
2266 /* mgsl_write_room()
2267 *
2268 * Return the count of free bytes in transmit buffer
2269 *
2270 * Arguments: tty pointer to tty info structure
2271 * Return Value: count of free bytes in transmit buffer
2272 */
2273 static int mgsl_write_room(struct tty_struct *tty)
2274 {
2275 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2276 int ret;
2277
2278 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2279 return 0;
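	/* one byte of the circular xmit_buf is left unused so that a full
	 * buffer can be distinguished from an empty one (head == tail) */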
2280 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2281 if (ret < 0)
2282 ret = 0;
2283
2284 if (debug_level >= DEBUG_LEVEL_INFO)
2285 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2286 __FILE__,__LINE__, info->device_name,ret );
2287
2288 if ( info->params.mode == MGSL_MODE_HDLC ||
2289 info->params.mode == MGSL_MODE_RAW ) {
2290 /* operating in synchronous (frame oriented) mode */
2291 if ( info->tx_active )
2292 return 0;
2293 else
2294 return HDLC_MAX_FRAME_SIZE;
2295 }
2296
2297 return ret;
2298
2299 } /* end of mgsl_write_room() */
2300
2301 /* mgsl_chars_in_buffer()
2302 *
2303 * Return the count of bytes in transmit buffer
2304 *
2305 * Arguments: tty pointer to tty info structure
2306 * Return Value: count of bytes in transmit buffer
2307 */
2308 static int mgsl_chars_in_buffer(struct tty_struct *tty)
2309 {
2310 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2311
2312 if (debug_level >= DEBUG_LEVEL_INFO)
2313 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2314 __FILE__,__LINE__, info->device_name );
2315
2316 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2317 return 0;
2318
2319 if (debug_level >= DEBUG_LEVEL_INFO)
2320 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2321 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2322
2323 if ( info->params.mode == MGSL_MODE_HDLC ||
2324 info->params.mode == MGSL_MODE_RAW ) {
2325 /* operating in synchronous (frame oriented) mode */
2326 if ( info->tx_active )
2327 return info->max_frame_size;
2328 else
2329 return 0;
2330 }
2331
2332 return info->xmit_cnt;
2333 } /* end of mgsl_chars_in_buffer() */
2334
2335 /* mgsl_flush_buffer()
2336 *
2337 * Discard all data in the send buffer
2338 *
2339 * Arguments: tty pointer to tty info structure
2340 * Return Value: None
2341 */
2342 static void mgsl_flush_buffer(struct tty_struct *tty)
2343 {
2344 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2345 unsigned long flags;
2346
2347 if (debug_level >= DEBUG_LEVEL_INFO)
2348 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2349 __FILE__,__LINE__, info->device_name );
2350
2351 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2352 return;
2353
2354 spin_lock_irqsave(&info->irq_spinlock,flags);
2355 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2356 del_timer(&info->tx_timer);
2357 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2358
2359 wake_up_interruptible(&tty->write_wait);
2360 tty_wakeup(tty);
2361 }
2362
2363 /* mgsl_send_xchar()
2364 *
2365 * Send a high-priority XON/XOFF character
2366 *
2367 * Arguments: tty pointer to tty info structure
2368 * ch character to send
2369 * Return Value: None
2370 */
2371 static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2372 {
2373 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2374 unsigned long flags;
2375
2376 if (debug_level >= DEBUG_LEVEL_INFO)
2377 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2378 __FILE__,__LINE__, info->device_name, ch );
2379
2380 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2381 return;
2382
2383 info->x_char = ch;
2384 if (ch) {
2385 /* Make sure transmit interrupts are on */
2386 spin_lock_irqsave(&info->irq_spinlock,flags);
2387 if (!info->tx_enabled)
2388 usc_start_transmitter(info);
2389 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2390 }
2391 } /* end of mgsl_send_xchar() */
2392
2393 /* mgsl_throttle()
2394 *
2395 * Signal remote device to throttle send data (our receive data)
2396 *
2397 * Arguments: tty pointer to tty info structure
2398 * Return Value: None
2399 */
2400 static void mgsl_throttle(struct tty_struct * tty)
2401 {
2402 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2403 unsigned long flags;
2404
2405 if (debug_level >= DEBUG_LEVEL_INFO)
2406 printk("%s(%d):mgsl_throttle(%s) entry\n",
2407 __FILE__,__LINE__, info->device_name );
2408
2409 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2410 return;
2411
2412 if (I_IXOFF(tty))
2413 mgsl_send_xchar(tty, STOP_CHAR(tty));
2414
2415 if (tty->termios->c_cflag & CRTSCTS) {
2416 spin_lock_irqsave(&info->irq_spinlock,flags);
2417 info->serial_signals &= ~SerialSignal_RTS;
2418 usc_set_serial_signals(info);
2419 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2420 }
2421 } /* end of mgsl_throttle() */
2422
2423 /* mgsl_unthrottle()
2424 *
2425 * Signal remote device to stop throttling send data (our receive data)
2426 *
2427 * Arguments: tty pointer to tty info structure
2428 * Return Value: None
2429 */
2430 static void mgsl_unthrottle(struct tty_struct * tty)
2431 {
2432 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2433 unsigned long flags;
2434
2435 if (debug_level >= DEBUG_LEVEL_INFO)
2436 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2437 __FILE__,__LINE__, info->device_name );
2438
2439 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2440 return;
2441
2442 if (I_IXOFF(tty)) {
2443 if (info->x_char)
2444 info->x_char = 0;
2445 else
2446 mgsl_send_xchar(tty, START_CHAR(tty));
2447 }
2448
2449 if (tty->termios->c_cflag & CRTSCTS) {
2450 spin_lock_irqsave(&info->irq_spinlock,flags);
2451 info->serial_signals |= SerialSignal_RTS;
2452 usc_set_serial_signals(info);
2453 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2454 }
2455
2456 } /* end of mgsl_unthrottle() */
2457
2458 /* mgsl_get_stats()
2459 *
2460 * get the current statistics (event and error counters)
2461 *
2462 * Arguments: info pointer to device instance data
2463 * user_icount pointer to buffer to hold returned stats
2464 *
2465 * Return Value: 0 if success, otherwise error code
2466 */
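/*
 * Illustrative ioctl usage (fd is assumed to be an open synclink tty):
 *
 *	struct mgsl_icount stats;
 *	ioctl(fd, MGSL_IOCGSTATS, &stats);
 *	// passing a NULL argument clears the driver's counters instead
 */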
2467 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2468 {
2469 int err;
2470
2471 if (debug_level >= DEBUG_LEVEL_INFO)
2472 printk("%s(%d):mgsl_get_stats(%s)\n",
2473 __FILE__,__LINE__, info->device_name);
2474
2475 if (!user_icount) {
2476 memset(&info->icount, 0, sizeof(info->icount));
2477 } else {
2478 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2479 if (err)
2480 return -EFAULT;
2481 }
2482
2483 return 0;
2484
2485 } /* end of mgsl_get_stats() */
2486
2487 /* mgsl_get_params()
2488 *
2489 * get the current serial parameters information
2490 *
2491 * Arguments: info pointer to device instance data
2492 * user_params pointer to buffer to hold returned params
2493 *
2494 * Return Value: 0 if success, otherwise error code
2495 */
2496 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2497 {
2498 int err;
2499 if (debug_level >= DEBUG_LEVEL_INFO)
2500 printk("%s(%d):mgsl_get_params(%s)\n",
2501 __FILE__,__LINE__, info->device_name);
2502
2503 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2504 if (err) {
2505 if ( debug_level >= DEBUG_LEVEL_INFO )
2506 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2507 __FILE__,__LINE__,info->device_name);
2508 return -EFAULT;
2509 }
2510
2511 return 0;
2512
2513 } /* end of mgsl_get_params() */
2514
2515 /* mgsl_set_params()
2516 *
2517 * set the serial parameters
2518 *
2519 * Arguments:
2520 *
2521 * info pointer to device instance data
2522 * new_params user buffer containing new serial params
2523 *
2524 * Return Value: 0 if success, otherwise error code
2525 */
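/*
 * Illustrative ioctl usage (fd is assumed to be an open synclink tty):
 *
 *	MGSL_PARAMS params;
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);	// read current settings
 *	params.mode = MGSL_MODE_HDLC;		// e.g. switch to HDLC framing
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);	// apply and reprogram hardware
 */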
2526 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2527 {
2528 unsigned long flags;
2529 MGSL_PARAMS tmp_params;
2530 int err;
2531
2532 if (debug_level >= DEBUG_LEVEL_INFO)
2533 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2534 info->device_name );
2535 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2536 if (err) {
2537 if ( debug_level >= DEBUG_LEVEL_INFO )
2538 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2539 __FILE__,__LINE__,info->device_name);
2540 return -EFAULT;
2541 }
2542
2543 spin_lock_irqsave(&info->irq_spinlock,flags);
2544 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2545 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2546
2547 mgsl_change_params(info);
2548
2549 return 0;
2550
2551 } /* end of mgsl_set_params() */
2552
2553 /* mgsl_get_txidle()
2554 *
2555 * get the current transmit idle mode
2556 *
2557 * Arguments: info pointer to device instance data
2558 * idle_mode pointer to buffer to hold returned idle mode
2559 *
2560 * Return Value: 0 if success, otherwise error code
2561 */
2562 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2563 {
2564 int err;
2565
2566 if (debug_level >= DEBUG_LEVEL_INFO)
2567 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2568 __FILE__,__LINE__, info->device_name, info->idle_mode);
2569
2570 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2571 if (err) {
2572 if ( debug_level >= DEBUG_LEVEL_INFO )
2573 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2574 __FILE__,__LINE__,info->device_name);
2575 return -EFAULT;
2576 }
2577
2578 return 0;
2579
2580 } /* end of mgsl_get_txidle() */
2581
2582 /* mgsl_set_txidle() service ioctl to set transmit idle mode
2583 *
2584 * Arguments: info pointer to device instance data
2585 * idle_mode new idle mode
2586 *
2587 * Return Value: 0 if success, otherwise error code
2588 */
2589 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2590 {
2591 unsigned long flags;
2592
2593 if (debug_level >= DEBUG_LEVEL_INFO)
2594 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2595 info->device_name, idle_mode );
2596
2597 spin_lock_irqsave(&info->irq_spinlock,flags);
2598 info->idle_mode = idle_mode;
2599 usc_set_txidle( info );
2600 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2601 return 0;
2602
2603 } /* end of mgsl_set_txidle() */
2604
2605 /* mgsl_txenable()
2606 *
2607 * enable or disable the transmitter
2608 *
2609 * Arguments:
2610 *
2611 * info pointer to device instance data
2612 * enable 1 = enable, 0 = disable
2613 *
2614 * Return Value: 0 if success, otherwise error code
2615 */
2616 static int mgsl_txenable(struct mgsl_struct * info, int enable)
2617 {
2618 unsigned long flags;
2619
2620 if (debug_level >= DEBUG_LEVEL_INFO)
2621 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2622 info->device_name, enable);
2623
2624 spin_lock_irqsave(&info->irq_spinlock,flags);
2625 if ( enable ) {
2626 if ( !info->tx_enabled ) {
2627
2628 usc_start_transmitter(info);
2629 /*--------------------------------------------------
2630 * if HDLC/SDLC Loop mode, attempt to insert the
2631 * station in the 'loop' by setting CMR:13. Upon
2632 * receipt of the next GoAhead (RxAbort) sequence,
2633 * the OnLoop indicator (CCSR:7) should go active
2634 * to indicate that we are on the loop
2635 *--------------------------------------------------*/
2636 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2637 usc_loopmode_insert_request( info );
2638 }
2639 } else {
2640 if ( info->tx_enabled )
2641 usc_stop_transmitter(info);
2642 }
2643 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2644 return 0;
2645
2646 } /* end of mgsl_txenable() */
2647
2648 /* mgsl_txabort() abort transmission of the current HDLC frame
2649 *
2650 * Arguments: info pointer to device instance data
2651 * Return Value: 0 if success, otherwise error code
2652 */
2653 static int mgsl_txabort(struct mgsl_struct * info)
2654 {
2655 unsigned long flags;
2656
2657 if (debug_level >= DEBUG_LEVEL_INFO)
2658 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2659 info->device_name);
2660
2661 spin_lock_irqsave(&info->irq_spinlock,flags);
2662 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2663 {
2664 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2665 usc_loopmode_cancel_transmit( info );
2666 else
2667 usc_TCmd(info,TCmd_SendAbort);
2668 }
2669 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2670 return 0;
2671
2672 } /* end of mgsl_txabort() */
2673
2674 /* mgsl_rxenable() enable or disable the receiver
2675 *
2676 * Arguments: info pointer to device instance data
2677 * enable 1 = enable, 0 = disable
2678 * Return Value: 0 if success, otherwise error code
2679 */
2680 static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2681 {
2682 unsigned long flags;
2683
2684 if (debug_level >= DEBUG_LEVEL_INFO)
2685 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2686 info->device_name, enable);
2687
2688 spin_lock_irqsave(&info->irq_spinlock,flags);
2689 if ( enable ) {
2690 if ( !info->rx_enabled )
2691 usc_start_receiver(info);
2692 } else {
2693 if ( info->rx_enabled )
2694 usc_stop_receiver(info);
2695 }
2696 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2697 return 0;
2698
2699 } /* end of mgsl_rxenable() */
2700
2701 /* mgsl_wait_event() wait for specified event to occur
2702 *
2703 * Arguments: info pointer to device instance data
2704 * mask pointer to bitmask of events to wait for
2705 * Return Value: 0 if successful and the bit mask is updated with
2706 * the events triggered,
2707 * otherwise error code
2708 */
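/*
 * Illustrative ioctl usage (fd is assumed to be an open synclink tty):
 *
 *	int mask = MgslEvent_DcdActive + MgslEvent_DcdInactive;
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &mask) == 0)
 *		;	// mask now holds the event(s) that occurred
 */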
2709 static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2710 {
2711 unsigned long flags;
2712 int s;
2713 int rc=0;
2714 struct mgsl_icount cprev, cnow;
2715 int events;
2716 int mask;
2717 struct _input_signal_events oldsigs, newsigs;
2718 DECLARE_WAITQUEUE(wait, current);
2719
2720 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2721 if (rc) {
2722 return -EFAULT;
2723 }
2724
2725 if (debug_level >= DEBUG_LEVEL_INFO)
2726 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2727 info->device_name, mask);
2728
2729 spin_lock_irqsave(&info->irq_spinlock,flags);
2730
2731 /* return immediately if state matches requested events */
2732 usc_get_serial_signals(info);
2733 s = info->serial_signals;
2734 events = mask &
2735 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2736 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2737 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2738 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2739 if (events) {
2740 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2741 goto exit;
2742 }
2743
2744 /* save current irq counts */
2745 cprev = info->icount;
2746 oldsigs = info->input_signal_events;
2747
2748 /* enable hunt and idle irqs if needed */
2749 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2750 u16 oldreg = usc_InReg(info,RICR);
2751 u16 newreg = oldreg +
2752 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2753 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2754 if (oldreg != newreg)
2755 usc_OutReg(info, RICR, newreg);
2756 }
2757
2758 set_current_state(TASK_INTERRUPTIBLE);
2759 add_wait_queue(&info->event_wait_q, &wait);
2760
2761 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2762
2763
2764 for(;;) {
2765 schedule();
2766 if (signal_pending(current)) {
2767 rc = -ERESTARTSYS;
2768 break;
2769 }
2770
2771 /* get current irq counts */
2772 spin_lock_irqsave(&info->irq_spinlock,flags);
2773 cnow = info->icount;
2774 newsigs = info->input_signal_events;
2775 set_current_state(TASK_INTERRUPTIBLE);
2776 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2777
2778 /* if no change, wait aborted for some reason */
2779 if (newsigs.dsr_up == oldsigs.dsr_up &&
2780 newsigs.dsr_down == oldsigs.dsr_down &&
2781 newsigs.dcd_up == oldsigs.dcd_up &&
2782 newsigs.dcd_down == oldsigs.dcd_down &&
2783 newsigs.cts_up == oldsigs.cts_up &&
2784 newsigs.cts_down == oldsigs.cts_down &&
2785 newsigs.ri_up == oldsigs.ri_up &&
2786 newsigs.ri_down == oldsigs.ri_down &&
2787 cnow.exithunt == cprev.exithunt &&
2788 cnow.rxidle == cprev.rxidle) {
2789 rc = -EIO;
2790 break;
2791 }
2792
2793 events = mask &
2794 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2795 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2796 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2797 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2798 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2799 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2800 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2801 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2802 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2803 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2804 if (events)
2805 break;
2806
2807 cprev = cnow;
2808 oldsigs = newsigs;
2809 }
2810
2811 remove_wait_queue(&info->event_wait_q, &wait);
2812 set_current_state(TASK_RUNNING);
2813
2814 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2815 spin_lock_irqsave(&info->irq_spinlock,flags);
2816 if (!waitqueue_active(&info->event_wait_q)) {
2817 /* disable exit hunt mode/idle rcvd IRQs */
2818 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2819 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2820 }
2821 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2822 }
2823 exit:
2824 if ( rc == 0 )
2825 PUT_USER(rc, events, mask_ptr);
2826
2827 return rc;
2828
2829 } /* end of mgsl_wait_event() */
2830
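/* modem_input_wait()
 *
 * Block until one of the modem inputs selected by arg (TIOCM_RNG,
 * TIOCM_DSR, TIOCM_CD, TIOCM_CTS) changes state. Used to implement
 * the TIOCMIWAIT ioctl.
 *
 * Return Value: 0 on a matching change, -ERESTARTSYS if interrupted
 * by a signal, -EIO if the wait was aborted without any signal change
 */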
2831 static int modem_input_wait(struct mgsl_struct *info,int arg)
2832 {
2833 unsigned long flags;
2834 int rc;
2835 struct mgsl_icount cprev, cnow;
2836 DECLARE_WAITQUEUE(wait, current);
2837
2838 /* save current irq counts */
2839 spin_lock_irqsave(&info->irq_spinlock,flags);
2840 cprev = info->icount;
2841 add_wait_queue(&info->status_event_wait_q, &wait);
2842 set_current_state(TASK_INTERRUPTIBLE);
2843 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2844
2845 for(;;) {
2846 schedule();
2847 if (signal_pending(current)) {
2848 rc = -ERESTARTSYS;
2849 break;
2850 }
2851
2852 /* get new irq counts */
2853 spin_lock_irqsave(&info->irq_spinlock,flags);
2854 cnow = info->icount;
2855 set_current_state(TASK_INTERRUPTIBLE);
2856 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2857
2858 /* if no change, wait aborted for some reason */
2859 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2860 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2861 rc = -EIO;
2862 break;
2863 }
2864
2865 /* check for change in caller specified modem input */
2866 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2867 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2868 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2869 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2870 rc = 0;
2871 break;
2872 }
2873
2874 cprev = cnow;
2875 }
2876 remove_wait_queue(&info->status_event_wait_q, &wait);
2877 set_current_state(TASK_RUNNING);
2878 return rc;
2879 }
2880
2881 /* return the state of the serial control and status signals
2882 */
2883 static int tiocmget(struct tty_struct *tty, struct file *file)
2884 {
2885 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2886 unsigned int result;
2887 unsigned long flags;
2888
2889 spin_lock_irqsave(&info->irq_spinlock,flags);
2890 usc_get_serial_signals(info);
2891 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2892
2893 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2894 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2895 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2896 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2897 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2898 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2899
2900 if (debug_level >= DEBUG_LEVEL_INFO)
2901 printk("%s(%d):%s tiocmget() value=%08X\n",
2902 __FILE__,__LINE__, info->device_name, result );
2903 return result;
2904 }
2905
2906 /* set modem control signals (DTR/RTS)
2907 */
2908 static int tiocmset(struct tty_struct *tty, struct file *file,
2909 unsigned int set, unsigned int clear)
2910 {
2911 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2912 unsigned long flags;
2913
2914 if (debug_level >= DEBUG_LEVEL_INFO)
2915 printk("%s(%d):%s tiocmset(%x,%x)\n",
2916 __FILE__,__LINE__,info->device_name, set, clear);
2917
2918 if (set & TIOCM_RTS)
2919 info->serial_signals |= SerialSignal_RTS;
2920 if (set & TIOCM_DTR)
2921 info->serial_signals |= SerialSignal_DTR;
2922 if (clear & TIOCM_RTS)
2923 info->serial_signals &= ~SerialSignal_RTS;
2924 if (clear & TIOCM_DTR)
2925 info->serial_signals &= ~SerialSignal_DTR;
2926
2927 spin_lock_irqsave(&info->irq_spinlock,flags);
2928 usc_set_serial_signals(info);
2929 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2930
2931 return 0;
2932 }
2933
2934 /* mgsl_break() Set or clear transmit break condition
2935 *
2936 * Arguments: tty pointer to tty instance data
2937 * break_state -1=set break condition, 0=clear
2938 * Return Value: None
2939 */
2940 static void mgsl_break(struct tty_struct *tty, int break_state)
2941 {
2942 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2943 unsigned long flags;
2944
2945 if (debug_level >= DEBUG_LEVEL_INFO)
2946 printk("%s(%d):mgsl_break(%s,%d)\n",
2947 __FILE__,__LINE__, info->device_name, break_state);
2948
2949 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2950 return;
2951
2952 spin_lock_irqsave(&info->irq_spinlock,flags);
2953 if (break_state == -1)
2954 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2955 else
2956 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2957 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2958
2959 } /* end of mgsl_break() */
2960
2961 /* mgsl_ioctl() Service an IOCTL request
2962 *
2963 * Arguments:
2964 *
2965 * tty pointer to tty instance data
2966 * file pointer to associated file object for device
2967 * cmd IOCTL command code
2968 * arg command argument/context
2969 *
2970 * Return Value: 0 if success, otherwise error code
2971 */
2972 static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2973 unsigned int cmd, unsigned long arg)
2974 {
2975 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2976
2977 if (debug_level >= DEBUG_LEVEL_INFO)
2978 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2979 info->device_name, cmd );
2980
2981 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2982 return -ENODEV;
2983
2984 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2985 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
2986 if (tty->flags & (1 << TTY_IO_ERROR))
2987 return -EIO;
2988 }
2989
2990 return mgsl_ioctl_common(info, cmd, arg);
2991 }
2992
2993 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2994 {
2995 int error;
2996 struct mgsl_icount cnow; /* kernel counter temps */
2997 void __user *argp = (void __user *)arg;
2998 struct serial_icounter_struct __user *p_cuser; /* user space */
2999 unsigned long flags;
3000
3001 switch (cmd) {
3002 case MGSL_IOCGPARAMS:
3003 return mgsl_get_params(info, argp);
3004 case MGSL_IOCSPARAMS:
3005 return mgsl_set_params(info, argp);
3006 case MGSL_IOCGTXIDLE:
3007 return mgsl_get_txidle(info, argp);
3008 case MGSL_IOCSTXIDLE:
3009 return mgsl_set_txidle(info,(int)arg);
3010 case MGSL_IOCTXENABLE:
3011 return mgsl_txenable(info,(int)arg);
3012 case MGSL_IOCRXENABLE:
3013 return mgsl_rxenable(info,(int)arg);
3014 case MGSL_IOCTXABORT:
3015 return mgsl_txabort(info);
3016 case MGSL_IOCGSTATS:
3017 return mgsl_get_stats(info, argp);
3018 case MGSL_IOCWAITEVENT:
3019 return mgsl_wait_event(info, argp);
3020 case MGSL_IOCLOOPTXDONE:
3021 return mgsl_loopmode_send_done(info);
3022 /* Wait for modem input (DCD,RI,DSR,CTS) change
3023 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3024 */
3025 case TIOCMIWAIT:
3026 return modem_input_wait(info,(int)arg);
3027
3028 /*
3029 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
3030 * Return: write counters to the user passed counter struct
3031 * NB: both 1->0 and 0->1 transitions are counted except for
3032 * RI where only 0->1 is counted.
3033 */
3034 case TIOCGICOUNT:
3035 spin_lock_irqsave(&info->irq_spinlock,flags);
3036 cnow = info->icount;
3037 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3038 p_cuser = argp;
3039 PUT_USER(error,cnow.cts, &p_cuser->cts);
3040 if (error) return error;
3041 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
3042 if (error) return error;
3043 PUT_USER(error,cnow.rng, &p_cuser->rng);
3044 if (error) return error;
3045 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
3046 if (error) return error;
3047 PUT_USER(error,cnow.rx, &p_cuser->rx);
3048 if (error) return error;
3049 PUT_USER(error,cnow.tx, &p_cuser->tx);
3050 if (error) return error;
3051 PUT_USER(error,cnow.frame, &p_cuser->frame);
3052 if (error) return error;
3053 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
3054 if (error) return error;
3055 PUT_USER(error,cnow.parity, &p_cuser->parity);
3056 if (error) return error;
3057 PUT_USER(error,cnow.brk, &p_cuser->brk);
3058 if (error) return error;
3059 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
3060 if (error) return error;
3061 return 0;
3062 default:
3063 return -ENOIOCTLCMD;
3064 }
3065 return 0;
3066 }
3067
3068 /* mgsl_set_termios()
3069 *
3070 * Set new termios settings
3071 *
3072 * Arguments:
3073 *
3074 * tty pointer to tty structure
3075 * old_termios pointer to the previous termios settings
3076 *
3077 * Return Value: None
3078 */
3079 static void mgsl_set_termios(struct tty_struct *tty, struct termios *old_termios)
3080 {
3081 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
3082 unsigned long flags;
3083
3084 if (debug_level >= DEBUG_LEVEL_INFO)
3085 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3086 tty->driver->name );
3087
3088 /* just return if nothing has changed */
3089 if ((tty->termios->c_cflag == old_termios->c_cflag)
3090 && (RELEVANT_IFLAG(tty->termios->c_iflag)
3091 == RELEVANT_IFLAG(old_termios->c_iflag)))
3092 return;
3093
3094 mgsl_change_params(info);
3095
3096 /* Handle transition to B0 status */
3097 if (old_termios->c_cflag & CBAUD &&
3098 !(tty->termios->c_cflag & CBAUD)) {
3099 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3100 spin_lock_irqsave(&info->irq_spinlock,flags);
3101 usc_set_serial_signals(info);
3102 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3103 }
3104
3105 /* Handle transition away from B0 status */
3106 if (!(old_termios->c_cflag & CBAUD) &&
3107 tty->termios->c_cflag & CBAUD) {
3108 info->serial_signals |= SerialSignal_DTR;
3109 if (!(tty->termios->c_cflag & CRTSCTS) ||
3110 !test_bit(TTY_THROTTLED, &tty->flags)) {
3111 info->serial_signals |= SerialSignal_RTS;
3112 }
3113 spin_lock_irqsave(&info->irq_spinlock,flags);
3114 usc_set_serial_signals(info);
3115 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3116 }
3117
3118 /* Handle turning off CRTSCTS */
3119 if (old_termios->c_cflag & CRTSCTS &&
3120 !(tty->termios->c_cflag & CRTSCTS)) {
3121 tty->hw_stopped = 0;
3122 mgsl_start(tty);
3123 }
3124
3125 } /* end of mgsl_set_termios() */
3126
3127 /* mgsl_close()
3128 *
3129 * Called when port is closed. Wait for remaining data to be
3130 * sent. Disable port and free resources.
3131 *
3132 * Arguments:
3133 *
3134 * tty pointer to open tty structure
3135 * filp pointer to open file object
3136 *
3137 * Return Value: None
3138 */
3139 static void mgsl_close(struct tty_struct *tty, struct file * filp)
3140 {
3141 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3142
3143 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3144 return;
3145
3146 if (debug_level >= DEBUG_LEVEL_INFO)
3147 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3148 __FILE__,__LINE__, info->device_name, info->count);
3149
3150 if (!info->count)
3151 return;
3152
3153 if (tty_hung_up_p(filp))
3154 goto cleanup;
3155
3156 if ((tty->count == 1) && (info->count != 1)) {
3157 /*
3158 * tty->count is 1 and the tty structure will be freed.
3159 * info->count should be one in this case.
3160 * if it's not, correct it so that the port is shut down.
3161 */
3162 printk("mgsl_close: bad refcount; tty->count is 1, "
3163 "info->count is %d\n", info->count);
3164 info->count = 1;
3165 }
3166
3167 info->count--;
3168
3169 /* if at least one open remaining, leave hardware active */
3170 if (info->count)
3171 goto cleanup;
3172
3173 info->flags |= ASYNC_CLOSING;
3174
3175 /* set tty->closing to notify line discipline to
3176 * only process XON/XOFF characters. Only the N_TTY
3177 * discipline appears to use this (ppp does not).
3178 */
3179 tty->closing = 1;
3180
3181 /* wait for transmit data to clear all layers */
3182
3183 if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
3184 if (debug_level >= DEBUG_LEVEL_INFO)
3185 printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
3186 __FILE__,__LINE__, info->device_name );
3187 tty_wait_until_sent(tty, info->closing_wait);
3188 }
3189
3190 if (info->flags & ASYNC_INITIALIZED)
3191 mgsl_wait_until_sent(tty, info->timeout);
3192
3193 if (tty->driver->flush_buffer)
3194 tty->driver->flush_buffer(tty);
3195
3196 tty_ldisc_flush(tty);
3197
3198 shutdown(info);
3199
3200 tty->closing = 0;
3201 info->tty = NULL;
3202
3203 if (info->blocked_open) {
3204 if (info->close_delay) {
3205 msleep_interruptible(jiffies_to_msecs(info->close_delay));
3206 }
3207 wake_up_interruptible(&info->open_wait);
3208 }
3209
3210 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
3211
3212 wake_up_interruptible(&info->close_wait);
3213
3214 cleanup:
3215 if (debug_level >= DEBUG_LEVEL_INFO)
3216 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3217 tty->driver->name, info->count);
3218
3219 } /* end of mgsl_close() */
3220
3221 /* mgsl_wait_until_sent()
3222 *
3223 * Wait until the transmitter is empty.
3224 *
3225 * Arguments:
3226 *
3227 * tty pointer to tty info structure
3228 * timeout time to wait for send completion
3229 *
3230 * Return Value: None
3231 */
3232 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3233 {
3234 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3235 unsigned long orig_jiffies, char_time;
3236
3237 if (!info )
3238 return;
3239
3240 if (debug_level >= DEBUG_LEVEL_INFO)
3241 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3242 __FILE__,__LINE__, info->device_name );
3243
3244 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3245 return;
3246
3247 if (!(info->flags & ASYNC_INITIALIZED))
3248 goto exit;
3249
3250 orig_jiffies = jiffies;
3251
3252 /* Set check interval to 1/5 of estimated time to
3253 * send a character, and make it at least 1. The check
3254 * interval should also be less than the timeout.
3255 * Note: use tight timings here to satisfy the NIST-PCTS.
3256 */
3257
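	/* info->timeout covers a full 32 byte FIFO (see mgsl_change_params),
	 * so timeout/32 approximates one character time and dividing by 5
	 * gives the 1/5 character check interval described above */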
3258 if ( info->params.data_rate ) {
3259 char_time = info->timeout/(32 * 5);
3260 if (!char_time)
3261 char_time++;
3262 } else
3263 char_time = 1;
3264
3265 if (timeout)
3266 char_time = min_t(unsigned long, char_time, timeout);
3267
3268 if ( info->params.mode == MGSL_MODE_HDLC ||
3269 info->params.mode == MGSL_MODE_RAW ) {
3270 while (info->tx_active) {
3271 msleep_interruptible(jiffies_to_msecs(char_time));
3272 if (signal_pending(current))
3273 break;
3274 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3275 break;
3276 }
3277 } else {
3278 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3279 info->tx_enabled) {
3280 msleep_interruptible(jiffies_to_msecs(char_time));
3281 if (signal_pending(current))
3282 break;
3283 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3284 break;
3285 }
3286 }
3287
3288 exit:
3289 if (debug_level >= DEBUG_LEVEL_INFO)
3290 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3291 __FILE__,__LINE__, info->device_name );
3292
3293 } /* end of mgsl_wait_until_sent() */
3294
3295 /* mgsl_hangup()
3296 *
3297 * Called by tty_hangup() when a hangup is signaled.
3298 * This is the same as closing all open files for the port.
3299 *
3300 * Arguments: tty pointer to associated tty object
3301 * Return Value: None
3302 */
3303 static void mgsl_hangup(struct tty_struct *tty)
3304 {
3305 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3306
3307 if (debug_level >= DEBUG_LEVEL_INFO)
3308 printk("%s(%d):mgsl_hangup(%s)\n",
3309 __FILE__,__LINE__, info->device_name );
3310
3311 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3312 return;
3313
3314 mgsl_flush_buffer(tty);
3315 shutdown(info);
3316
3317 info->count = 0;
3318 info->flags &= ~ASYNC_NORMAL_ACTIVE;
3319 info->tty = NULL;
3320
3321 wake_up_interruptible(&info->open_wait);
3322
3323 } /* end of mgsl_hangup() */
3324
3325 /* block_til_ready()
3326 *
3327 * Block the current process until the specified port
3328 * is ready to be opened.
3329 *
3330 * Arguments:
3331 *
3332 * tty pointer to tty info structure
3333 * filp pointer to open file object
3334 * info pointer to device instance data
3335 *
3336 * Return Value: 0 if success, otherwise error code
3337 */
3338 static int block_til_ready(struct tty_struct *tty, struct file * filp,
3339 struct mgsl_struct *info)
3340 {
3341 DECLARE_WAITQUEUE(wait, current);
3342 int retval;
3343 int do_clocal = 0, extra_count = 0;
3344 unsigned long flags;
3345
3346 if (debug_level >= DEBUG_LEVEL_INFO)
3347 printk("%s(%d):block_til_ready on %s\n",
3348 __FILE__,__LINE__, tty->driver->name );
3349
3350 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3351 /* nonblock mode is set or port is not enabled */
3352 info->flags |= ASYNC_NORMAL_ACTIVE;
3353 return 0;
3354 }
3355
3356 if (tty->termios->c_cflag & CLOCAL)
3357 do_clocal = 1;
3358
3359 /* Wait for carrier detect and the line to become
3360 * free (i.e., not in use by the callout). While we are in
3361 * this loop, info->count is dropped by one, so that
3362 * mgsl_close() knows when to free things. We restore it upon
3363 * exit, either normal or abnormal.
3364 */
3365
3366 retval = 0;
3367 add_wait_queue(&info->open_wait, &wait);
3368
3369 if (debug_level >= DEBUG_LEVEL_INFO)
3370 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3371 __FILE__,__LINE__, tty->driver->name, info->count );
3372
3373 spin_lock_irqsave(&info->irq_spinlock, flags);
3374 if (!tty_hung_up_p(filp)) {
3375 extra_count = 1;
3376 info->count--;
3377 }
3378 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3379 info->blocked_open++;
3380
3381 while (1) {
3382 if (tty->termios->c_cflag & CBAUD) {
3383 spin_lock_irqsave(&info->irq_spinlock,flags);
3384 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3385 usc_set_serial_signals(info);
3386 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3387 }
3388
3389 set_current_state(TASK_INTERRUPTIBLE);
3390
3391 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
3392 retval = (info->flags & ASYNC_HUP_NOTIFY) ?
3393 -EAGAIN : -ERESTARTSYS;
3394 break;
3395 }
3396
3397 spin_lock_irqsave(&info->irq_spinlock,flags);
3398 usc_get_serial_signals(info);
3399 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3400
3401 if (!(info->flags & ASYNC_CLOSING) &&
3402 (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
3403 break;
3404 }
3405
3406 if (signal_pending(current)) {
3407 retval = -ERESTARTSYS;
3408 break;
3409 }
3410
3411 if (debug_level >= DEBUG_LEVEL_INFO)
3412 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3413 __FILE__,__LINE__, tty->driver->name, info->count );
3414
3415 schedule();
3416 }
3417
3418 set_current_state(TASK_RUNNING);
3419 remove_wait_queue(&info->open_wait, &wait);
3420
3421 if (extra_count)
3422 info->count++;
3423 info->blocked_open--;
3424
3425 if (debug_level >= DEBUG_LEVEL_INFO)
3426 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3427 __FILE__,__LINE__, tty->driver->name, info->count );
3428
3429 if (!retval)
3430 info->flags |= ASYNC_NORMAL_ACTIVE;
3431
3432 return retval;
3433
3434 } /* end of block_til_ready() */
3435
3436 /* mgsl_open()
3437 *
3438 * Called when a port is opened. Init and enable port.
3439 * Perform serial-specific initialization for the tty structure.
3440 *
3441 * Arguments: tty pointer to tty info structure
3442 * filp associated file pointer
3443 *
3444 * Return Value: 0 if success, otherwise error code
3445 */
3446 static int mgsl_open(struct tty_struct *tty, struct file * filp)
3447 {
3448 struct mgsl_struct *info;
3449 int retval, line;
3450 unsigned long page;
3451 unsigned long flags;
3452
3453 /* verify range of specified line number */
3454 line = tty->index;
3455 if ((line < 0) || (line >= mgsl_device_count)) {
3456 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3457 __FILE__,__LINE__,line);
3458 return -ENODEV;
3459 }
3460
3461 /* find the info structure for the specified line */
3462 info = mgsl_device_list;
3463 while(info && info->line != line)
3464 info = info->next_device;
3465 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3466 return -ENODEV;
3467
3468 tty->driver_data = info;
3469 info->tty = tty;
3470
3471 if (debug_level >= DEBUG_LEVEL_INFO)
3472 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3473 __FILE__,__LINE__,tty->driver->name, info->count);
3474
3475 /* If port is closing, signal caller to try again */
3476 if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
3477 if (info->flags & ASYNC_CLOSING)
3478 interruptible_sleep_on(&info->close_wait);
3479 retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
3480 -EAGAIN : -ERESTARTSYS);
3481 goto cleanup;
3482 }
3483
3484 if (!tmp_buf) {
3485 page = get_zeroed_page(GFP_KERNEL);
3486 if (!page) {
3487 retval = -ENOMEM;
3488 goto cleanup;
3489 }
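		/* another opener may have set tmp_buf while we slept in
		 * get_zeroed_page(); if so, release the page just allocated */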
3490 if (tmp_buf)
3491 free_page(page);
3492 else
3493 tmp_buf = (unsigned char *) page;
3494 }
3495
3496 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3497
3498 spin_lock_irqsave(&info->netlock, flags);
3499 if (info->netcount) {
3500 retval = -EBUSY;
3501 spin_unlock_irqrestore(&info->netlock, flags);
3502 goto cleanup;
3503 }
3504 info->count++;
3505 spin_unlock_irqrestore(&info->netlock, flags);
3506
3507 if (info->count == 1) {
3508 /* 1st open on this device, init hardware */
3509 retval = startup(info);
3510 if (retval < 0)
3511 goto cleanup;
3512 }
3513
3514 retval = block_til_ready(tty, filp, info);
3515 if (retval) {
3516 if (debug_level >= DEBUG_LEVEL_INFO)
3517 printk("%s(%d):block_til_ready(%s) returned %d\n",
3518 __FILE__,__LINE__, info->device_name, retval);
3519 goto cleanup;
3520 }
3521
3522 if (debug_level >= DEBUG_LEVEL_INFO)
3523 printk("%s(%d):mgsl_open(%s) success\n",
3524 __FILE__,__LINE__, info->device_name);
3525 retval = 0;
3526
3527 cleanup:
3528 if (retval) {
3529 if (tty->count == 1)
3530 info->tty = NULL; /* tty layer will release tty struct */
3531 if(info->count)
3532 info->count--;
3533 }
3534
3535 return retval;
3536
3537 } /* end of mgsl_open() */
3538
3539 /*
3540 * /proc fs routines....
3541 */
3542
3543 static inline int line_info(char *buf, struct mgsl_struct *info)
3544 {
3545 char stat_buf[30];
3546 int ret;
3547 unsigned long flags;
3548
3549 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3550 ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3551 info->device_name, info->io_base, info->irq_level,
3552 info->phys_memory_base, info->phys_lcr_base);
3553 } else {
3554 ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
3555 info->device_name, info->io_base,
3556 info->irq_level, info->dma_level);
3557 }
3558
3559 /* output current serial signal states */
3560 spin_lock_irqsave(&info->irq_spinlock,flags);
3561 usc_get_serial_signals(info);
3562 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3563
3564 stat_buf[0] = 0;
3565 stat_buf[1] = 0;
3566 if (info->serial_signals & SerialSignal_RTS)
3567 strcat(stat_buf, "|RTS");
3568 if (info->serial_signals & SerialSignal_CTS)
3569 strcat(stat_buf, "|CTS");
3570 if (info->serial_signals & SerialSignal_DTR)
3571 strcat(stat_buf, "|DTR");
3572 if (info->serial_signals & SerialSignal_DSR)
3573 strcat(stat_buf, "|DSR");
3574 if (info->serial_signals & SerialSignal_DCD)
3575 strcat(stat_buf, "|CD");
3576 if (info->serial_signals & SerialSignal_RI)
3577 strcat(stat_buf, "|RI");
3578
3579 if (info->params.mode == MGSL_MODE_HDLC ||
3580 info->params.mode == MGSL_MODE_RAW ) {
3581 ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
3582 info->icount.txok, info->icount.rxok);
3583 if (info->icount.txunder)
3584 ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
3585 if (info->icount.txabort)
3586 ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
3587 if (info->icount.rxshort)
3588 ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
3589 if (info->icount.rxlong)
3590 ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
3591 if (info->icount.rxover)
3592 ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
3593 if (info->icount.rxcrc)
3594 ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
3595 } else {
3596 ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
3597 info->icount.tx, info->icount.rx);
3598 if (info->icount.frame)
3599 ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
3600 if (info->icount.parity)
3601 ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
3602 if (info->icount.brk)
3603 ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
3604 if (info->icount.overrun)
3605 ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
3606 }
3607
3608 /* Append serial signal status to end */
3609 ret += sprintf(buf+ret, " %s\n", stat_buf+1);
3610
3611 ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3612 info->tx_active,info->bh_requested,info->bh_running,
3613 info->pending_bh);
3614
3615 spin_lock_irqsave(&info->irq_spinlock,flags);
3616 {
3617 u16 Tcsr = usc_InReg( info, TCSR );
3618 u16 Tdmr = usc_InDmaReg( info, TDMR );
3619 u16 Ticr = usc_InReg( info, TICR );
3620 u16 Rscr = usc_InReg( info, RCSR );
3621 u16 Rdmr = usc_InDmaReg( info, RDMR );
3622 u16 Ricr = usc_InReg( info, RICR );
3623 u16 Icr = usc_InReg( info, ICR );
3624 u16 Dccr = usc_InReg( info, DCCR );
3625 u16 Tmr = usc_InReg( info, TMR );
3626 u16 Tccr = usc_InReg( info, TCCR );
3627 u16 Ccar = inw( info->io_base + CCAR );
3628 ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3629 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3630 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3631 }
3632 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3633
3634 return ret;
3635
3636 } /* end of line_info() */
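
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * line_info() above builds stat_buf by appending "|NAME" for each asserted
 * signal and later prints stat_buf+1, dropping the leading '|' so the names
 * come out correctly separated.  join_flags() is a hypothetical stand-alone
 * form of that idiom.
 */
#if 0
static const char *join_flags(char *buf, int rts, int cts, int dcd)
{
	buf[0] = '\0';
	if (rts)
		strcat(buf, "|RTS");
	if (cts)
		strcat(buf, "|CTS");
	if (dcd)
		strcat(buf, "|CD");
	/* skip the leading '|' unless nothing was appended */
	return buf[0] ? buf + 1 : buf;
}
#endif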
3637
3638 /* mgsl_read_proc()
3639 *
3640 * Called to print information about devices
3641 *
3642 * Arguments:
3643 * page page of memory to hold returned info
3644 * start
3645 * off
3646 * count
3647 * eof
3648 * data
3649 *
3650 * Return Value:
3651 */
3652 static int mgsl_read_proc(char *page, char **start, off_t off, int count,
3653 int *eof, void *data)
3654 {
3655 int len = 0, l;
3656 off_t begin = 0;
3657 struct mgsl_struct *info;
3658
3659 len += sprintf(page, "synclink driver:%s\n", driver_version);
3660
3661 info = mgsl_device_list;
3662 while( info ) {
3663 l = line_info(page + len, info);
3664 len += l;
3665 if (len+begin > off+count)
3666 goto done;
3667 if (len+begin < off) {
3668 begin += len;
3669 len = 0;
3670 }
3671 info = info->next_device;
3672 }
3673
3674 *eof = 1;
3675 done:
3676 if (off >= len+begin)
3677 return 0;
3678 *start = page + (off-begin);
3679 return ((count < begin+len-off) ? count : begin+len-off);
3680
3681 } /* end of mgsl_read_proc() */
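
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * the begin/off/count bookkeeping in mgsl_read_proc() is the usual
 * read_proc windowing idiom - the whole report is regenerated on every
 * call and only the slice starting at 'off' is handed back.  The helper
 * below shows the simplified single-page case (begin == 0); its name is
 * hypothetical.
 */
#if 0
static int readproc_window(char *page, char **start, off_t off, int count,
			   int generated_len)
{
	if (off >= generated_len)
		return 0;			/* nothing past the report */
	*start = page + off;			/* caller's slice begins here */
	return (count < generated_len - off) ?
		count : generated_len - off;	/* at most 'count' bytes */
}
#endif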
3682
3683 /* mgsl_allocate_dma_buffers()
3684 *
3685 * Allocate and format DMA buffers (ISA adapter)
3686 * or format shared memory buffers (PCI adapter).
3687 *
3688 * Arguments: info pointer to device instance data
3689 * Return Value: 0 if success, otherwise error
3690 */
3691 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3692 {
3693 unsigned short BuffersPerFrame;
3694
3695 info->last_mem_alloc = 0;
3696
3697 /* Calculate the number of DMA buffers necessary to hold the */
3698 /* largest allowable frame size. Note: If the max frame size is */
3699 /* not an even multiple of the DMA buffer size then we need to */
3700 /* round the buffer count per frame up one. */
3701
3702 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3703 if ( info->max_frame_size % DMABUFFERSIZE )
3704 BuffersPerFrame++;
3705
3706 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3707 /*
3708 * The PCI adapter has 256KBytes of shared memory to use.
3709 * This is 64 PAGE_SIZE buffers.
3710 *
3711 * The first page is used for padding at this time so the
3712 * buffer list does not begin at offset 0 of the PCI
3713 * adapter's shared memory.
3714 *
3715 * The 2nd page is used for the buffer list. A 4K buffer
3716 * list can hold 128 DMA_BUFFER structures at 32 bytes
3717 * each.
3718 *
3719 * This leaves 62 4K pages.
3720 *
3721 * The next N pages are used for transmit frame(s). We
3722 * reserve enough 4K page blocks to hold the required
3723 * number of transmit dma buffers (num_tx_dma_buffers),
3724 * each of MaxFrameSize size.
3725 *
3726 * Of the remaining pages (62-N), determine how many can
3727 * be used to receive full MaxFrameSize inbound frames
3728 */
3729 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3730 info->rx_buffer_count = 62 - info->tx_buffer_count;
3731 } else {
3732 /* Calculate the number of PAGE_SIZE buffers needed for */
3733 /* receive and transmit DMA buffers. */
3734
3735
3736 /* Calculate the number of DMA buffers necessary to */
3737 /* hold 7 max size receive frames and one max size transmit frame. */
3738 /* The receive buffer count is bumped by one so we avoid an */
3739 /* End of List condition if all receive buffers are used when */
3740 /* using linked list DMA buffers. */
3741
3742 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3743 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3744
3745 /*
3746 * limit total TxBuffers & RxBuffers to 62 4K total
3747 * (ala PCI Allocation)
3748 */
3749
3750 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3751 info->rx_buffer_count = 62 - info->tx_buffer_count;
3752
3753 }
3754
3755 if ( debug_level >= DEBUG_LEVEL_INFO )
3756 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3757 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3758
3759 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3760 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3761 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3762 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3763 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3764 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3765 return -ENOMEM;
3766 }
3767
3768 mgsl_reset_rx_dma_buffers( info );
3769 mgsl_reset_tx_dma_buffers( info );
3770
3771 return 0;
3772
3773 } /* end of mgsl_allocate_dma_buffers() */
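
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * the buffer sizing above is a round-up division.  For example, with
 * DMABUFFERSIZE equal to one 4K page, a 4096 byte max_frame_size needs one
 * buffer per frame while a 6000 byte max_frame_size needs two.  On the PCI
 * adapter the remainder of the 62-page budget then goes to receive
 * (rx_buffer_count = 62 - tx_buffer_count).  The helper name is hypothetical.
 */
#if 0
static unsigned short buffers_per_frame(unsigned int max_frame_size,
					unsigned int dma_buffer_size)
{
	unsigned short n = (unsigned short)(max_frame_size / dma_buffer_size);

	if (max_frame_size % dma_buffer_size)
		n++;		/* partial buffer forces a round up */
	return n;
}
#endif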
3774
3775 /*
3776 * mgsl_alloc_buffer_list_memory()
3777 *
3778 * Allocate a common DMA buffer for use as the
3779 * receive and transmit buffer lists.
3780 *
3781 * A buffer list is a set of buffer entries where each entry contains
3782 * a pointer to an actual buffer and a pointer to the next buffer entry
3783 * (plus some other info about the buffer).
3784 *
3785 * The buffer entries for a list are built to form a circular list so
3786 * that when the entire list has been traversed you start back at the
3787 * beginning.
3788 *
3789 * This function allocates memory for just the buffer entries.
3790 * The links (pointer to next entry) are filled in with the physical
3791 * address of the next entry so the adapter can navigate the list
3792 * using bus master DMA. The pointers to the actual buffers are filled
3793 * out later when the actual buffers are allocated.
3794 *
3795 * Arguments: info pointer to device instance data
3796 * Return Value: 0 if success, otherwise error
3797 */
3798 static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3799 {
3800 unsigned int i;
3801
3802 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3803 /* PCI adapter uses shared memory. */
3804 info->buffer_list = info->memory_base + info->last_mem_alloc;
3805 info->buffer_list_phys = info->last_mem_alloc;
3806 info->last_mem_alloc += BUFFERLISTSIZE;
3807 } else {
3808 /* ISA adapter uses system memory. */
3809 /* The buffer lists are allocated as a common buffer that both */
3810 /* the processor and adapter can access. This allows the driver to */
3811 /* inspect portions of the buffer while other portions are being */
3812 /* updated by the adapter using Bus Master DMA. */
3813
3814 info->buffer_list = kmalloc(BUFFERLISTSIZE, GFP_KERNEL | GFP_DMA);
3815 if ( info->buffer_list == NULL )
3816 return -ENOMEM;
3817
3818 info->buffer_list_phys = isa_virt_to_bus(info->buffer_list);
3819 }
3820
3821 /* We got the memory for the buffer entry lists. */
3822 /* Initialize the memory block to all zeros. */
3823 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3824
3825 /* Save virtual address pointers to the receive and */
3826 /* transmit buffer lists. (Receive 1st). These pointers will */
3827 /* be used by the processor to access the lists. */
3828 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3829 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3830 info->tx_buffer_list += info->rx_buffer_count;
3831
3832 /*
3833 * Build the links for the buffer entry lists such that
3834 * two circular lists are built. (Transmit and Receive).
3835 *
3836 * Note: the links are physical addresses
3837 * which are read by the adapter to determine the next
3838 * buffer entry to use.
3839 */
3840
3841 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3842 /* calculate and store physical address of this buffer entry */
3843 info->rx_buffer_list[i].phys_entry =
3844 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3845
3846 /* calculate and store physical address of */
3847 /* next entry in circular list of entries */
3848
3849 info->rx_buffer_list[i].link = info->buffer_list_phys;
3850
3851 if ( i < info->rx_buffer_count - 1 )
3852 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3853 }
3854
3855 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3856 /* calculate and store physical address of this buffer entry */
3857 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3858 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3859
3860 /* calculate and store physical address of */
3861 /* next entry in circular list of entries */
3862
3863 info->tx_buffer_list[i].link = info->buffer_list_phys +
3864 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3865
3866 if ( i < info->tx_buffer_count - 1 )
3867 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3868 }
3869
3870 return 0;
3871
3872 } /* end of mgsl_alloc_buffer_list_memory() */
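
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * the two loops above turn each list into a ring of physical addresses.
 * For a ring of 'count' entries whose first entry lives at bus address
 * 'base', entry i sits at base + i*entry_size and its link points at the
 * entry after it, wrapping back to 'base' after the last one.  The helper
 * names are hypothetical.
 */
#if 0
static unsigned long ring_entry_phys(unsigned long base, unsigned int i,
				     unsigned int entry_size)
{
	return base + i * entry_size;
}

static unsigned long ring_link_phys(unsigned long base, unsigned int i,
				    unsigned int count, unsigned int entry_size)
{
	/* last entry links back to the first, closing the circle */
	return base + ((i + 1) % count) * entry_size;
}
#endif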
3873
3874 /* Free DMA buffers allocated for use as the
3875 * receive and transmit buffer lists.
3876 * Warning:
3877 *
3878 * The data transfer buffers associated with the buffer list
3879 * MUST be freed before freeing the buffer list itself because
3880 * the buffer list contains the information necessary to free
3881 * the individual buffers!
3882 */
3883 static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3884 {
3885 if ( info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI )
3886 kfree(info->buffer_list);
3887
3888 info->buffer_list = NULL;
3889 info->rx_buffer_list = NULL;
3890 info->tx_buffer_list = NULL;
3891
3892 } /* end of mgsl_free_buffer_list_memory() */
3893
3894 /*
3895 * mgsl_alloc_frame_memory()
3896 *
3897 * Allocate the frame DMA buffers used by the specified buffer list.
3898 * Each DMA buffer will be one memory page in size. This is necessary
3899 * because memory can fragment enough that it may be impossible
3900 * to allocate multiple contiguous pages.
3901 *
3902 * Arguments:
3903 *
3904 * info pointer to device instance data
3905 * BufferList pointer to list of buffer entries
3906 * Buffercount count of buffer entries in buffer list
3907 *
3908 * Return Value: 0 if success, otherwise -ENOMEM
3909 */
3910 static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3911 {
3912 int i;
3913 unsigned long phys_addr;
3914
3915 /* Allocate page sized buffers for the receive buffer list */
3916
3917 for ( i = 0; i < Buffercount; i++ ) {
3918 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3919 /* PCI adapter uses shared memory buffers. */
3920 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3921 phys_addr = info->last_mem_alloc;
3922 info->last_mem_alloc += DMABUFFERSIZE;
3923 } else {
3924 /* ISA adapter uses system memory. */
3925 BufferList[i].virt_addr =
3926 kmalloc(DMABUFFERSIZE, GFP_KERNEL | GFP_DMA);
3927 if ( BufferList[i].virt_addr == NULL )
3928 return -ENOMEM;
3929 phys_addr = isa_virt_to_bus(BufferList[i].virt_addr);
3930 }
3931 BufferList[i].phys_addr = phys_addr;
3932 }
3933
3934 return 0;
3935
3936 } /* end of mgsl_alloc_frame_memory() */
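
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * on the PCI adapter both the buffer list and the frame buffers above are
 * carved out of the 256KByte shared memory window with a simple bump
 * allocator kept in info->last_mem_alloc.  A stand-alone form of that
 * allocation step, with hypothetical names, looks like this; the returned
 * offset is what the driver stores as the buffer's physical address.
 */
#if 0
static long shared_mem_carve(unsigned long *last_mem_alloc,
			     unsigned long window_size, unsigned long size)
{
	unsigned long offset = *last_mem_alloc;

	if (offset + size > window_size)
		return -1;		/* window exhausted */
	*last_mem_alloc = offset + size;
	return (long)offset;
}
#endif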
3937
3938 /*
3939 * mgsl_free_frame_memory()
3940 *
3941 * Free the buffers associated with
3942 * each buffer entry of a buffer list.
3943 *
3944 * Arguments:
3945 *
3946 * info pointer to device instance data
3947 * BufferList pointer to list of buffer entries
3948 * Buffercount count of buffer entries in buffer list
3949 *
3950 * Return Value: None
3951 */
3952 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3953 {
3954 int i;
3955
3956 if ( BufferList ) {
3957 for ( i = 0 ; i < Buffercount ; i++ ) {
3958 if ( BufferList[i].virt_addr ) {
3959 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3960 kfree(BufferList[i].virt_addr);
3961 BufferList[i].virt_addr = NULL;
3962 }
3963 }
3964 }
3965
3966 } /* end of mgsl_free_frame_memory() */
3967
3968 /* mgsl_free_dma_buffers()
3969 *
3970 * Free DMA buffers
3971 *
3972 * Arguments: info pointer to device instance data
3973 * Return Value: None
3974 */
3975 static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3976 {
3977 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3978 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3979 mgsl_free_buffer_list_memory( info );
3980
3981 } /* end of mgsl_free_dma_buffers() */
3982
3983
3984 /*
3985 * mgsl_alloc_intermediate_rxbuffer_memory()
3986 *
3987 * Allocate a buffer large enough to hold max_frame_size. This buffer
3988 * is used to pass an assembled frame to the line discipline.
3989 *
3990 * Arguments:
3991 *
3992 * info pointer to device instance data
3993 *
3994 * Return Value: 0 if success, otherwise -ENOMEM
3995 */
3996 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3997 {
3998 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3999 if ( info->intermediate_rxbuffer == NULL )
4000 return -ENOMEM;
4001
4002 return 0;
4003
4004 } /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
4005
4006 /*
4007 * mgsl_free_intermediate_rxbuffer_memory()
4008 *
4009 *
4010 * Arguments:
4011 *
4012 * info pointer to device instance data
4013 *
4014 * Return Value: None
4015 */
4016 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
4017 {
4018 kfree(info->intermediate_rxbuffer);
4019 info->intermediate_rxbuffer = NULL;
4020
4021 } /* end of mgsl_free_intermediate_rxbuffer_memory() */
4022
4023 /*
4024 * mgsl_alloc_intermediate_txbuffer_memory()
4025 *
4026 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
4027 * This buffer is used to load transmit frames into the adapter's dma transfer
4028 * buffers when there is sufficient space.
4029 *
4030 * Arguments:
4031 *
4032 * info pointer to device instance data
4033 *
4034 * Return Value: 0 if success, otherwise -ENOMEM
4035 */
4036 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
4037 {
4038 int i;
4039
4040 if ( debug_level >= DEBUG_LEVEL_INFO )
4041 printk("%s %s(%d) allocating %d tx holding buffers\n",
4042 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
4043
4044 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
4045
4046 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
4047 info->tx_holding_buffers[i].buffer =
4048 kmalloc(info->max_frame_size, GFP_KERNEL);
4049 if ( info->tx_holding_buffers[i].buffer == NULL )
4050 return -ENOMEM;
4051 }
4052
4053 return 0;
4054
4055 } /* end of mgsl_alloc_intermediate_txbuffer_memory() */
4056
4057 /*
4058 * mgsl_free_intermediate_txbuffer_memory()
4059 *
4060 *
4061 * Arguments:
4062 *
4063 * info pointer to device instance data
4064 *
4065 * Return Value: None
4066 */
4067 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
4068 {
4069 int i;
4070
4071 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
4072 kfree(info->tx_holding_buffers[i].buffer);
4073 info->tx_holding_buffers[i].buffer = NULL;
4074 }
4075
4076 info->get_tx_holding_index = 0;
4077 info->put_tx_holding_index = 0;
4078 info->tx_holding_count = 0;
4079
4080 } /* end of mgsl_free_intermediate_txbuffer_memory() */
4081
4082
4083 /*
4084 * load_next_tx_holding_buffer()
4085 *
4086 * attempts to load the next buffered tx request into the
4087 * tx dma buffers
4088 *
4089 * Arguments:
4090 *
4091 * info pointer to device instance data
4092 *
4093 * Return Value: 1 if next buffered tx request loaded
4094 * into adapter's tx dma buffer,
4095 * 0 otherwise
4096 */
4097 static int load_next_tx_holding_buffer(struct mgsl_struct *info)
4098 {
4099 int ret = 0;
4100
4101 if ( info->tx_holding_count ) {
4102 /* determine if we have enough tx dma buffers
4103 * to accommodate the next tx frame
4104 */
4105 struct tx_holding_buffer *ptx =
4106 &info->tx_holding_buffers[info->get_tx_holding_index];
4107 int num_free = num_free_tx_dma_buffers(info);
4108 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4109 if ( ptx->buffer_size % DMABUFFERSIZE )
4110 ++num_needed;
4111
4112 if (num_needed <= num_free) {
4113 info->xmit_cnt = ptx->buffer_size;
4114 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4115
4116 --info->tx_holding_count;
4117 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4118 info->get_tx_holding_index=0;
4119
4120 /* restart transmit timer */
4121 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4122
4123 ret = 1;
4124 }
4125 }
4126
4127 return ret;
4128 }
4129
4130 /*
4131 * save_tx_buffer_request()
4132 *
4133 * attempt to store transmit frame request for later transmission
4134 *
4135 * Arguments:
4136 *
4137 * info pointer to device instance data
4138 * Buffer pointer to buffer containing frame to load
4139 * BufferSize size in bytes of frame in Buffer
4140 *
4141 * Return Value: 1 if able to store, 0 otherwise
4142 */
4143 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4144 {
4145 struct tx_holding_buffer *ptx;
4146
4147 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4148 return 0; /* all buffers in use */
4149 }
4150
4151 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4152 ptx->buffer_size = BufferSize;
4153 memcpy( ptx->buffer, Buffer, BufferSize);
4154
4155 ++info->tx_holding_count;
4156 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4157 info->put_tx_holding_index=0;
4158
4159 return 1;
4160 }
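
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * save_tx_buffer_request() and load_next_tx_holding_buffer() together
 * implement a small circular FIFO over num_tx_holding_buffers slots.  The
 * producer advances put_tx_holding_index, the consumer advances
 * get_tx_holding_index, both wrap at the slot count, and tx_holding_count
 * distinguishes full from empty.  The helper name is hypothetical.
 */
#if 0
static unsigned int ring_advance(unsigned int index, unsigned int nslots)
{
	if (++index >= nslots)
		index = 0;	/* wrap back to the first slot */
	return index;
}
#endif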
4161
4162 static int mgsl_claim_resources(struct mgsl_struct *info)
4163 {
4164 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4165 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4166 __FILE__,__LINE__,info->device_name, info->io_base);
4167 return -ENODEV;
4168 }
4169 info->io_addr_requested = 1;
4170
4171 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4172 info->device_name, info ) < 0 ) {
4173 printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n",
4174 __FILE__,__LINE__,info->device_name, info->irq_level );
4175 goto errout;
4176 }
4177 info->irq_requested = 1;
4178
4179 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4180 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4181 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4182 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4183 goto errout;
4184 }
4185 info->shared_mem_requested = 1;
4186 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4187 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4188 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4189 goto errout;
4190 }
4191 info->lcr_mem_requested = 1;
4192
4193 info->memory_base = ioremap(info->phys_memory_base,0x40000);
4194 if (!info->memory_base) {
4195 printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n",
4196 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4197 goto errout;
4198 }
4199
4200 if ( !mgsl_memory_test(info) ) {
4201 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4202 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4203 goto errout;
4204 }
4205
4206 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset;
4207 if (!info->lcr_base) {
4208 printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n",
4209 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4210 goto errout;
4211 }
4212
4213 } else {
4214 /* claim DMA channel */
4215
4216 if (request_dma(info->dma_level,info->device_name) < 0){
4217 printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n",
4218 __FILE__,__LINE__,info->device_name, info->dma_level );
4219 mgsl_release_resources( info );
4220 return -ENODEV;
4221 }
4222 info->dma_requested = 1;
4223
4224 /* ISA adapter uses bus master DMA */
4225 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4226 enable_dma(info->dma_level);
4227 }
4228
4229 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4230 printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n",
4231 __FILE__,__LINE__,info->device_name, info->dma_level );
4232 goto errout;
4233 }
4234
4235 return 0;
4236 errout:
4237 mgsl_release_resources(info);
4238 return -ENODEV;
4239
4240 } /* end of mgsl_claim_resources() */
4241
4242 static void mgsl_release_resources(struct mgsl_struct *info)
4243 {
4244 if ( debug_level >= DEBUG_LEVEL_INFO )
4245 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4246 __FILE__,__LINE__,info->device_name );
4247
4248 if ( info->irq_requested ) {
4249 free_irq(info->irq_level, info);
4250 info->irq_requested = 0;
4251 }
4252 if ( info->dma_requested ) {
4253 disable_dma(info->dma_level);
4254 free_dma(info->dma_level);
4255 info->dma_requested = 0;
4256 }
4257 mgsl_free_dma_buffers(info);
4258 mgsl_free_intermediate_rxbuffer_memory(info);
4259 mgsl_free_intermediate_txbuffer_memory(info);
4260
4261 if ( info->io_addr_requested ) {
4262 release_region(info->io_base,info->io_addr_size);
4263 info->io_addr_requested = 0;
4264 }
4265 if ( info->shared_mem_requested ) {
4266 release_mem_region(info->phys_memory_base,0x40000);
4267 info->shared_mem_requested = 0;
4268 }
4269 if ( info->lcr_mem_requested ) {
4270 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4271 info->lcr_mem_requested = 0;
4272 }
4273 if (info->memory_base){
4274 iounmap(info->memory_base);
4275 info->memory_base = NULL;
4276 }
4277 if (info->lcr_base){
4278 iounmap(info->lcr_base - info->lcr_offset);
4279 info->lcr_base = NULL;
4280 }
4281
4282 if ( debug_level >= DEBUG_LEVEL_INFO )
4283 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4284 __FILE__,__LINE__,info->device_name );
4285
4286 } /* end of mgsl_release_resources() */
4287
4288 /* mgsl_add_device()
4289 *
4290 * Add the specified device instance data structure to the
4291 * global linked list of devices and increment the device count.
4292 *
4293 * Arguments: info pointer to device instance data
4294 * Return Value: None
4295 */
4296 static void mgsl_add_device( struct mgsl_struct *info )
4297 {
4298 info->next_device = NULL;
4299 info->line = mgsl_device_count;
4300 sprintf(info->device_name,"ttySL%d",info->line);
4301
4302 if (info->line < MAX_TOTAL_DEVICES) {
4303 if (maxframe[info->line])
4304 info->max_frame_size = maxframe[info->line];
4305 info->dosyncppp = dosyncppp[info->line];
4306
4307 if (txdmabufs[info->line]) {
4308 info->num_tx_dma_buffers = txdmabufs[info->line];
4309 if (info->num_tx_dma_buffers < 1)
4310 info->num_tx_dma_buffers = 1;
4311 }
4312
4313 if (txholdbufs[info->line]) {
4314 info->num_tx_holding_buffers = txholdbufs[info->line];
4315 if (info->num_tx_holding_buffers < 1)
4316 info->num_tx_holding_buffers = 1;
4317 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4318 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4319 }
4320 }
4321
4322 mgsl_device_count++;
4323
4324 if ( !mgsl_device_list )
4325 mgsl_device_list = info;
4326 else {
4327 struct mgsl_struct *current_dev = mgsl_device_list;
4328 while( current_dev->next_device )
4329 current_dev = current_dev->next_device;
4330 current_dev->next_device = info;
4331 }
4332
4333 if ( info->max_frame_size < 4096 )
4334 info->max_frame_size = 4096;
4335 else if ( info->max_frame_size > 65535 )
4336 info->max_frame_size = 65535;
4337
4338 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4339 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4340 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4341 info->phys_memory_base, info->phys_lcr_base,
4342 info->max_frame_size );
4343 } else {
4344 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4345 info->device_name, info->io_base, info->irq_level, info->dma_level,
4346 info->max_frame_size );
4347 }
4348
4349 #ifdef CONFIG_HDLC
4350 hdlcdev_init(info);
4351 #endif
4352
4353 } /* end of mgsl_add_device() */
4354
4355 /* mgsl_allocate_device()
4356 *
4357 * Allocate and initialize a device instance structure
4358 *
4359 * Arguments: none
4360 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4361 */
4362 static struct mgsl_struct* mgsl_allocate_device(void)
4363 {
4364 struct mgsl_struct *info;
4365
4366 info = (struct mgsl_struct *)kmalloc(sizeof(struct mgsl_struct),
4367 GFP_KERNEL);
4368
4369 if (!info) {
4370 printk("Error can't allocate device instance data\n");
4371 } else {
4372 memset(info, 0, sizeof(struct mgsl_struct));
4373 info->magic = MGSL_MAGIC;
4374 INIT_WORK(&info->task, mgsl_bh_handler, info);
4375 info->max_frame_size = 4096;
4376 info->close_delay = 5*HZ/10;
4377 info->closing_wait = 30*HZ;
4378 init_waitqueue_head(&info->open_wait);
4379 init_waitqueue_head(&info->close_wait);
4380 init_waitqueue_head(&info->status_event_wait_q);
4381 init_waitqueue_head(&info->event_wait_q);
4382 spin_lock_init(&info->irq_spinlock);
4383 spin_lock_init(&info->netlock);
4384 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4385 info->idle_mode = HDLC_TXIDLE_FLAGS;
4386 info->num_tx_dma_buffers = 1;
4387 info->num_tx_holding_buffers = 0;
4388 }
4389
4390 return info;
4391
4392 } /* end of mgsl_allocate_device()*/
4393
4394 static struct tty_operations mgsl_ops = {
4395 .open = mgsl_open,
4396 .close = mgsl_close,
4397 .write = mgsl_write,
4398 .put_char = mgsl_put_char,
4399 .flush_chars = mgsl_flush_chars,
4400 .write_room = mgsl_write_room,
4401 .chars_in_buffer = mgsl_chars_in_buffer,
4402 .flush_buffer = mgsl_flush_buffer,
4403 .ioctl = mgsl_ioctl,
4404 .throttle = mgsl_throttle,
4405 .unthrottle = mgsl_unthrottle,
4406 .send_xchar = mgsl_send_xchar,
4407 .break_ctl = mgsl_break,
4408 .wait_until_sent = mgsl_wait_until_sent,
4409 .read_proc = mgsl_read_proc,
4410 .set_termios = mgsl_set_termios,
4411 .stop = mgsl_stop,
4412 .start = mgsl_start,
4413 .hangup = mgsl_hangup,
4414 .tiocmget = tiocmget,
4415 .tiocmset = tiocmset,
4416 };
4417
4418 /*
4419 * perform tty device initialization
4420 */
4421 static int mgsl_init_tty(void)
4422 {
4423 int rc;
4424
4425 serial_driver = alloc_tty_driver(128);
4426 if (!serial_driver)
4427 return -ENOMEM;
4428
4429 serial_driver->owner = THIS_MODULE;
4430 serial_driver->driver_name = "synclink";
4431 serial_driver->name = "ttySL";
4432 serial_driver->major = ttymajor;
4433 serial_driver->minor_start = 64;
4434 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4435 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4436 serial_driver->init_termios = tty_std_termios;
4437 serial_driver->init_termios.c_cflag =
4438 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4439 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4440 tty_set_operations(serial_driver, &mgsl_ops);
4441 if ((rc = tty_register_driver(serial_driver)) < 0) {
4442 printk("%s(%d):Couldn't register serial driver\n",
4443 __FILE__,__LINE__);
4444 put_tty_driver(serial_driver);
4445 serial_driver = NULL;
4446 return rc;
4447 }
4448
4449 printk("%s %s, tty major#%d\n",
4450 driver_name, driver_version,
4451 serial_driver->major);
4452 return 0;
4453 }
4454
4455 /* enumerate user specified ISA adapters
4456 */
4457 static void mgsl_enum_isa_devices(void)
4458 {
4459 struct mgsl_struct *info;
4460 int i;
4461
4462 /* Check for user specified ISA devices */
4463
4464 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4465 if ( debug_level >= DEBUG_LEVEL_INFO )
4466 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4467 io[i], irq[i], dma[i] );
4468
4469 info = mgsl_allocate_device();
4470 if ( !info ) {
4471 /* error allocating device instance data */
4472 if ( debug_level >= DEBUG_LEVEL_ERROR )
4473 printk( "can't allocate device instance data.\n");
4474 continue;
4475 }
4476
4477 /* Copy user configuration info to device instance data */
4478 info->io_base = (unsigned int)io[i];
4479 info->irq_level = (unsigned int)irq[i];
4480 info->irq_level = irq_canonicalize(info->irq_level);
4481 info->dma_level = (unsigned int)dma[i];
4482 info->bus_type = MGSL_BUS_TYPE_ISA;
4483 info->io_addr_size = 16;
4484 info->irq_flags = 0;
4485
4486 mgsl_add_device( info );
4487 }
4488 }
4489
4490 static void synclink_cleanup(void)
4491 {
4492 int rc;
4493 struct mgsl_struct *info;
4494 struct mgsl_struct *tmp;
4495
4496 printk("Unloading %s: %s\n", driver_name, driver_version);
4497
4498 if (serial_driver) {
4499 if ((rc = tty_unregister_driver(serial_driver)))
4500 printk("%s(%d) failed to unregister tty driver err=%d\n",
4501 __FILE__,__LINE__,rc);
4502 put_tty_driver(serial_driver);
4503 }
4504
4505 info = mgsl_device_list;
4506 while(info) {
4507 #ifdef CONFIG_HDLC
4508 hdlcdev_exit(info);
4509 #endif
4510 mgsl_release_resources(info);
4511 tmp = info;
4512 info = info->next_device;
4513 kfree(tmp);
4514 }
4515
4516 if (tmp_buf) {
4517 free_page((unsigned long) tmp_buf);
4518 tmp_buf = NULL;
4519 }
4520
4521 if (pci_registered)
4522 pci_unregister_driver(&synclink_pci_driver);
4523 }
4524
4525 static int __init synclink_init(void)
4526 {
4527 int rc;
4528
4529 if (break_on_load) {
4530 mgsl_get_text_ptr();
4531 BREAKPOINT();
4532 }
4533
4534 printk("%s %s\n", driver_name, driver_version);
4535
4536 mgsl_enum_isa_devices();
4537 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4538 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4539 else
4540 pci_registered = 1;
4541
4542 if ((rc = mgsl_init_tty()) < 0)
4543 goto error;
4544
4545 return 0;
4546
4547 error:
4548 synclink_cleanup();
4549 return rc;
4550 }
4551
4552 static void __exit synclink_exit(void)
4553 {
4554 synclink_cleanup();
4555 }
4556
4557 module_init(synclink_init);
4558 module_exit(synclink_exit);
4559
4560 /*
4561 * usc_RTCmd()
4562 *
4563 * Issue a USC Receive/Transmit command to the
4564 * Channel Command/Address Register (CCAR).
4565 *
4566 * Notes:
4567 *
4568 * The command is encoded in the most significant 5 bits <15..11>
4569 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4570 * and Bits <6..0> must be written as zeros.
4571 *
4572 * Arguments:
4573 *
4574 * info pointer to device information structure
4575 * Cmd command mask (use symbolic macros)
4576 *
4577 * Return Value:
4578 *
4579 * None
4580 */
4581 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4582 {
4583 /* output command to CCAR in bits <15..11> */
4584 /* preserve bits <10..7>, bits <6..0> must be zero */
4585
4586 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4587
4588 /* Read to flush write to CCAR */
4589 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4590 inw( info->io_base + CCAR );
4591
4592 } /* end of usc_RTCmd() */
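
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * per the header comment, a CCAR command occupies bits <15..11>, bits
 * <10..7> must be preserved (the driver keeps its copy in
 * info->loopback_bits), and bits <6..0> are written as zero.  The command
 * macros are already shifted into place, which is why usc_RTCmd() can
 * simply add them to loopback_bits.  The helper name is hypothetical.
 */
#if 0
static u16 ccar_command_word(u16 cmd, u16 preserved)
{
	return (u16)((cmd & 0xf800) | (preserved & 0x0780));
}
#endif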
4593
4594 /*
4595 * usc_DmaCmd()
4596 *
4597 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4598 *
4599 * Arguments:
4600 *
4601 * info pointer to device information structure
4602 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4603 *
4604 * Return Value:
4605 *
4606 * None
4607 */
4608 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4609 {
4610 /* write command mask to DCAR */
4611 outw( Cmd + info->mbre_bit, info->io_base );
4612
4613 /* Read to flush write to DCAR */
4614 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4615 inw( info->io_base );
4616
4617 } /* end of usc_DmaCmd() */
4618
4619 /*
4620 * usc_OutDmaReg()
4621 *
4622 * Write a 16-bit value to a USC DMA register
4623 *
4624 * Arguments:
4625 *
4626 * info pointer to device info structure
4627 * RegAddr register address (number) for write
4628 * RegValue 16-bit value to write to register
4629 *
4630 * Return Value:
4631 *
4632 * None
4633 *
4634 */
4635 static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4636 {
4637 /* Note: The DCAR is located at the adapter base address */
4638 /* Note: must preserve state of BIT8 in DCAR */
4639
4640 outw( RegAddr + info->mbre_bit, info->io_base );
4641 outw( RegValue, info->io_base );
4642
4643 /* Read to flush write to DCAR */
4644 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4645 inw( info->io_base );
4646
4647 } /* end of usc_OutDmaReg() */
4648
4649 /*
4650 * usc_InDmaReg()
4651 *
4652 * Read a 16-bit value from a DMA register
4653 *
4654 * Arguments:
4655 *
4656 * info pointer to device info structure
4657 * RegAddr register address (number) to read from
4658 *
4659 * Return Value:
4660 *
4661 * The 16-bit value read from register
4662 *
4663 */
4664 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4665 {
4666 /* Note: The DCAR is located at the adapter base address */
4667 /* Note: must preserve state of BIT8 in DCAR */
4668
4669 outw( RegAddr + info->mbre_bit, info->io_base );
4670 return inw( info->io_base );
4671
4672 } /* end of usc_InDmaReg() */
4673
4674 /*
4675 *
4676 * usc_OutReg()
4677 *
4678 * Write a 16-bit value to a USC serial channel register
4679 *
4680 * Arguments:
4681 *
4682 * info pointer to device info structure
4683 * RegAddr register address (number) to write to
4684 * RegValue 16-bit value to write to register
4685 *
4686 * Return Value:
4687 *
4688 * None
4689 *
4690 */
4691 static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4692 {
4693 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4694 outw( RegValue, info->io_base + CCAR );
4695
4696 /* Read to flush write to CCAR */
4697 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4698 inw( info->io_base + CCAR );
4699
4700 } /* end of usc_OutReg() */
4701
4702 /*
4703 * usc_InReg()
4704 *
4705 * Reads a 16-bit value from a USC serial channel register
4706 *
4707 * Arguments:
4708 *
4709 * info pointer to device extension
4710 * RegAddr register address (number) to read from
4711 *
4712 * Return Value:
4713 *
4714 * 16-bit value read from register
4715 */
4716 static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4717 {
4718 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4719 return inw( info->io_base + CCAR );
4720
4721 } /* end of usc_InReg() */
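
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * the serial channel registers (through CCAR) and the DMA registers
 * (through DCAR) are reached indirectly: the register number, plus any
 * address-register bits that must be preserved, is written first, then the
 * data is transferred through the same port.  On PCI adapters a dummy read
 * flushes the posted write, as in the real accessors above.  The helper
 * name is hypothetical.
 */
#if 0
static void usc_indexed_write(unsigned int port, u16 reg, u16 preserved,
			      u16 value, int is_pci)
{
	outw(reg + preserved, port);	/* select the register */
	outw(value, port);		/* write the data */
	if (is_pci)
		inw(port);		/* flush posted write */
}
#endif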
4722
4723 /* usc_set_sdlc_mode()
4724 *
4725 * Set up the adapter for SDLC DMA communications.
4726 *
4727 * Arguments: info pointer to device instance data
4728 * Return Value: NONE
4729 */
4730 static void usc_set_sdlc_mode( struct mgsl_struct *info )
4731 {
4732 u16 RegValue;
4733 int PreSL1660;
4734
4735 /*
4736 * determine if the IUSC on the adapter is pre-SL1660. If
4737 * not, take advantage of the UnderWait feature of more
4738 * modern chips. If an underrun occurs and this bit is set,
4739 * the transmitter will idle the programmed idle pattern
4740 * until the driver has time to service the underrun. Otherwise,
4741 * the dma controller may get the cycles previously requested
4742 * and begin transmitting queued tx data.
4743 */
4744 usc_OutReg(info,TMCR,0x1f);
4745 RegValue=usc_InReg(info,TMDR);
4746 if ( RegValue == IUSC_PRE_SL1660 )
4747 PreSL1660 = 1;
4748 else
4749 PreSL1660 = 0;
4750
4751
4752 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4753 {
4754 /*
4755 ** Channel Mode Register (CMR)
4756 **
4757 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4758 ** <13> 0 0 = Transmit Disabled (initially)
4759 ** <12> 0 1 = Consecutive Idles share common 0
4760 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4761 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4762 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4763 **
4764 ** 1000 1110 0000 0110 = 0x8e06
4765 */
4766 RegValue = 0x8e06;
4767
4768 /*--------------------------------------------------
4769 * ignore user options for UnderRun Actions and
4770 * preambles
4771 *--------------------------------------------------*/
4772 }
4773 else
4774 {
4775 /* Channel mode Register (CMR)
4776 *
4777 * <15..14> 00 Tx Sub modes, Underrun Action
4778 * <13> 0 1 = Send Preamble before opening flag
4779 * <12> 0 1 = Consecutive Idles share common 0
4780 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4781 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4782 * <3..0> 0110 Receiver mode = HDLC/SDLC
4783 *
4784 * 0000 0110 0000 0110 = 0x0606
4785 */
4786 if (info->params.mode == MGSL_MODE_RAW) {
4787 RegValue = 0x0001; /* Set Receive mode = external sync */
4788
4789 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4790 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4791
4792 /*
4793 * TxSubMode:
4794 * CMR <15> 0 Don't send CRC on Tx Underrun
4795 * CMR <14> x undefined
4796 * CMR <13> 0 Send preamble before opening sync
4797 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4798 *
4799 * TxMode:
4800 * CMR <11..8> 0100 MonoSync
4801 *
4802 * xxxx 0100 xxxx xxxx = 0x04xx
4803 */
4804 RegValue |= 0x0400;
4805 }
4806 else {
4807
4808 RegValue = 0x0606;
4809
4810 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4811 RegValue |= BIT14;
4812 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4813 RegValue |= BIT15;
4814 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4815 RegValue |= BIT15 + BIT14;
4816 }
4817
4818 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4819 RegValue |= BIT13;
4820 }
4821
4822 if ( info->params.mode == MGSL_MODE_HDLC &&
4823 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4824 RegValue |= BIT12;
4825
4826 if ( info->params.addr_filter != 0xff )
4827 {
4828 /* set up receive address filtering */
4829 usc_OutReg( info, RSR, info->params.addr_filter );
4830 RegValue |= BIT4;
4831 }
4832
4833 usc_OutReg( info, CMR, RegValue );
4834 info->cmr_value = RegValue;
4835
4836 /* Receiver mode Register (RMR)
4837 *
4838 * <15..13> 000 encoding
4839 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4840 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4841 * <9> 0 1 = Include Receive chars in CRC
4842 * <8> 1 1 = Use Abort/PE bit as abort indicator
4843 * <7..6> 00 Even parity
4844 * <5> 0 parity disabled
4845 * <4..2> 000 Receive Char Length = 8 bits
4846 * <1..0> 00 Disable Receiver
4847 *
4848 * 0000 0101 0000 0000 = 0x0500
4849 */
4850
4851 RegValue = 0x0500;
4852
4853 switch ( info->params.encoding ) {
4854 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4855 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4856 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4857 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4858 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4859 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4860 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4861 }
4862
4863 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4864 RegValue |= BIT9;
4865 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4866 RegValue |= ( BIT12 | BIT10 | BIT9 );
4867
4868 usc_OutReg( info, RMR, RegValue );
4869
4870 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4871 /* When an opening flag of an SDLC frame is recognized the */
4872 /* Receive Character count (RCC) is loaded with the value in */
4873 /* RCLR. The RCC is decremented for each received byte. The */
4874 /* value of RCC is stored after the closing flag of the frame */
4875 /* allowing the frame size to be computed. */
4876
4877 usc_OutReg( info, RCLR, RCLRVALUE );
4878
4879 usc_RCmd( info, RCmd_SelectRicrdma_level );
4880
4881 /* Receive Interrupt Control Register (RICR)
4882 *
4883 * <15..8> ? RxFIFO DMA Request Level
4884 * <7> 0 Exited Hunt IA (Interrupt Arm)
4885 * <6> 0 Idle Received IA
4886 * <5> 0 Break/Abort IA
4887 * <4> 0 Rx Bound IA
4888 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4889 * <2> 0 Abort/PE IA
4890 * <1> 1 Rx Overrun IA
4891 * <0> 0 Select TC0 value for readback
4892 *
4893 * 0000 0000 0000 1010 = 0x000a
4894 */
4895
4896 /* Carry over the Exit Hunt and Idle Received bits */
4897 /* in case they have been armed by usc_ArmEvents. */
4898
4899 RegValue = usc_InReg( info, RICR ) & 0xc0;
4900
4901 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4902 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4903 else
4904 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4905
4906 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4907
4908 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4909 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4910
4911 /* Transmit mode Register (TMR)
4912 *
4913 * <15..13> 000 encoding
4914 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4915 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4916 * <9> 0 1 = Tx CRC Enabled
4917 * <8> 0 1 = Append CRC to end of transmit frame
4918 * <7..6> 00 Transmit parity Even
4919 * <5> 0 Transmit parity Disabled
4920 * <4..2> 000 Tx Char Length = 8 bits
4921 * <1..0> 00 Disable Transmitter
4922 *
4923 * 0000 0100 0000 0000 = 0x0400
4924 */
4925
4926 RegValue = 0x0400;
4927
4928 switch ( info->params.encoding ) {
4929 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4930 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4931 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4932 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4933 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4934 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4935 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4936 }
4937
4938 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4939 RegValue |= BIT9 + BIT8;
4940 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4941 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4942
4943 usc_OutReg( info, TMR, RegValue );
4944
4945 usc_set_txidle( info );
4946
4947
4948 usc_TCmd( info, TCmd_SelectTicrdma_level );
4949
4950 /* Transmit Interrupt Control Register (TICR)
4951 *
4952 * <15..8> ? Transmit FIFO DMA Level
4953 * <7> 0 Present IA (Interrupt Arm)
4954 * <6> 0 Idle Sent IA
4955 * <5> 1 Abort Sent IA
4956 * <4> 1 EOF/EOM Sent IA
4957 * <3> 0 CRC Sent IA
4958 * <2> 1 1 = Wait for SW Trigger to Start Frame
4959 * <1> 1 Tx Underrun IA
4960 * <0> 0 TC0 constant on read back
4961 *
4962 * 0000 0000 0011 0110 = 0x0036
4963 */
4964
4965 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4966 usc_OutReg( info, TICR, 0x0736 );
4967 else
4968 usc_OutReg( info, TICR, 0x1436 );
4969
4970 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4971 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4972
4973 /*
4974 ** Transmit Command/Status Register (TCSR)
4975 **
4976 ** <15..12> 0000 TCmd
4977 ** <11> 0/1 UnderWait
4978 ** <10..08> 000 TxIdle
4979 ** <7> x PreSent
4980 ** <6> x IdleSent
4981 ** <5> x AbortSent
4982 ** <4> x EOF/EOM Sent
4983 ** <3> x CRC Sent
4984 ** <2> x All Sent
4985 ** <1> x TxUnder
4986 ** <0> x TxEmpty
4987 **
4988 ** 0000 0000 0000 0000 = 0x0000
4989 */
4990 info->tcsr_value = 0;
4991
4992 if ( !PreSL1660 )
4993 info->tcsr_value |= TCSR_UNDERWAIT;
4994
4995 usc_OutReg( info, TCSR, info->tcsr_value );
4996
4997 /* Clock mode Control Register (CMCR)
4998 *
4999 * <15..14> 00 counter 1 Source = Disabled
5000 * <13..12> 00 counter 0 Source = Disabled
5001 * <11..10> 11 BRG1 Input is TxC Pin
5002 * <9..8> 11 BRG0 Input is TxC Pin
5003 * <7..6> 01 DPLL Input is BRG1 Output
5004 * <5..3> XXX TxCLK comes from Port 0
5005 * <2..0> XXX RxCLK comes from Port 1
5006 *
5007 * 0000 1111 0111 0111 = 0x0f77
5008 */
5009
5010 RegValue = 0x0f40;
5011
5012 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
5013 RegValue |= 0x0003; /* RxCLK from DPLL */
5014 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
5015 RegValue |= 0x0004; /* RxCLK from BRG0 */
5016 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
5017 RegValue |= 0x0006; /* RxCLK from TXC Input */
5018 else
5019 RegValue |= 0x0007; /* RxCLK from Port1 */
5020
5021 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
5022 RegValue |= 0x0018; /* TxCLK from DPLL */
5023 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
5024 RegValue |= 0x0020; /* TxCLK from BRG0 */
5025 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
5026 RegValue |= 0x0038; /* TxCLK from RxC Input */
5027 else
5028 RegValue |= 0x0030; /* TxCLK from Port0 */
5029
5030 usc_OutReg( info, CMCR, RegValue );
5031
5032
5033 /* Hardware Configuration Register (HCR)
5034 *
5035 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
5036 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
5037 * <12> 0 CVOK:0=report code violation in biphase
5038 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
5039 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
5040 * <7..6> 00 reserved
5041 * <5> 0 BRG1 mode:0=continuous,1=single cycle
5042 * <4> X BRG1 Enable
5043 * <3..2> 00 reserved
5044 * <1> 0 BRG0 mode:0=continuous,1=single cycle
5045 * <0> 0 BRG0 Enable
5046 */
5047
5048 RegValue = 0x0000;
5049
5050 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
5051 u32 XtalSpeed;
5052 u32 DpllDivisor;
5053 u16 Tc;
5054
5055 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
5056 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
5057
5058 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5059 XtalSpeed = 11059200;
5060 else
5061 XtalSpeed = 14745600;
5062
5063 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
5064 DpllDivisor = 16;
5065 RegValue |= BIT10;
5066 }
5067 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
5068 DpllDivisor = 8;
5069 RegValue |= BIT11;
5070 }
5071 else
5072 DpllDivisor = 32;
5073
5074 /* Tc = (Xtal/Speed) - 1 */
5075 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5076 /* then rounding up gives a more precise time constant. Instead */
5077 /* of rounding up and then subtracting 1 we just don't subtract */
5078 /* the one in this case. */
5079
5080 /*--------------------------------------------------
5081 * ejz: for DPLL mode, application should use the
5082 * same clock speed as the partner system, even
5083 * though clocking is derived from the input RxData.
5084 * In case the user uses a 0 for the clock speed,
5085 * default to 0xffffffff and don't try to divide by
5086 * zero
5087 *--------------------------------------------------*/
5088 if ( info->params.clock_speed )
5089 {
5090 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
5091 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
5092 / info->params.clock_speed) )
5093 Tc--;
5094 }
5095 else
5096 Tc = -1;
5097
5098
5099 /* Write 16-bit Time Constant for BRG1 */
5100 usc_OutReg( info, TC1R, Tc );
5101
5102 RegValue |= BIT4; /* enable BRG1 */
5103
5104 switch ( info->params.encoding ) {
5105 case HDLC_ENCODING_NRZ:
5106 case HDLC_ENCODING_NRZB:
5107 case HDLC_ENCODING_NRZI_MARK:
5108 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5109 case HDLC_ENCODING_BIPHASE_MARK:
5110 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5111 case HDLC_ENCODING_BIPHASE_LEVEL:
5112 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5113 }
5114 }
5115
5116 usc_OutReg( info, HCR, RegValue );
5117
5118
5119 /* Channel Control/status Register (CCSR)
5120 *
5121 * <15> X RCC FIFO Overflow status (RO)
5122 * <14> X RCC FIFO Not Empty status (RO)
5123 * <13> 0 1 = Clear RCC FIFO (WO)
5124 * <12> X DPLL Sync (RW)
5125 * <11> X DPLL 2 Missed Clocks status (RO)
5126 * <10> X DPLL 1 Missed Clock status (RO)
5127 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5128 * <7> X SDLC Loop On status (RO)
5129 * <6> X SDLC Loop Send status (RO)
5130 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5131 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5132 * <1..0> 00 reserved
5133 *
5134 * 0000 0000 0010 0000 = 0x0020
5135 */
5136
5137 usc_OutReg( info, CCSR, 0x1020 );
5138
5139
5140 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5141 usc_OutReg( info, SICR,
5142 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5143 }
5144
5145
5146 /* enable Master Interrupt Enable bit (MIE) */
5147 usc_EnableMasterIrqBit( info );
5148
5149 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5150 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5151
5152 /* arm RCC underflow interrupt */
5153 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5154 usc_EnableInterrupts(info, MISC);
5155
5156 info->mbre_bit = 0;
5157 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5158 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5159 info->mbre_bit = BIT8;
5160 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5161
5162 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5163 /* Enable DMAEN (Port 7, Bit 14) */
5164 /* This connects the DMA request signal to the ISA bus */
5165 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5166 }
5167
5168 /* DMA Control Register (DCR)
5169 *
5170 * <15..14> 10 Priority mode = Alternating Tx/Rx
5171 * 01 Rx has priority
5172 * 00 Tx has priority
5173 *
5174 * <13> 1 Enable Priority Preempt per DCR<15..14>
5175 * (WARNING DCR<11..10> must be 00 when this is 1)
5176 * 0 Choose activate channel per DCR<11..10>
5177 *
5178 * <12> 0 Little Endian for Array/List
5179 * <11..10> 00 Both Channels can use each bus grant
5180 * <9..6> 0000 reserved
5181 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5182 * <4> 0 1 = drive D/C and S/D pins
5183 * <3> 1 1 = Add one wait state to all DMA cycles.
5184 * <2> 0 1 = Strobe /UAS on every transfer.
5185 * <1..0> 11 Addr incrementing only affects LS24 bits
5186 *
5187 * 0110 0000 0000 1011 = 0x600b
5188 */
5189
5190 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5191 /* PCI adapter does not need DMA wait state */
5192 usc_OutDmaReg( info, DCR, 0xa00b );
5193 }
5194 else
5195 usc_OutDmaReg( info, DCR, 0x800b );
5196
5197
5198 /* Receive DMA mode Register (RDMR)
5199 *
5200 * <15..14> 11 DMA mode = Linked List Buffer mode
5201 * <13> 1 RSBinA/L = store Rx status Block in Array/List entry
5202 * <12> 1 Clear count of List Entry after fetching
5203 * <11..10> 00 Address mode = Increment
5204 * <9> 1 Terminate Buffer on RxBound
5205 * <8> 0 Bus Width = 16bits
5206 * <7..0> ? status Bits (write as 0s)
5207 *
5208 * 1111 0010 0000 0000 = 0xf200
5209 */
5210
5211 usc_OutDmaReg( info, RDMR, 0xf200 );
5212
5213
5214 /* Transmit DMA mode Register (TDMR)
5215 *
5216 * <15..14> 11 DMA mode = Linked List Buffer mode
5217 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5218 * <12> 1 Clear count of List Entry after fetching
5219 * <11..10> 00 Address mode = Increment
5220 * <9> 1 Terminate Buffer on end of frame
5221 * <8> 0 Bus Width = 16bits
5222 * <7..0> ? status Bits (Read Only so write as 0)
5223 *
5224 * 1111 0010 0000 0000 = 0xf200
5225 */
5226
5227 usc_OutDmaReg( info, TDMR, 0xf200 );
5228
5229
5230 /* DMA Interrupt Control Register (DICR)
5231 *
5232 * <15> 1 DMA Interrupt Enable
5233 * <14> 0 1 = Disable IEO from USC
5234 * <13> 0 1 = Don't provide vector during IntAck
5235 * <12> 1 1 = Include status in Vector
5236 * <10..2> 0 reserved, Must be 0s
5237 * <1> 0 1 = Rx DMA Interrupt Enabled
5238 * <0> 0 1 = Tx DMA Interrupt Enabled
5239 *
5240 * 1001 0000 0000 0000 = 0x9000
5241 */
5242
5243 usc_OutDmaReg( info, DICR, 0x9000 );
5244
5245 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5246 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5247 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5248
5249 /* Channel Control Register (CCR)
5250 *
5251 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5252 * <13> 0 Trigger Tx on SW Command Disabled
5253 * <12> 0 Flag Preamble Disabled
5254 * <11..10> 00 Preamble Length
5255 * <9..8> 00 Preamble Pattern
5256 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5257 * <5> 0 Trigger Rx on SW Command Disabled
5258 * <4..0> 0 reserved
5259 *
5260 * 1000 0000 1000 0000 = 0x8080
5261 */
5262
5263 RegValue = 0x8080;
5264
5265 switch ( info->params.preamble_length ) {
5266 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5267 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5268 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5269 }
5270
5271 switch ( info->params.preamble ) {
5272 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5273 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5274 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5275 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5276 }
5277
5278 usc_OutReg( info, CCR, RegValue );
5279
5280
5281 /*
5282 * Burst/Dwell Control Register
5283 *
5284 * <15..8> 0x20 Maximum number of transfers per bus grant
5285 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5286 */
5287
5288 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5289 /* don't limit bus occupancy on PCI adapter */
5290 usc_OutDmaReg( info, BDCR, 0x0000 );
5291 }
5292 else
5293 usc_OutDmaReg( info, BDCR, 0x2000 );
5294
5295 usc_stop_transmitter(info);
5296 usc_stop_receiver(info);
5297
5298 } /* end of usc_set_sdlc_mode() */
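
/*
 * Illustrative sketch (not part of the driver, excluded from the build):
 * several constants written above pack an 8-bit FIFO DMA request level
 * into the high byte of an interrupt control register and the armed status
 * bits from the bit tables into the low byte.  That is why the PCI and ISA
 * cases differ only in the high byte: 0x030a vs 0x140a for RICR and 0x0736
 * vs 0x1436 for TICR.  The helper name is hypothetical.
 */
#if 0
static u16 icr_word(u16 fifo_dma_request_level, u16 armed_bits)
{
	return (u16)((fifo_dma_request_level << 8) | (armed_bits & 0x00ff));
}
/* icr_word(0x03, 0x0a) == 0x030a, icr_word(0x14, 0x36) == 0x1436, etc. */
#endif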
5299
5300 /* usc_enable_loopback()
5301 *
5302 * Set the 16C32 for internal loopback mode.
5303 * The TxCLK and RxCLK signals are generated from the BRG0 and
5304 * the TxD is looped back to the RxD internally.
5305 *
5306 * Arguments: info pointer to device instance data
5307 * enable 1 = enable loopback, 0 = disable
5308 * Return Value: None
5309 */
5310 static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5311 {
5312 if (enable) {
5313 /* blank external TXD output */
5314 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5315
5316 /* Clock mode Control Register (CMCR)
5317 *
5318 * <15..14> 00 counter 1 Disabled
5319 * <13..12> 00 counter 0 Disabled
5320 * <11..10> 11 BRG1 Input is TxC Pin
5321 * <9..8> 11 BRG0 Input is TxC Pin
5322 * <7..6> 01 DPLL Input is BRG1 Output
5323 * <5..3> 100 TxCLK comes from BRG0
5324 * <2..0> 100 RxCLK comes from BRG0
5325 *
5326 * 0000 1111 0110 0100 = 0x0f64
5327 */
5328
5329 usc_OutReg( info, CMCR, 0x0f64 );
5330
5331 /* Write 16-bit Time Constant for BRG0 */
5332 /* use clock speed if available, otherwise use 8 for diagnostics */
5333 if (info->params.clock_speed) {
5334 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5335 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5336 else
5337 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5338 } else
5339 usc_OutReg(info, TC0R, (u16)8);
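/* Illustrative example (clock_speed value assumed): on an ISA adapter
 * with clock_speed = 38400, TC0R = (14745600/38400) - 1 = 383.
 */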
5340
5341 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5342 mode = Continuous Set Bit 0 to enable BRG0. */
5343 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5344
5345 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5346 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5347
5348 /* set Internal Data loopback mode */
5349 info->loopback_bits = 0x300;
5350 outw( 0x0300, info->io_base + CCAR );
5351 } else {
5352 /* enable external TXD output */
5353 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5354
5355 /* clear Internal Data loopback mode */
5356 info->loopback_bits = 0;
5357 outw( 0,info->io_base + CCAR );
5358 }
5359
5360 } /* end of usc_enable_loopback() */
5361
5362 /* usc_enable_aux_clock()
5363 *
5364 * Enable the AUX clock output at the specified frequency.
5365 *
5366 * Arguments:
5367 *
5368 * info pointer to device extension
5369 * data_rate data rate of clock in bits per second
5370 * A data rate of 0 disables the AUX clock.
5371 *
5372 * Return Value: None
5373 */
5374 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5375 {
5376 u32 XtalSpeed;
5377 u16 Tc;
5378
5379 if ( data_rate ) {
5380 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5381 XtalSpeed = 11059200;
5382 else
5383 XtalSpeed = 14745600;
5384
5385
5386 /* Tc = (Xtal/Speed) - 1 */
5387 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5388 /* then rounding up gives a more precise time constant. Instead */
5389 /* of rounding up and then subtracting 1 we just don't subtract */
5390 /* the one in this case. */
5391
5392
5393 Tc = (u16)(XtalSpeed/data_rate);
5394 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5395 Tc--;
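/* Illustrative example (data rates assumed): with XtalSpeed = 14745600
 * and data_rate = 250000, the quotient is 58 with remainder 245600;
 * twice the remainder exceeds data_rate, so Tc stays at 58 (round up,
 * then subtract 1). With data_rate = 38400 the division is exact (384),
 * so Tc is decremented to 383.
 */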
5396
5397 /* Write 16-bit Time Constant for BRG0 */
5398 usc_OutReg( info, TC0R, Tc );
5399
5400 /*
5401 * Hardware Configuration Register (HCR)
5402 * Clear Bit 1, BRG0 mode = Continuous
5403 * Set Bit 0 to enable BRG0.
5404 */
5405
5406 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5407
5408 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5409 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5410 } else {
5411 /* data rate == 0 so turn off BRG0 */
5412 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5413 }
5414
5415 } /* end of usc_enable_aux_clock() */
5416
5417 /*
5418 *
5419 * usc_process_rxoverrun_sync()
5420 *
5421 * This function processes a receive overrun by resetting the
5422 * receive DMA buffers and issuing a Purge Rx FIFO command
5423 * to allow the receiver to continue receiving.
5424 *
5425 * Arguments:
5426 *
5427 * info pointer to device extension
5428 *
5429 * Return Value: None
5430 */
5431 static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5432 {
5433 int start_index;
5434 int end_index;
5435 int frame_start_index;
5436 int start_of_frame_found = FALSE;
5437 int end_of_frame_found = FALSE;
5438 int reprogram_dma = FALSE;
5439
5440 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5441 u32 phys_addr;
5442
5443 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5444 usc_RCmd( info, RCmd_EnterHuntmode );
5445 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5446
5447 /* CurrentRxBuffer points to the 1st buffer of the next */
5448 /* possibly available receive frame. */
5449
5450 frame_start_index = start_index = end_index = info->current_rx_buffer;
5451
5452 /* Search for an unfinished string of buffers. This means */
5453 /* that a receive frame started (at least one buffer with */
5454 /* count set to zero) but there is no terminating buffer */
5455 /* (status set to non-zero). */
5456
5457 while( !buffer_list[end_index].count )
5458 {
5459 /* Count field has been reset to zero by 16C32. */
5460 /* This buffer is currently in use. */
5461
5462 if ( !start_of_frame_found )
5463 {
5464 start_of_frame_found = TRUE;
5465 frame_start_index = end_index;
5466 end_of_frame_found = FALSE;
5467 }
5468
5469 if ( buffer_list[end_index].status )
5470 {
5471 /* Status field has been set by 16C32. */
5472 /* This is the last buffer of a received frame. */
5473
5474 /* We want to leave the buffers for this frame intact. */
5475 /* Move on to next possible frame. */
5476
5477 start_of_frame_found = FALSE;
5478 end_of_frame_found = TRUE;
5479 }
5480
5481 /* advance to next buffer entry in linked list */
5482 end_index++;
5483 if ( end_index == info->rx_buffer_count )
5484 end_index = 0;
5485
5486 if ( start_index == end_index )
5487 {
5488 /* The entire list has been searched with all Counts == 0 and */
5489 /* all Status == 0. The receive buffers are */
5490 /* completely screwed, reset all receive buffers! */
5491 mgsl_reset_rx_dma_buffers( info );
5492 frame_start_index = 0;
5493 start_of_frame_found = FALSE;
5494 reprogram_dma = TRUE;
5495 break;
5496 }
5497 }
5498
5499 if ( start_of_frame_found && !end_of_frame_found )
5500 {
5501 /* There is an unfinished string of receive DMA buffers */
5502 /* as a result of the receiver overrun. */
5503
5504 /* Reset the buffers for the unfinished frame */
5505 /* and reprogram the receive DMA controller to start */
5506 /* at the 1st buffer of unfinished frame. */
5507
5508 start_index = frame_start_index;
5509
5510 do
5511 {
5512 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5513
5514 /* Adjust index for wrap around. */
5515 if ( start_index == info->rx_buffer_count )
5516 start_index = 0;
5517
5518 } while( start_index != end_index );
5519
5520 reprogram_dma = TRUE;
5521 }
5522
5523 if ( reprogram_dma )
5524 {
5525 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5526 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5527 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5528
5529 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5530
5531 /* This empties the receive FIFO and loads the RCC with RCLR */
5532 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5533
5534 /* program 16C32 with physical address of 1st DMA buffer entry */
5535 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5536 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5537 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5538
5539 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5540 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5541 usc_EnableInterrupts( info, RECEIVE_STATUS );
5542
5543 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5544 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5545
5546 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5547 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5548 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5549 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5550 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5551 else
5552 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5553 }
5554 else
5555 {
5556 /* This empties the receive FIFO and loads the RCC with RCLR */
5557 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5558 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5559 }
5560
5561 } /* end of usc_process_rxoverrun_sync() */
5562
5563 /* usc_stop_receiver()
5564 *
5565 * Disable USC receiver
5566 *
5567 * Arguments: info pointer to device instance data
5568 * Return Value: None
5569 */
5570 static void usc_stop_receiver( struct mgsl_struct *info )
5571 {
5572 if (debug_level >= DEBUG_LEVEL_ISR)
5573 printk("%s(%d):usc_stop_receiver(%s)\n",
5574 __FILE__,__LINE__, info->device_name );
5575
5576 /* Disable receive DMA channel. */
5577 /* This also disables receive DMA channel interrupts */
5578 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5579
5580 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5581 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5582 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5583
5584 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5585
5586 /* This empties the receive FIFO and loads the RCC with RCLR */
5587 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5588 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5589
5590 info->rx_enabled = 0;
5591 info->rx_overflow = 0;
5592 info->rx_rcc_underrun = 0;
5593
5594 } /* end of usc_stop_receiver() */
5595
5596 /* usc_start_receiver()
5597 *
5598 * Enable the USC receiver
5599 *
5600 * Arguments: info pointer to device instance data
5601 * Return Value: None
5602 */
5603 static void usc_start_receiver( struct mgsl_struct *info )
5604 {
5605 u32 phys_addr;
5606
5607 if (debug_level >= DEBUG_LEVEL_ISR)
5608 printk("%s(%d):usc_start_receiver(%s)\n",
5609 __FILE__,__LINE__, info->device_name );
5610
5611 mgsl_reset_rx_dma_buffers( info );
5612 usc_stop_receiver( info );
5613
5614 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5615 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5616
5617 if ( info->params.mode == MGSL_MODE_HDLC ||
5618 info->params.mode == MGSL_MODE_RAW ) {
5619 /* DMA mode Transfers */
5620 /* Program the DMA controller. */
5621 /* Enable the DMA controller end of buffer interrupt. */
5622
5623 /* program 16C32 with physical address of 1st DMA buffer entry */
5624 phys_addr = info->rx_buffer_list[0].phys_entry;
5625 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5626 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5627
5628 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5629 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5630 usc_EnableInterrupts( info, RECEIVE_STATUS );
5631
5632 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5633 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5634
5635 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5636 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5637 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5638 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5639 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5640 else
5641 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5642 } else {
5643 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5644 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5645 usc_EnableInterrupts(info, RECEIVE_DATA);
5646
5647 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5648 usc_RCmd( info, RCmd_EnterHuntmode );
5649
5650 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5651 }
5652
5653 usc_OutReg( info, CCSR, 0x1020 );
5654
5655 info->rx_enabled = 1;
5656
5657 } /* end of usc_start_receiver() */
5658
5659 /* usc_start_transmitter()
5660 *
5661 * Enable the USC transmitter and send a transmit frame if
5662 * one is loaded in the DMA buffers.
5663 *
5664 * Arguments: info pointer to device instance data
5665 * Return Value: None
5666 */
5667 static void usc_start_transmitter( struct mgsl_struct *info )
5668 {
5669 u32 phys_addr;
5670 unsigned int FrameSize;
5671
5672 if (debug_level >= DEBUG_LEVEL_ISR)
5673 printk("%s(%d):usc_start_transmitter(%s)\n",
5674 __FILE__,__LINE__, info->device_name );
5675
5676 if ( info->xmit_cnt ) {
5677
5678 /* If auto RTS enabled and RTS is inactive, then assert */
5679 /* RTS and set a flag indicating that the driver should */
5680 /* negate RTS when the transmission completes. */
5681
5682 info->drop_rts_on_tx_done = 0;
5683
5684 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5685 usc_get_serial_signals( info );
5686 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5687 info->serial_signals |= SerialSignal_RTS;
5688 usc_set_serial_signals( info );
5689 info->drop_rts_on_tx_done = 1;
5690 }
5691 }
5692
5693
5694 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5695 if ( !info->tx_active ) {
5696 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5697 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5698 usc_EnableInterrupts(info, TRANSMIT_DATA);
5699 usc_load_txfifo(info);
5700 }
5701 } else {
5702 /* Disable transmit DMA controller while programming. */
5703 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5704
5705 /* Transmit DMA buffer is loaded, so program USC */
5706 /* to send the frame contained in the buffers. */
5707
5708 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5709
5710 /* if operating in Raw sync mode, reset the rcc component
5711 * of the tx dma buffer entry, otherwise, the serial controller
5712 * will send a closing sync char after this count.
5713 */
5714 if ( info->params.mode == MGSL_MODE_RAW )
5715 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5716
5717 /* Program the Transmit Character Length Register (TCLR) */
5718 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5719 usc_OutReg( info, TCLR, (u16)FrameSize );
5720
5721 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5722
5723 /* Program the address of the 1st DMA Buffer Entry in linked list */
5724 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5725 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5726 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5727
5728 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5729 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5730 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5731
5732 if ( info->params.mode == MGSL_MODE_RAW &&
5733 info->num_tx_dma_buffers > 1 ) {
5734 /* When running external sync mode, attempt to 'stream' transmit */
5735 /* by filling tx dma buffers as they become available. To do this */
5736 /* we need to enable Tx DMA EOB Status interrupts : */
5737 /* */
5738 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5739 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5740
5741 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5742 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5743 }
5744
5745 /* Initialize Transmit DMA Channel */
5746 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5747
5748 usc_TCmd( info, TCmd_SendFrame );
5749
5750 info->tx_timer.expires = jiffies + msecs_to_jiffies(5000);
5751 add_timer(&info->tx_timer);
5752 }
5753 info->tx_active = 1;
5754 }
5755
5756 if ( !info->tx_enabled ) {
5757 info->tx_enabled = 1;
5758 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5759 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5760 else
5761 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5762 }
5763
5764 } /* end of usc_start_transmitter() */
5765
5766 /* usc_stop_transmitter()
5767 *
5768 * Stops the transmitter and DMA
5769 *
5770 * Arguments: info pointer to device instance data
5771 * Return Value: None
5772 */
5773 static void usc_stop_transmitter( struct mgsl_struct *info )
5774 {
5775 if (debug_level >= DEBUG_LEVEL_ISR)
5776 printk("%s(%d):usc_stop_transmitter(%s)\n",
5777 __FILE__,__LINE__, info->device_name );
5778
5779 del_timer(&info->tx_timer);
5780
5781 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5782 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5783 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5784
5785 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5786 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5787 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5788
5789 info->tx_enabled = 0;
5790 info->tx_active = 0;
5791
5792 } /* end of usc_stop_transmitter() */
5793
5794 /* usc_load_txfifo()
5795 *
5796 * Fill the transmit FIFO until the FIFO is full or
5797 * there is no more data to load.
5798 *
5799 * Arguments: info pointer to device extension (instance data)
5800 * Return Value: None
5801 */
5802 static void usc_load_txfifo( struct mgsl_struct *info )
5803 {
5804 int Fifocount;
5805 u8 TwoBytes[2];
5806
5807 if ( !info->xmit_cnt && !info->x_char )
5808 return;
5809
5810 /* Select transmit FIFO status readback in TICR */
5811 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5812
5813 /* load the Transmit FIFO until FIFOs full or all data sent */
5814
5815 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5816 /* there is more space in the transmit FIFO and */
5817 /* there is more data in transmit buffer */
5818
5819 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5820 /* write a 16-bit word from transmit buffer to 16C32 */
5821
5822 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5823 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5824 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5825 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5826
5827 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5828
5829 info->xmit_cnt -= 2;
5830 info->icount.tx += 2;
5831 } else {
5832 /* only 1 byte left to transmit or 1 FIFO slot left */
5833
5834 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5835 info->io_base + CCAR );
5836
5837 if (info->x_char) {
5838 /* transmit pending high priority char */
5839 outw( info->x_char,info->io_base + CCAR );
5840 info->x_char = 0;
5841 } else {
5842 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5843 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5844 info->xmit_cnt--;
5845 }
5846 info->icount.tx++;
5847 }
5848 }
5849
5850 } /* end of usc_load_txfifo() */
5851
5852 /* usc_reset()
5853 *
5854 * Reset the adapter to a known state and prepare it for further use.
5855 *
5856 * Arguments: info pointer to device instance data
5857 * Return Value: None
5858 */
5859 static void usc_reset( struct mgsl_struct *info )
5860 {
5861 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5862 int i;
5863 u32 readval;
5864
5865 /* Set BIT30 of Misc Control Register */
5866 /* (Local Control Register 0x50) to force reset of USC. */
5867
5868 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5869 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5870
5871 info->misc_ctrl_value |= BIT30;
5872 *MiscCtrl = info->misc_ctrl_value;
5873
5874 /*
5875 * Force at least 170ns delay before clearing
5876 * reset bit. Each read from the LCR takes at least
5877 * 30ns, so reading it 10 times gives at least 300ns to be safe.
5878 */
5879 for(i=0;i<10;i++)
5880 readval = *MiscCtrl;
5881
5882 info->misc_ctrl_value &= ~BIT30;
5883 *MiscCtrl = info->misc_ctrl_value;
5884
5885 *LCR0BRDR = BUS_DESCRIPTOR(
5886 1, // Write Strobe Hold (0-3)
5887 2, // Write Strobe Delay (0-3)
5888 2, // Read Strobe Delay (0-3)
5889 0, // NWDD (Write data-data) (0-3)
5890 4, // NWAD (Write Addr-data) (0-31)
5891 0, // NXDA (Read/Write Data-Addr) (0-3)
5892 0, // NRDD (Read Data-Data) (0-3)
5893 5 // NRAD (Read Addr-Data) (0-31)
5894 );
5895 } else {
5896 /* do HW reset */
5897 outb( 0,info->io_base + 8 );
5898 }
5899
5900 info->mbre_bit = 0;
5901 info->loopback_bits = 0;
5902 info->usc_idle_mode = 0;
5903
5904 /*
5905 * Program the Bus Configuration Register (BCR)
5906 *
5907 * <15> 0 Don't use separate address
5908 * <14..6> 0 reserved
5909 * <5..4> 00 IAckmode = Default, don't care
5910 * <3> 1 Bus Request Totem Pole output
5911 * <2> 1 Use 16 Bit data bus
5912 * <1> 0 IRQ Totem Pole output
5913 * <0> 0 Don't Shift Right Addr
5914 *
5915 * 0000 0000 0000 1100 = 0x000c
5916 *
5917 * By writing to io_base + SDPIN the Wait/Ack pin is
5918 * programmed to work as a Wait pin.
5919 */
5920
5921 outw( 0x000c,info->io_base + SDPIN );
5922
5923
5924 outw( 0,info->io_base );
5925 outw( 0,info->io_base + CCAR );
5926
5927 /* select little endian byte ordering */
5928 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5929
5930
5931 /* Port Control Register (PCR)
5932 *
5933 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5934 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5935 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5936 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5937 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5938 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5939 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5940 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5941 *
5942 * 1111 0000 1111 0101 = 0xf0f5
5943 */
5944
5945 usc_OutReg( info, PCR, 0xf0f5 );
5946
5947
5948 /*
5949 * Input/Output Control Register
5950 *
5951 * <15..14> 00 CTS is active low input
5952 * <13..12> 00 DCD is active low input
5953 * <11..10> 00 TxREQ pin is input (DSR)
5954 * <9..8> 00 RxREQ pin is input (RI)
5955 * <7..6> 00 TxD is output (Transmit Data)
5956 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock)
5957 * <2..0> 100 RxC is Output (drive with BRG0)
5958 *
5959 * 0000 0000 0000 0100 = 0x0004
5960 */
5961
5962 usc_OutReg( info, IOCR, 0x0004 );
5963
5964 } /* end of usc_reset() */
5965
5966 /* usc_set_async_mode()
5967 *
5968 * Program adapter for asynchronous communications.
5969 *
5970 * Arguments: info pointer to device instance data
5971 * Return Value: None
5972 */
5973 static void usc_set_async_mode( struct mgsl_struct *info )
5974 {
5975 u16 RegValue;
5976
5977 /* disable interrupts while programming USC */
5978 usc_DisableMasterIrqBit( info );
5979
5980 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5981 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5982
5983 usc_loopback_frame( info );
5984
5985 /* Channel mode Register (CMR)
5986 *
5987 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5988 * <13..12> 00 00 = 16X Clock
5989 * <11..8> 0000 Transmitter mode = Asynchronous
5990 * <7..6> 00 reserved?
5991 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5992 * <3..0> 0000 Receiver mode = Asynchronous
5993 *
5994 * 0000 0000 0000 0000 = 0x0
5995 */
5996
5997 RegValue = 0;
5998 if ( info->params.stop_bits != 1 )
5999 RegValue |= BIT14;
6000 usc_OutReg( info, CMR, RegValue );
6001
6002
6003 /* Receiver mode Register (RMR)
6004 *
6005 * <15..13> 000 encoding = None
6006 * <12..08> 00000 reserved (Sync Only)
6007 * <7..6> 00 Even parity
6008 * <5> 0 parity disabled
6009 * <4..2> 000 Receive Char Length = 8 bits
6010 * <1..0> 00 Disable Receiver
6011 *
6012 * 0000 0000 0000 0000 = 0x0
6013 */
6014
6015 RegValue = 0;
6016
6017 if ( info->params.data_bits != 8 )
6018 RegValue |= BIT4+BIT3+BIT2;
6019
6020 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6021 RegValue |= BIT5;
6022 if ( info->params.parity != ASYNC_PARITY_ODD )
6023 RegValue |= BIT6;
6024 }
6025
6026 usc_OutReg( info, RMR, RegValue );
6027
6028
6029 /* Set IRQ trigger level */
6030
6031 usc_RCmd( info, RCmd_SelectRicrIntLevel );
6032
6033
6034 /* Receive Interrupt Control Register (RICR)
6035 *
6036 * <15..8> ? RxFIFO IRQ Request Level
6037 *
6038 * Note: For async mode the receive FIFO level must be set
6039 * to 0 to avoid the situation where the FIFO contains fewer bytes
6040 * than the trigger level and no more data is expected.
6041 *
6042 * <7> 0 Exited Hunt IA (Interrupt Arm)
6043 * <6> 0 Idle Received IA
6044 * <5> 0 Break/Abort IA
6045 * <4> 0 Rx Bound IA
6046 * <3> 0 Queued status reflects oldest byte in FIFO
6047 * <2> 0 Abort/PE IA
6048 * <1> 0 Rx Overrun IA
6049 * <0> 0 Select TC0 value for readback
6050 *
6051 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
6052 */
6053
6054 usc_OutReg( info, RICR, 0x0000 );
6055
6056 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
6057 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
6058
6059
6060 /* Transmit mode Register (TMR)
6061 *
6062 * <15..13> 000 encoding = None
6063 * <12..08> 00000 reserved (Sync Only)
6064 * <7..6> 00 Transmit parity Even
6065 * <5> 0 Transmit parity Disabled
6066 * <4..2> 000 Tx Char Length = 8 bits
6067 * <1..0> 00 Disable Transmitter
6068 *
6069 * 0000 0000 0000 0000 = 0x0
6070 */
6071
6072 RegValue = 0;
6073
6074 if ( info->params.data_bits != 8 )
6075 RegValue |= BIT4+BIT3+BIT2;
6076
6077 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6078 RegValue |= BIT5;
6079 if ( info->params.parity != ASYNC_PARITY_ODD )
6080 RegValue |= BIT6;
6081 }
6082
6083 usc_OutReg( info, TMR, RegValue );
6084
6085 usc_set_txidle( info );
6086
6087
6088 /* Set IRQ trigger level */
6089
6090 usc_TCmd( info, TCmd_SelectTicrIntLevel );
6091
6092
6093 /* Transmit Interrupt Control Register (TICR)
6094 *
6095 * <15..8> ? Transmit FIFO IRQ Level
6096 * <7> 0 Present IA (Interrupt Arm)
6097 * <6> 1 Idle Sent IA
6098 * <5> 0 Abort Sent IA
6099 * <4> 0 EOF/EOM Sent IA
6100 * <3> 0 CRC Sent IA
6101 * <2> 0 1 = Wait for SW Trigger to Start Frame
6102 * <1> 0 Tx Underrun IA
6103 * <0> 0 TC0 constant on read back
6104 *
6105 * 0000 0000 0100 0000 = 0x0040
6106 */
6107
6108 usc_OutReg( info, TICR, 0x1f40 );
6109
6110 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6111 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6112
6113 usc_enable_async_clock( info, info->params.data_rate );
6114
6115
6116 /* Channel Control/status Register (CCSR)
6117 *
6118 * <15> X RCC FIFO Overflow status (RO)
6119 * <14> X RCC FIFO Not Empty status (RO)
6120 * <13> 0 1 = Clear RCC FIFO (WO)
6121 * <12> X DPLL in Sync status (RO)
6122 * <11> X DPLL 2 Missed Clocks status (RO)
6123 * <10> X DPLL 1 Missed Clock status (RO)
6124 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6125 * <7> X SDLC Loop On status (RO)
6126 * <6> X SDLC Loop Send status (RO)
6127 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6128 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6129 * <1..0> 00 reserved
6130 *
6131 * 0000 0000 0010 0000 = 0x0020
6132 */
6133
6134 usc_OutReg( info, CCSR, 0x0020 );
6135
6136 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6137 RECEIVE_DATA + RECEIVE_STATUS );
6138
6139 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6140 RECEIVE_DATA + RECEIVE_STATUS );
6141
6142 usc_EnableMasterIrqBit( info );
6143
6144 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6145 /* Enable INTEN (Port 6, Bit12) */
6146 /* This connects the IRQ request signal to the ISA bus */
6147 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6148 }
6149
6150 if (info->params.loopback) {
6151 info->loopback_bits = 0x300;
6152 outw(0x0300, info->io_base + CCAR);
6153 }
6154
6155 } /* end of usc_set_async_mode() */
6156
6157 /* usc_loopback_frame()
6158 *
6159 * Loop back a small (2 byte) dummy SDLC frame.
6160 * Interrupts and DMA are NOT used. The purpose of this is to
6161 * clear any 'stale' status info left over from running in async mode.
6162 *
6163 * The 16C32 shows the strange behaviour of marking the 1st
6164 * received SDLC frame with a CRC error even when there is no
6165 * CRC error. To get around this a small dummy frame of 2 bytes
6166 * is looped back when switching from async to sync mode.
6167 *
6168 * Arguments: info pointer to device instance data
6169 * Return Value: None
6170 */
6171 static void usc_loopback_frame( struct mgsl_struct *info )
6172 {
6173 int i;
6174 unsigned long oldmode = info->params.mode;
6175
6176 info->params.mode = MGSL_MODE_HDLC;
6177
6178 usc_DisableMasterIrqBit( info );
6179
6180 usc_set_sdlc_mode( info );
6181 usc_enable_loopback( info, 1 );
6182
6183 /* Write 16-bit Time Constant for BRG0 */
6184 usc_OutReg( info, TC0R, 0 );
6185
6186 /* Channel Control Register (CCR)
6187 *
6188 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6189 * <13> 0 Trigger Tx on SW Command Disabled
6190 * <12> 0 Flag Preamble Disabled
6191 * <11..10> 00 Preamble Length = 8-Bits
6192 * <9..8> 01 Preamble Pattern = flags
6193 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6194 * <5> 0 Trigger Rx on SW Command Disabled
6195 * <4..0> 0 reserved
6196 *
6197 * 0000 0001 0000 0000 = 0x0100
6198 */
6199
6200 usc_OutReg( info, CCR, 0x0100 );
6201
6202 /* SETUP RECEIVER */
6203 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6204 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6205
6206 /* SETUP TRANSMITTER */
6207 /* Program the Transmit Character Length Register (TCLR) */
6208 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6209 usc_OutReg( info, TCLR, 2 );
6210 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6211
6212 /* unlatch Tx status bits, and start transmit channel. */
6213 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6214 outw(0,info->io_base + DATAREG);
6215
6216 /* ENABLE TRANSMITTER */
6217 usc_TCmd( info, TCmd_SendFrame );
6218 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6219
6220 /* WAIT FOR RECEIVE COMPLETE */
6221 for (i=0 ; i<1000 ; i++)
6222 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6223 break;
6224
6225 /* clear Internal Data loopback mode */
6226 usc_enable_loopback(info, 0);
6227
6228 usc_EnableMasterIrqBit(info);
6229
6230 info->params.mode = oldmode;
6231
6232 } /* end of usc_loopback_frame() */
6233
6234 /* usc_set_sync_mode() Programs the USC for SDLC communications.
6235 *
6236 * Arguments: info pointer to adapter info structure
6237 * Return Value: None
6238 */
6239 static void usc_set_sync_mode( struct mgsl_struct *info )
6240 {
6241 usc_loopback_frame( info );
6242 usc_set_sdlc_mode( info );
6243
6244 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6245 /* Enable INTEN (Port 6, Bit12) */
6246 /* This connects the IRQ request signal to the ISA bus */
6247 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6248 }
6249
6250 usc_enable_aux_clock(info, info->params.clock_speed);
6251
6252 if (info->params.loopback)
6253 usc_enable_loopback(info,1);
6254
6255 } /* end of usc_set_sync_mode() */
6256
6257 /* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6258 *
6259 * Arguments: info pointer to device instance data
6260 * Return Value: None
6261 */
6262 static void usc_set_txidle( struct mgsl_struct *info )
6263 {
6264 u16 usc_idle_mode = IDLEMODE_FLAGS;
6265
6266 /* Map API idle mode to USC register bits */
6267
6268 switch( info->idle_mode ){
6269 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6270 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6271 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6272 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6273 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6274 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6275 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6276 }
6277
6278 info->usc_idle_mode = usc_idle_mode;
6279 //usc_OutReg(info, TCSR, usc_idle_mode);
6280 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6281 info->tcsr_value += usc_idle_mode;
6282 usc_OutReg(info, TCSR, info->tcsr_value);
6283
6284 /*
6285 * if SyncLink WAN adapter is running in external sync mode, the
6286 * transmitter has been set to Monosync in order to try to mimic
6287 * a true raw outbound bit stream. Monosync still sends an open/close
6288 * sync char at the start/end of a frame. Try to match those sync
6289 * patterns to the idle mode set here
6290 */
6291 if ( info->params.mode == MGSL_MODE_RAW ) {
6292 unsigned char syncpat = 0;
6293 switch( info->idle_mode ) {
6294 case HDLC_TXIDLE_FLAGS:
6295 syncpat = 0x7e;
6296 break;
6297 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6298 syncpat = 0x55;
6299 break;
6300 case HDLC_TXIDLE_ZEROS:
6301 case HDLC_TXIDLE_SPACE:
6302 syncpat = 0x00;
6303 break;
6304 case HDLC_TXIDLE_ONES:
6305 case HDLC_TXIDLE_MARK:
6306 syncpat = 0xff;
6307 break;
6308 case HDLC_TXIDLE_ALT_MARK_SPACE:
6309 syncpat = 0xaa;
6310 break;
6311 }
6312
6313 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6314 }
6315
6316 } /* end of usc_set_txidle() */
6317
6318 /* usc_get_serial_signals()
6319 *
6320 * Query the adapter for the state of the V24 status (input) signals.
6321 *
6322 * Arguments: info pointer to device instance data
6323 * Return Value: None
6324 */
6325 static void usc_get_serial_signals( struct mgsl_struct *info )
6326 {
6327 u16 status;
6328
6329 /* clear all serial signals except DTR and RTS */
6330 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6331
6332 /* Read the Misc Interrupt status Register (MISR) to get */
6333 /* the V24 status signals. */
6334
6335 status = usc_InReg( info, MISR );
6336
6337 /* set serial signal bits to reflect MISR */
6338
6339 if ( status & MISCSTATUS_CTS )
6340 info->serial_signals |= SerialSignal_CTS;
6341
6342 if ( status & MISCSTATUS_DCD )
6343 info->serial_signals |= SerialSignal_DCD;
6344
6345 if ( status & MISCSTATUS_RI )
6346 info->serial_signals |= SerialSignal_RI;
6347
6348 if ( status & MISCSTATUS_DSR )
6349 info->serial_signals |= SerialSignal_DSR;
6350
6351 } /* end of usc_get_serial_signals() */
6352
6353 /* usc_set_serial_signals()
6354 *
6355 * Set the state of DTR and RTS based on contents of
6356 * serial_signals member of device extension.
6357 *
6358 * Arguments: info pointer to device instance data
6359 * Return Value: None
6360 */
6361 static void usc_set_serial_signals( struct mgsl_struct *info )
6362 {
6363 u16 Control;
6364 unsigned char V24Out = info->serial_signals;
6365
6366 /* get the current value of the Port Control Register (PCR) */
6367
6368 Control = usc_InReg( info, PCR );
6369
6370 if ( V24Out & SerialSignal_RTS )
6371 Control &= ~(BIT6);
6372 else
6373 Control |= BIT6;
6374
6375 if ( V24Out & SerialSignal_DTR )
6376 Control &= ~(BIT4);
6377 else
6378 Control |= BIT4;
6379
6380 usc_OutReg( info, PCR, Control );
6381
6382 } /* end of usc_set_serial_signals() */
6383
6384 /* usc_enable_async_clock()
6385 *
6386 * Enable the async clock at the specified frequency.
6387 *
6388 * Arguments: info pointer to device instance data
6389 * data_rate data rate of clock in bps
6390 * 0 disables the AUX clock.
6391 * Return Value: None
6392 */
6393 static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6394 {
6395 if ( data_rate ) {
6396 /*
6397 * Clock mode Control Register (CMCR)
6398 *
6399 * <15..14> 00 counter 1 Disabled
6400 * <13..12> 00 counter 0 Disabled
6401 * <11..10> 11 BRG1 Input is TxC Pin
6402 * <9..8> 11 BRG0 Input is TxC Pin
6403 * <7..6> 01 DPLL Input is BRG1 Output
6404 * <5..3> 100 TxCLK comes from BRG0
6405 * <2..0> 100 RxCLK comes from BRG0
6406 *
6407 * 0000 1111 0110 0100 = 0x0f64
6408 */
6409
6410 usc_OutReg( info, CMCR, 0x0f64 );
6411
6412
6413 /*
6414 * Write 16-bit Time Constant for BRG0
6415 * Time Constant = (ClkSpeed / data_rate) - 1
6416 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6417 */
6418
6419 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6420 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6421 else
6422 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
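/* Illustrative example (data rate assumed): at 9600bps the time
 * constant is (921600/9600) - 1 = 95 on ISA and (691200/9600) - 1 = 71
 * on PCI.
 */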
6423
6424
6425 /*
6426 * Hardware Configuration Register (HCR)
6427 * Clear Bit 1, BRG0 mode = Continuous
6428 * Set Bit 0 to enable BRG0.
6429 */
6430
6431 usc_OutReg( info, HCR,
6432 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6433
6434
6435 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6436
6437 usc_OutReg( info, IOCR,
6438 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6439 } else {
6440 /* data rate == 0 so turn off BRG0 */
6441 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6442 }
6443
6444 } /* end of usc_enable_async_clock() */
6445
6446 /*
6447 * Buffer Structures:
6448 *
6449 * Normal memory access uses virtual addresses that can make discontiguous
6450 * physical memory pages appear to be contiguous in the virtual address
6451 * space (the processor's memory mapping handles the conversions).
6452 *
6453 * DMA transfers require physically contiguous memory. This is because
6454 * the DMA system controller and DMA bus masters deal with memory using
6455 * only physical addresses.
6456 *
6457 * This causes a problem under Windows NT when large DMA buffers are
6458 * needed. Fragmentation of the nonpaged pool prevents allocations of
6459 * physically contiguous buffers larger than the PAGE_SIZE.
6460 *
6461 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6462 * allows DMA transfers to physically discontiguous buffers. Information
6463 * about each data transfer buffer is contained in a memory structure
6464 * called a 'buffer entry'. A list of buffer entries is maintained
6465 * to track and control the use of the data transfer buffers.
6466 *
6467 * To support this strategy we will allocate sufficient PAGE_SIZE
6468 * contiguous memory buffers to allow for the total required buffer
6469 * space.
6470 *
6471 * The 16C32 accesses the list of buffer entries using Bus Master
6472 * DMA. Control information is read from the buffer entries by the
6473 * 16C32 to control data transfers. status information is written to
6474 * the buffer entries by the 16C32 to indicate the status of completed
6475 * transfers.
6476 *
6477 * The CPU writes control information to the buffer entries to control
6478 * the 16C32 and reads status information from the buffer entries to
6479 * determine information about received and transmitted frames.
6480 *
6481 * Because the CPU and 16C32 (adapter) both need simultaneous access
6482 * to the buffer entries, the buffer entry memory is allocated with
6483 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6484 * entry list to PAGE_SIZE.
6485 *
6486 * The actual data buffers on the other hand will only be accessed
6487 * by the CPU or the adapter but not by both simultaneously. This allows
6488 * Scatter/Gather packet based DMA procedures for using physically
6489 * discontiguous pages.
6490 */
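/*
 * For orientation only: a simplified sketch of the per-buffer bookkeeping
 * described above, limited to the fields referenced by the routines below.
 * The driver's actual DMABUFFERENTRY type is defined elsewhere in this file
 * and carries additional link/address fields required by the 16C32.
 *
 *	struct buffer_entry_sketch {	// illustrative only, not the real type
 *		volatile u16 count;	// bytes available; cleared by the 16C32 while in use
 *		volatile u16 status;	// non-zero marks the last buffer of a frame
 *		volatile u16 rcc;	// residual character count for the frame
 *		u32 phys_entry;		// physical address of this entry (written to NRARL/NRARU)
 *		char *virt_addr;	// CPU-visible address of the data buffer
 *	};
 */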
6491
6492 /*
6493 * mgsl_reset_tx_dma_buffers()
6494 *
6495 * Set the count for all transmit buffers to 0 to indicate the
6496 * buffer is available for use and set the current buffer to the
6497 * first buffer. This effectively makes all buffers free and
6498 * discards any data in buffers.
6499 *
6500 * Arguments: info pointer to device instance data
6501 * Return Value: None
6502 */
6503 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6504 {
6505 unsigned int i;
6506
6507 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6508 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6509 }
6510
6511 info->current_tx_buffer = 0;
6512 info->start_tx_dma_buffer = 0;
6513 info->tx_dma_buffers_used = 0;
6514
6515 info->get_tx_holding_index = 0;
6516 info->put_tx_holding_index = 0;
6517 info->tx_holding_count = 0;
6518
6519 } /* end of mgsl_reset_tx_dma_buffers() */
6520
6521 /*
6522 * num_free_tx_dma_buffers()
6523 *
6524 * returns the number of free tx dma buffers available
6525 *
6526 * Arguments: info pointer to device instance data
6527 * Return Value: number of free tx dma buffers
6528 */
6529 static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6530 {
6531 return info->tx_buffer_count - info->tx_dma_buffers_used;
6532 }
6533
6534 /*
6535 * mgsl_reset_rx_dma_buffers()
6536 *
6537 * Set the count for all receive buffers to DMABUFFERSIZE
6538 * and set the current buffer to the first buffer. This effectively
6539 * makes all buffers free and discards any data in buffers.
6540 *
6541 * Arguments: info pointer to device instance data
6542 * Return Value: None
6543 */
6544 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6545 {
6546 unsigned int i;
6547
6548 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6549 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6550 // info->rx_buffer_list[i].count = DMABUFFERSIZE;
6551 // info->rx_buffer_list[i].status = 0;
6552 }
6553
6554 info->current_rx_buffer = 0;
6555
6556 } /* end of mgsl_reset_rx_dma_buffers() */
6557
6558 /*
6559 * mgsl_free_rx_frame_buffers()
6560 *
6561 * Free the receive buffers used by a received SDLC
6562 * frame such that the buffers can be reused.
6563 *
6564 * Arguments:
6565 *
6566 * info pointer to device instance data
6567 * StartIndex index of 1st receive buffer of frame
6568 * EndIndex index of last receive buffer of frame
6569 *
6570 * Return Value: None
6571 */
6572 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6573 {
6574 int Done = 0;
6575 DMABUFFERENTRY *pBufEntry;
6576 unsigned int Index;
6577
6578 /* Starting with 1st buffer entry of the frame clear the status */
6579 /* field and set the count field to DMA Buffer Size. */
6580
6581 Index = StartIndex;
6582
6583 while( !Done ) {
6584 pBufEntry = &(info->rx_buffer_list[Index]);
6585
6586 if ( Index == EndIndex ) {
6587 /* This is the last buffer of the frame! */
6588 Done = 1;
6589 }
6590
6591 /* reset current buffer for reuse */
6592 // pBufEntry->status = 0;
6593 // pBufEntry->count = DMABUFFERSIZE;
6594 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6595
6596 /* advance to next buffer entry in linked list */
6597 Index++;
6598 if ( Index == info->rx_buffer_count )
6599 Index = 0;
6600 }
6601
6602 /* set current buffer to next buffer after last buffer of frame */
6603 info->current_rx_buffer = Index;
6604
6605 } /* end of mgsl_free_rx_frame_buffers() */
6606
6607 /* mgsl_get_rx_frame()
6608 *
6609 * This function attempts to return a received SDLC frame from the
6610 * receive DMA buffers. Only frames received without errors are returned.
6611 *
6612 * Arguments: info pointer to device extension
6613 * Return Value: 1 if frame returned, otherwise 0
6614 */
6615 static int mgsl_get_rx_frame(struct mgsl_struct *info)
6616 {
6617 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6618 unsigned short status;
6619 DMABUFFERENTRY *pBufEntry;
6620 unsigned int framesize = 0;
6621 int ReturnCode = 0;
6622 unsigned long flags;
6623 struct tty_struct *tty = info->tty;
6624 int return_frame = 0;
6625
6626 /*
6627 * current_rx_buffer points to the 1st buffer of the next available
6628 * receive frame. To find the last buffer of the frame look for
6629 * a non-zero status field in the buffer entries. (The status
6630 * field is set by the 16C32 after completing a receive frame.
6631 * field is set by the 16C32 after completing a receive frame.)
6632
6633 StartIndex = EndIndex = info->current_rx_buffer;
6634
6635 while( !info->rx_buffer_list[EndIndex].status ) {
6636 /*
6637 * If the count field of the buffer entry is non-zero then
6638 * this buffer has not been used. (The 16C32 clears the count
6639 * field when it starts using the buffer.) If an unused buffer
6640 * is encountered then there are no frames available.
6641 */
6642
6643 if ( info->rx_buffer_list[EndIndex].count )
6644 goto Cleanup;
6645
6646 /* advance to next buffer entry in linked list */
6647 EndIndex++;
6648 if ( EndIndex == info->rx_buffer_count )
6649 EndIndex = 0;
6650
6651 /* if entire list searched then no frame available */
6652 if ( EndIndex == StartIndex ) {
6653 /* If this occurs then something bad happened,
6654 * all buffers have been 'used' but none mark
6655 * the end of a frame. Reset buffers and receiver.
6656 */
6657
6658 if ( info->rx_enabled ){
6659 spin_lock_irqsave(&info->irq_spinlock,flags);
6660 usc_start_receiver(info);
6661 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6662 }
6663 goto Cleanup;
6664 }
6665 }
6666
6667
6668 /* check status of receive frame */
6669
6670 status = info->rx_buffer_list[EndIndex].status;
6671
6672 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6673 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6674 if ( status & RXSTATUS_SHORT_FRAME )
6675 info->icount.rxshort++;
6676 else if ( status & RXSTATUS_ABORT )
6677 info->icount.rxabort++;
6678 else if ( status & RXSTATUS_OVERRUN )
6679 info->icount.rxover++;
6680 else {
6681 info->icount.rxcrc++;
6682 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6683 return_frame = 1;
6684 }
6685 framesize = 0;
6686 #ifdef CONFIG_HDLC
6687 {
6688 struct net_device_stats *stats = hdlc_stats(info->netdev);
6689 stats->rx_errors++;
6690 stats->rx_frame_errors++;
6691 }
6692 #endif
6693 } else
6694 return_frame = 1;
6695
6696 if ( return_frame ) {
6697 /* receive frame has no errors, get frame size.
6698 * The frame size is the starting value of the RCC (which was
6699 * set to 0xffff) minus the ending value of the RCC (decremented
6700 * once for each receive character) minus 2 for the 16-bit CRC.
6701 */
6702
6703 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6704
6705 /* adjust frame size for CRC if any */
6706 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6707 framesize -= 2;
6708 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6709 framesize -= 4;
6710 }
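/* Illustrative example (RCC value assumed): with a starting RCC of 0xffff
 * and an ending RCC of 0xffe9, 22 characters were counted; subtracting
 * 2 bytes for HDLC_CRC_16_CCITT gives a 20 byte frame.
 */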
6711
6712 if ( debug_level >= DEBUG_LEVEL_BH )
6713 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6714 __FILE__,__LINE__,info->device_name,status,framesize);
6715
6716 if ( debug_level >= DEBUG_LEVEL_DATA )
6717 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6718 min_t(int, framesize, DMABUFFERSIZE),0);
6719
6720 if (framesize) {
6721 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6722 ((framesize+1) > info->max_frame_size) ) ||
6723 (framesize > info->max_frame_size) )
6724 info->icount.rxlong++;
6725 else {
6726 /* copy dma buffer(s) to contiguous intermediate buffer */
6727 int copy_count = framesize;
6728 int index = StartIndex;
6729 unsigned char *ptmp = info->intermediate_rxbuffer;
6730
6731 if ( !(status & RXSTATUS_CRC_ERROR))
6732 info->icount.rxok++;
6733
6734 while(copy_count) {
6735 int partial_count;
6736 if ( copy_count > DMABUFFERSIZE )
6737 partial_count = DMABUFFERSIZE;
6738 else
6739 partial_count = copy_count;
6740
6741 pBufEntry = &(info->rx_buffer_list[index]);
6742 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6743 ptmp += partial_count;
6744 copy_count -= partial_count;
6745
6746 if ( ++index == info->rx_buffer_count )
6747 index = 0;
6748 }
6749
6750 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6751 ++framesize;
6752 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6753 RX_CRC_ERROR :
6754 RX_OK);
6755
6756 if ( debug_level >= DEBUG_LEVEL_DATA )
6757 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6758 __FILE__,__LINE__,info->device_name,
6759 *ptmp);
6760 }
6761
6762 #ifdef CONFIG_HDLC
6763 if (info->netcount)
6764 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6765 else
6766 #endif
6767 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6768 }
6769 }
6770 /* Free the buffers used by this frame. */
6771 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6772
6773 ReturnCode = 1;
6774
6775 Cleanup:
6776
6777 if ( info->rx_enabled && info->rx_overflow ) {
6778 /* The receiver needs to be restarted because of
6779 * a receive overflow (buffer or FIFO). If the
6780 * receive buffers are now empty, then restart receiver.
6781 */
6782
6783 if ( !info->rx_buffer_list[EndIndex].status &&
6784 info->rx_buffer_list[EndIndex].count ) {
6785 spin_lock_irqsave(&info->irq_spinlock,flags);
6786 usc_start_receiver(info);
6787 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6788 }
6789 }
6790
6791 return ReturnCode;
6792
6793 } /* end of mgsl_get_rx_frame() */
6794
6795 /* mgsl_get_raw_rx_frame()
6796 *
6797 * This function attempts to return a received frame from the
6798 * receive DMA buffers when running in external loop mode. In this mode,
6799 * we will return at most one DMABUFFERSIZE frame to the application.
6800 * The USC receiver is triggering off of DCD going active to start a new
6801 * The USC receiver triggers on DCD going active to start a new
6802 * frame, and on DCD going inactive to terminate the frame (similar to
6803 *
6804 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6805 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6806 * status field and the RCC field will indicate the length of the
6807 * entire received frame. We take this RCC field and compute its
6808 * modulus with DMABUFFERSIZE to determine the number of bytes in the
6809 * last Rx DMA buffer, and return that last portion of the frame.
6810 *
6811 * Arguments: info pointer to device extension
6812 * Return Value: 1 if frame returned, otherwise 0
6813 */
6814 static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6815 {
6816 unsigned int CurrentIndex, NextIndex;
6817 unsigned short status;
6818 DMABUFFERENTRY *pBufEntry;
6819 unsigned int framesize = 0;
6820 int ReturnCode = 0;
6821 unsigned long flags;
6822 struct tty_struct *tty = info->tty;
6823
6824 /*
6825 * current_rx_buffer points to the 1st buffer of the next available
6826 * receive frame. The status field is set by the 16C32 after
6827 * completing a receive frame. If the status field of this buffer
6828 * is zero, either the USC is still filling this buffer or this
6829 * is one of a series of buffers making up a received frame.
6830 *
6831 * If the count field of this buffer is zero, the USC is either
6832 * using this buffer or has used this buffer. Look at the count
6833 * field of the next buffer. If that next buffer's count is
6834 * non-zero, the USC is still actively using the current buffer.
6835 * Otherwise, if the next buffer's count field is zero, the
6836 * current buffer is complete and the USC is using the next
6837 * buffer.
6838 */
6839 CurrentIndex = NextIndex = info->current_rx_buffer;
6840 ++NextIndex;
6841 if ( NextIndex == info->rx_buffer_count )
6842 NextIndex = 0;
6843
6844 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6845 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6846 info->rx_buffer_list[NextIndex].count == 0)) {
6847 /*
6848 * Either the status field of this dma buffer is non-zero
6849 * (indicating the last buffer of a receive frame) or the next
6850 * buffer is marked as in use -- implying this buffer is complete
6851 * and is an intermediate buffer for this received frame.
6852 */
6853
6854 status = info->rx_buffer_list[CurrentIndex].status;
6855
6856 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6857 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6858 if ( status & RXSTATUS_SHORT_FRAME )
6859 info->icount.rxshort++;
6860 else if ( status & RXSTATUS_ABORT )
6861 info->icount.rxabort++;
6862 else if ( status & RXSTATUS_OVERRUN )
6863 info->icount.rxover++;
6864 else
6865 info->icount.rxcrc++;
6866 framesize = 0;
6867 } else {
6868 /*
6869 * A receive frame is available, get frame size and status.
6870 *
6871 * The frame size is the starting value of the RCC (which was
6872 * set to 0xffff) minus the ending value of the RCC (decremented
6873 * once for each receive character) minus 2 or 4 for the 16-bit
6874 * or 32-bit CRC.
6875 *
6876 * If the status field is zero, this is an intermediate buffer.
6877 * Its size is 4K.
6878 *
6879 * If the DMA Buffer Entry's Status field is non-zero, the
6880 * receive operation completed normally (ie: DCD dropped). The
6881 * RCC field is valid and holds the received frame size.
6882 * It is possible that the RCC field will be zero on a DMA buffer
6883 * entry with a non-zero status. This can occur if the total
6884 * frame size (number of bytes between the time DCD goes active
6885 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6886 * case the 16C32 has underrun the RCC count and appears to
6887 * stop updating this counter, so the actual received frame
6888 * size cannot be recovered. If this happens (non-zero status
6889 * and zero RCC), simply return the entire Rx DMA buffer.
6890 */
6891 if ( status ) {
6892 /*
6893 * In the event that the final RxDMA Buffer is
6894 * terminated with a non-zero status and the RCC
6895 * field is zero, we interpret this as the RCC
6896 * having underflowed (received frame > 65535 bytes).
6897 *
6898 * Signal the event to the user by passing back
6899 * a status of RxStatus_CrcError, returning the full
6900 * buffer, and let the app figure out what data is
6901 * actually valid.
6902 */
6903 if ( info->rx_buffer_list[CurrentIndex].rcc )
6904 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6905 else
6906 framesize = DMABUFFERSIZE;
6907 }
6908 else
6909 framesize = DMABUFFERSIZE;
6910 }
6911
6912 if ( framesize > DMABUFFERSIZE ) {
6913 /*
6914 * if running in raw sync mode, ISR handler for
6915 * End Of Buffer events terminates all buffers at 4K.
6916 * If this frame size is said to be >4K, get the
6917 * actual number of bytes of the frame in this buffer.
6918 */
6919 framesize = framesize % DMABUFFERSIZE;
6920 }
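/* Illustrative example (frame length assumed, DMABUFFERSIZE taken as
 * 4K/4096): a 10000 byte raw frame fills two full buffers, leaving
 * 10000 % 4096 = 1808 bytes in this final buffer, so framesize
 * becomes 1808 here.
 */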
6921
6922
6923 if ( debug_level >= DEBUG_LEVEL_BH )
6924 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6925 __FILE__,__LINE__,info->device_name,status,framesize);
6926
6927 if ( debug_level >= DEBUG_LEVEL_DATA )
6928 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6929 min_t(int, framesize, DMABUFFERSIZE),0);
6930
6931 if (framesize) {
6932 /* copy dma buffer(s) to contiguous intermediate buffer */
6933 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6934
6935 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6936 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6937 info->icount.rxok++;
6938
6939 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6940 }
6941
6942 /* Free the buffers used by this frame. */
6943 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6944
6945 ReturnCode = 1;
6946 }
6947
6948
6949 if ( info->rx_enabled && info->rx_overflow ) {
6950 /* The receiver needs to be restarted because of
6951 * a receive overflow (buffer or FIFO). If the
6952 * receive buffers are now empty, then restart receiver.
6953 */
6954
6955 if ( !info->rx_buffer_list[CurrentIndex].status &&
6956 info->rx_buffer_list[CurrentIndex].count ) {
6957 spin_lock_irqsave(&info->irq_spinlock,flags);
6958 usc_start_receiver(info);
6959 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6960 }
6961 }
6962
6963 return ReturnCode;
6964
6965 } /* end of mgsl_get_raw_rx_frame() */
6966
6967 /* mgsl_load_tx_dma_buffer()
6968 *
6969 * Load the transmit DMA buffer with the specified data.
6970 *
6971 * Arguments:
6972 *
6973 * info pointer to device extension
6974 * Buffer pointer to buffer containing frame to load
6975 * BufferSize size in bytes of frame in Buffer
6976 *
6977 * Return Value: None
6978 */
6979 static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6980 const char *Buffer, unsigned int BufferSize)
6981 {
6982 unsigned short Copycount;
6983 unsigned int i = 0;
6984 DMABUFFERENTRY *pBufEntry;
6985
6986 if ( debug_level >= DEBUG_LEVEL_DATA )
6987 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6988
6989 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6990 /* set CMR:13 to start transmit when
6991 * next GoAhead (abort) is received
6992 */
6993 info->cmr_value |= BIT13;
6994 }
6995
6996 /* begin loading the frame in the next available tx dma
6997 * buffer, remember it's starting location for setting
6998 * up tx dma operation
6999 */
7000 i = info->current_tx_buffer;
7001 info->start_tx_dma_buffer = i;
7002
7003 /* Setup the status and RCC (Frame Size) fields of the 1st */
7004 /* buffer entry in the transmit DMA buffer list. */
7005
7006 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
7007 info->tx_buffer_list[i].rcc = BufferSize;
7008 info->tx_buffer_list[i].count = BufferSize;
7009
7010 /* Copy frame data from 1st source buffer to the DMA buffers. */
7011 /* The frame data may span multiple DMA buffers. */
7012
7013 while( BufferSize ){
7014 /* Get a pointer to next DMA buffer entry. */
7015 pBufEntry = &info->tx_buffer_list[i++];
7016
7017 if ( i == info->tx_buffer_count )
7018 i=0;
7019
7020 /* Calculate the number of bytes that can be copied from */
7021 /* the source buffer to this DMA buffer. */
7022 if ( BufferSize > DMABUFFERSIZE )
7023 Copycount = DMABUFFERSIZE;
7024 else
7025 Copycount = BufferSize;
7026
7027 /* Actually copy data from source buffer to DMA buffer. */
7028 /* Also set the data count for this individual DMA buffer. */
7029 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
7030 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
7031 else
7032 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
7033
7034 pBufEntry->count = Copycount;
7035
7036 /* Advance source pointer and reduce remaining data count. */
7037 Buffer += Copycount;
7038 BufferSize -= Copycount;
7039
7040 ++info->tx_dma_buffers_used;
7041 }
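/* Illustrative example (sizes assumed, DMABUFFERSIZE taken as 4096):
 * a 6000 byte frame spans two entries, 4096 bytes in the first buffer
 * and 1904 bytes in the second.
 */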
7042
7043 /* remember next available tx dma buffer */
7044 info->current_tx_buffer = i;
7045
7046 } /* end of mgsl_load_tx_dma_buffer() */
7047
7048 /*
7049 * mgsl_register_test()
7050 *
7051 * Performs a register test of the 16C32.
7052 *
7053 * Arguments: info pointer to device instance data
7054 * Return Value: TRUE if test passed, otherwise FALSE
7055 */
7056 static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
7057 {
7058 static unsigned short BitPatterns[] =
7059 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
7060 static unsigned int Patterncount = sizeof(BitPatterns)/sizeof(unsigned short);
7061 unsigned int i;
7062 BOOLEAN rc = TRUE;
7063 unsigned long flags;
7064
7065 spin_lock_irqsave(&info->irq_spinlock,flags);
7066 usc_reset(info);
7067
7068 /* Verify the reset state of some registers. */
7069
7070 if ( (usc_InReg( info, SICR ) != 0) ||
7071 (usc_InReg( info, IVR ) != 0) ||
7072 (usc_InDmaReg( info, DIVR ) != 0) ){
7073 rc = FALSE;
7074 }
7075
7076 if ( rc == TRUE ){
7077 /* Write a different bit pattern to each register (patterns are */
7078 /* offset from one another), then read back and verify the values. */
7079
7080 for ( i = 0 ; i < Patterncount ; i++ ) {
7081 usc_OutReg( info, TC0R, BitPatterns[i] );
7082 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
7083 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
7084 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
7085 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
7086 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
7087
7088 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
7089 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
7090 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
7091 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
7092 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
7093 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
7094 rc = FALSE;
7095 break;
7096 }
7097 }
7098 }
7099
7100 usc_reset(info);
7101 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7102
7103 return rc;
7104
7105 } /* end of mgsl_register_test() */
7106
7107 /* mgsl_irq_test() Perform interrupt test of the 16C32.
7108 *
7109 * Arguments: info pointer to device instance data
7110 * Return Value: TRUE if test passed, otherwise FALSE
7111 */
7112 static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
7113 {
7114 unsigned long EndTime;
7115 unsigned long flags;
7116
7117 spin_lock_irqsave(&info->irq_spinlock,flags);
7118 usc_reset(info);
7119
7120 /*
7121 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7122 * The ISR sets irq_occurred to 1.
7123 */
7124
7125 info->irq_occurred = FALSE;
7126
7127 /* Enable the INTEN gate (Port 6, Bit12) for the ISA adapter. */
7128 /* This connects the IRQ request signal to the ISA bus */
7129 /* on the ISA adapter. */
7130 /* This has no effect for the PCI adapter. */
7131 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7132
7133 usc_EnableMasterIrqBit(info);
7134 usc_EnableInterrupts(info, IO_PIN);
7135 usc_ClearIrqPendingBits(info, IO_PIN);
7136
7137 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7138 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7139
7140 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7141
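	/* poll up to ~1 second (100 x 10ms) for the ISR to set irq_occurred */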
7142 EndTime=100;
7143 while( EndTime-- && !info->irq_occurred ) {
7144 msleep_interruptible(10);
7145 }
7146
7147 spin_lock_irqsave(&info->irq_spinlock,flags);
7148 usc_reset(info);
7149 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7150
7151 if ( !info->irq_occurred )
7152 return FALSE;
7153 else
7154 return TRUE;
7155
7156 } /* end of mgsl_irq_test() */
7157
7158 /* mgsl_dma_test()
7159 *
7160 * Perform a DMA test of the 16C32. A small frame is
7161 * transmitted via DMA from a transmit buffer to a receive buffer
7162 * using single buffer DMA mode.
7163 *
7164 * Arguments: info pointer to device instance data
7165 * Return Value: TRUE if test passed, otherwise FALSE
7166 */
7167 static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
7168 {
7169 unsigned short FifoLevel;
7170 unsigned long phys_addr;
7171 unsigned int FrameSize;
7172 unsigned int i;
7173 char *TmpPtr;
7174 BOOLEAN rc = TRUE;
7175 unsigned short status=0;
7176 unsigned long EndTime;
7177 unsigned long flags;
7178 MGSL_PARAMS tmp_params;
7179
7180 /* save current port options */
7181 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7182 /* load default port options */
7183 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7184
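/* size in bytes of the frame used for the loopback test */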
7185 #define TESTFRAMESIZE 40
7186
7187 spin_lock_irqsave(&info->irq_spinlock,flags);
7188
7189 /* setup 16C32 for SDLC DMA transfer mode */
7190
7191 usc_reset(info);
7192 usc_set_sdlc_mode(info);
7193 usc_enable_loopback(info,1);
7194
7195 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7196 * field of the buffer entry after fetching buffer address. This
7197 * way we can detect a DMA failure for a DMA read (which should be
7198 * non-destructive to system memory) before we try to write to
7199 * memory (where a failure could corrupt system memory).
7200 */
7201
7202 /* Receive DMA mode Register (RDMR)
7203 *
7204 * <15..14> 11 DMA mode = Linked List Buffer mode
7205 * <13> 1 RSBinA/L = store Rx status Block in List entry
7206 * <12> 0 1 = Clear count of List Entry after fetching
7207 * <11..10> 00 Address mode = Increment
7208 * <9> 1 Terminate Buffer on RxBound
7209 * <8> 0 Bus Width = 16bits
7210 * <7..0> ? status Bits (write as 0s)
7211 *
7212 * 1110 0010 0000 0000 = 0xe200
7213 */
7214
7215 usc_OutDmaReg( info, RDMR, 0xe200 );
7216
7217 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7218
7219
7220 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7221
7222 FrameSize = TESTFRAMESIZE;
7223
7224 /* setup 1st transmit buffer entry: */
7225 /* with frame size and transmit control word */
7226
7227 info->tx_buffer_list[0].count = FrameSize;
7228 info->tx_buffer_list[0].rcc = FrameSize;
7229 info->tx_buffer_list[0].status = 0x4000;
7230
7231 /* build a transmit frame in 1st transmit DMA buffer */
7232
7233 TmpPtr = info->tx_buffer_list[0].virt_addr;
7234 for (i = 0; i < FrameSize; i++ )
7235 *TmpPtr++ = i;
7236
7237 /* setup 1st receive buffer entry: */
7238 /* clear status, set max receive buffer size */
7239
7240 info->rx_buffer_list[0].status = 0;
7241 info->rx_buffer_list[0].count = FrameSize + 4;
7242
7243 /* zero out the 1st receive buffer */
7244
7245 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7246
7247 /* Clear the count field of the next buffer entries to prevent */
7248 /* the 16C32 from using buffers after the 1st one. */
7249
7250 info->tx_buffer_list[1].count = 0;
7251 info->rx_buffer_list[1].count = 0;
7252
7253
7254 /***************************/
7255 /* Program 16C32 receiver. */
7256 /***************************/
7257
7258 spin_lock_irqsave(&info->irq_spinlock,flags);
7259
7260 /* setup DMA transfers */
7261 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7262
7263 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7264 phys_addr = info->rx_buffer_list[0].phys_entry;
7265 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7266 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7267
7268 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7269 usc_InDmaReg( info, RDMR );
7270 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7271
7272 /* Enable Receiver (RMR <1..0> = 10) */
7273 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7274
7275 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7276
7277
7278 /*************************************************************/
7279 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7280 /*************************************************************/
7281
7282 /* Wait up to 100ms for the buffer entry fetch to complete. */
7283 EndTime = jiffies + msecs_to_jiffies(100);
7284
7285 for(;;) {
7286 if (time_after(jiffies, EndTime)) {
7287 rc = FALSE;
7288 break;
7289 }
7290
7291 spin_lock_irqsave(&info->irq_spinlock,flags);
7292 status = usc_InDmaReg( info, RDMR );
7293 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7294
7295 if ( !(status & BIT4) && (status & BIT5) ) {
7296 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7297 /* BUSY (BIT 5) is active (channel still active). */
7298 /* This means the buffer entry read has completed. */
7299 break;
7300 }
7301 }
7302
7303
7304 /******************************/
7305 /* Program 16C32 transmitter. */
7306 /******************************/
7307
7308 spin_lock_irqsave(&info->irq_spinlock,flags);
7309
7310 /* Program the Transmit Character Length Register (TCLR) */
7311 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7312
7313 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7314 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7315
7316 /* Program the address of the 1st DMA Buffer Entry in linked list */
7317
7318 phys_addr = info->tx_buffer_list[0].phys_entry;
7319 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7320 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7321
7322 /* unlatch Tx status bits, and start transmit channel. */
7323
7324 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7325 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7326
7327 /* wait for DMA controller to fill transmit FIFO */
7328
7329 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7330
7331 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7332
7333
7334 /**********************************/
7335 /* WAIT FOR TRANSMIT FIFO TO FILL */
7336 /**********************************/
7337
7338 /* Wait 100ms */
7339 EndTime = jiffies + msecs_to_jiffies(100);
7340
7341 for(;;) {
7342 if (time_after(jiffies, EndTime)) {
7343 rc = FALSE;
7344 break;
7345 }
7346
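		/* TCmd_SelectTicrTxFifostatus (issued above) makes the upper byte
		 * of TICR reflect transmit FIFO status (number of empty entries)
		 */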
7347 spin_lock_irqsave(&info->irq_spinlock,flags);
7348 FifoLevel = usc_InReg(info, TICR) >> 8;
7349 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7350
7351 if ( FifoLevel < 16 )
7352 break;
7353 else
7354 if ( FrameSize < 32 ) {
7355 /* This frame is smaller than the entire transmit FIFO */
7356 /* so wait for the entire frame to be loaded. */
7357 if ( FifoLevel <= (32 - FrameSize) )
7358 break;
7359 }
7360 }
7361
7362
7363 if ( rc == TRUE )
7364 {
7365 /* Enable 16C32 transmitter. */
7366
7367 spin_lock_irqsave(&info->irq_spinlock,flags);
7368
7369 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7370 usc_TCmd( info, TCmd_SendFrame );
7371 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7372
7373 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7374
7375
7376 /******************************/
7377 /* WAIT FOR TRANSMIT COMPLETE */
7378 /******************************/
7379
7380 /* Wait 100ms */
7381 EndTime = jiffies + msecs_to_jiffies(100);
7382
7383 /* While timer not expired wait for transmit complete */
7384
7385 spin_lock_irqsave(&info->irq_spinlock,flags);
7386 status = usc_InReg( info, TCSR );
7387 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7388
7389 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7390 if (time_after(jiffies, EndTime)) {
7391 rc = FALSE;
7392 break;
7393 }
7394
7395 spin_lock_irqsave(&info->irq_spinlock,flags);
7396 status = usc_InReg( info, TCSR );
7397 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7398 }
7399 }
7400
7401
7402 if ( rc == TRUE ){
7403 /* CHECK FOR TRANSMIT ERRORS */
7404 if ( status & (BIT5 + BIT1) )
7405 rc = FALSE;
7406 }
7407
7408 if ( rc == TRUE ) {
7409 /* WAIT FOR RECEIVE COMPLETE */
7410
7411 /* Wait 100ms */
7412 EndTime = jiffies + msecs_to_jiffies(100);
7413
7414 /* Wait for 16C32 to write receive status to buffer entry. */
7415 status=info->rx_buffer_list[0].status;
7416 while ( status == 0 ) {
7417 if (time_after(jiffies, EndTime)) {
7418 rc = FALSE;
7419 break;
7420 }
7421 status=info->rx_buffer_list[0].status;
7422 }
7423 }
7424
7425
7426 if ( rc == TRUE ) {
7427 /* CHECK FOR RECEIVE ERRORS */
7428 status = info->rx_buffer_list[0].status;
7429
7430 if ( status & (BIT8 + BIT3 + BIT1) ) {
7431 /* receive error has occurred */
7432 rc = FALSE;
7433 } else {
7434 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7435 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7436 rc = FALSE;
7437 }
7438 }
7439 }
7440
7441 spin_lock_irqsave(&info->irq_spinlock,flags);
7442 usc_reset( info );
7443 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7444
7445 /* restore current port options */
7446 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7447
7448 return rc;
7449
7450 } /* end of mgsl_dma_test() */
7451
7452 /* mgsl_adapter_test()
7453 *
7454 * Perform the register, IRQ, and DMA tests for the 16C32.
7455 *
7456 * Arguments: info pointer to device instance data
7457 * Return Value: 0 if success, otherwise -ENODEV
7458 */
7459 static int mgsl_adapter_test( struct mgsl_struct *info )
7460 {
7461 if ( debug_level >= DEBUG_LEVEL_INFO )
7462 printk( "%s(%d):Testing device %s\n",
7463 __FILE__,__LINE__,info->device_name );
7464
7465 if ( !mgsl_register_test( info ) ) {
7466 info->init_error = DiagStatus_AddressFailure;
7467 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7468 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7469 return -ENODEV;
7470 }
7471
7472 if ( !mgsl_irq_test( info ) ) {
7473 info->init_error = DiagStatus_IrqFailure;
7474 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7475 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7476 return -ENODEV;
7477 }
7478
7479 if ( !mgsl_dma_test( info ) ) {
7480 info->init_error = DiagStatus_DmaFailure;
7481 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7482 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7483 return -ENODEV;
7484 }
7485
7486 if ( debug_level >= DEBUG_LEVEL_INFO )
7487 printk( "%s(%d):device %s passed diagnostics\n",
7488 __FILE__,__LINE__,info->device_name );
7489
7490 return 0;
7491
7492 } /* end of mgsl_adapter_test() */
7493
7494 /* mgsl_memory_test()
7495 *
7496 * Test the shared memory on a PCI adapter.
7497 *
7498 * Arguments: info pointer to device instance data
7499 * Return Value: TRUE if test passed, otherwise FALSE
7500 */
7501 static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
7502 {
7503 static unsigned long BitPatterns[] = { 0x0, 0x55555555, 0xaaaaaaaa,
7504 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7505 unsigned long Patterncount = sizeof(BitPatterns)/sizeof(unsigned long);
7506 unsigned long i;
7507 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7508 unsigned long * TestAddr;
7509
7510 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7511 return TRUE;
7512
7513 TestAddr = (unsigned long *)info->memory_base;
7514
7515 /* Test data lines with test pattern at one location. */
7516
7517 for ( i = 0 ; i < Patterncount ; i++ ) {
7518 *TestAddr = BitPatterns[i];
7519 if ( *TestAddr != BitPatterns[i] )
7520 return FALSE;
7521 }
7522
7523 /* Test address lines with incrementing pattern over */
7524 /* entire address range. */
7525
7526 for ( i = 0 ; i < TestLimit ; i++ ) {
7527 *TestAddr = i * 4;
7528 TestAddr++;
7529 }
7530
7531 TestAddr = (unsigned long *)info->memory_base;
7532
7533 for ( i = 0 ; i < TestLimit ; i++ ) {
7534 if ( *TestAddr != i * 4 )
7535 return FALSE;
7536 TestAddr++;
7537 }
7538
7539 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7540
7541 return TRUE;
7542
7543 } /* End Of mgsl_memory_test() */
7544
7545
7546 /* mgsl_load_pci_memory()
7547 *
7548 * Load a large block of data into the PCI shared memory.
7549 * Use this instead of memcpy() or memmove() to move data
7550 * into the PCI shared memory.
7551 *
7552 * Notes:
7553 *
7554 * This function prevents the PCI9050 interface chip from hogging
7555 * the adapter local bus, which can starve the 16C32 by preventing
7556 * 16C32 bus master cycles.
7557 *
7558 * The PCI9050 documentation says that the 9050 will always release
7559 * control of the local bus after completing the current read
7560 * or write operation.
7561 *
7562 * It appears that as long as the PCI9050 write FIFO is full, the
7563 * PCI9050 treats all of the writes as a single burst transaction
7564 * and will not release the bus. This causes DMA latency problems
7565 * at high speeds when copying large data blocks to the shared
7566 * memory.
7567 *
7568 * This function, in effect, breaks a large shared memory write
7569 * into multiple transactions by interleaving a shared memory read
7570 * which flushes the write FIFO and 'completes' the write
7571 * transaction. This allows any pending DMA request to gain control
7572 * of the local bus in a timely fashion.
7573 *
7574 * Arguments:
7575 *
7576 * TargetPtr pointer to target address in PCI shared memory
7577 * SourcePtr pointer to source buffer for data
7578 * count count in bytes of data to copy
7579 *
7580 * Return Value: None
7581 */
7582 static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7583 unsigned short count )
7584 {
7585 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7586 #define PCI_LOAD_INTERVAL 64
7587
7588 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7589 unsigned short Index;
7590 unsigned long Dummy;
7591
7592 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7593 {
7594 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
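		/* read back from shared memory to flush the PCI9050 write FIFO */
		/* and end the current burst transaction */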
7595 Dummy = *((volatile unsigned long *)TargetPtr);
7596 TargetPtr += PCI_LOAD_INTERVAL;
7597 SourcePtr += PCI_LOAD_INTERVAL;
7598 }
7599
7600 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7601
7602 } /* End Of mgsl_load_pci_memory() */
7603
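/* mgsl_trace_block()
 *
 * 	Dump a block of data as hex bytes plus printable ASCII,
 * 	16 bytes per line, for debug output.
 *
 * Arguments:	info	pointer to device instance data
 * 		data	pointer to data to dump
 * 		count	count in bytes of data to dump
 * 		xmit	non-zero if transmit data, zero if receive data
 * Return Value:	None
 */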
7604 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7605 {
7606 int i;
7607 int linecount;
7608 if (xmit)
7609 printk("%s tx data:\n",info->device_name);
7610 else
7611 printk("%s rx data:\n",info->device_name);
7612
7613 while(count) {
7614 if (count > 16)
7615 linecount = 16;
7616 else
7617 linecount = count;
7618
7619 for(i=0;i<linecount;i++)
7620 printk("%02X ",(unsigned char)data[i]);
7621 for(;i<17;i++)
7622 printk(" ");
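		/* show printable ASCII (0x20..0x7e) as-is, anything else as '.' */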
7623 for(i=0;i<linecount;i++) {
7624 if (data[i]>=040 && data[i]<=0176)
7625 printk("%c",data[i]);
7626 else
7627 printk(".");
7628 }
7629 printk("\n");
7630
7631 data += linecount;
7632 count -= linecount;
7633 }
7634 } /* end of mgsl_trace_block() */
7635
7636 /* mgsl_tx_timeout()
7637 *
7638 * called when HDLC frame times out
7639 * update stats and do tx completion processing
7640 *
7641 * Arguments: context pointer to device instance data
7642 * Return Value: None
7643 */
7644 static void mgsl_tx_timeout(unsigned long context)
7645 {
7646 struct mgsl_struct *info = (struct mgsl_struct*)context;
7647 unsigned long flags;
7648
7649 if ( debug_level >= DEBUG_LEVEL_INFO )
7650 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7651 __FILE__,__LINE__,info->device_name);
7652 if(info->tx_active &&
7653 (info->params.mode == MGSL_MODE_HDLC ||
7654 info->params.mode == MGSL_MODE_RAW) ) {
7655 info->icount.txtimeout++;
7656 }
7657 spin_lock_irqsave(&info->irq_spinlock,flags);
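	/* mark the transmitter idle and discard any buffered transmit data */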
7658 info->tx_active = 0;
7659 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7660
7661 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7662 usc_loopmode_cancel_transmit( info );
7663
7664 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7665
7666 #ifdef CONFIG_HDLC
7667 if (info->netcount)
7668 hdlcdev_tx_done(info);
7669 else
7670 #endif
7671 mgsl_bh_transmit(info);
7672
7673 } /* end of mgsl_tx_timeout() */
7674
7675 /* signal that there are no more frames to send, so that
7676 * line is 'released' by echoing RxD to TxD when current
7677 * transmission is complete (or immediately if no tx in progress).
7678 */
7679 static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7680 {
7681 unsigned long flags;
7682
7683 spin_lock_irqsave(&info->irq_spinlock,flags);
7684 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7685 if (info->tx_active)
7686 info->loopmode_send_done_requested = TRUE;
7687 else
7688 usc_loopmode_send_done(info);
7689 }
7690 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7691
7692 return 0;
7693 }
7694
7695 /* release the line by echoing RxD to TxD
7696 * upon completion of a transmit frame
7697 */
7698 static void usc_loopmode_send_done( struct mgsl_struct * info )
7699 {
7700 info->loopmode_send_done_requested = FALSE;
7701 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7702 info->cmr_value &= ~BIT13;
7703 usc_OutReg(info, CMR, info->cmr_value);
7704 }
7705
7706 /* abort a transmit in progress while in HDLC LoopMode
7707 */
7708 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7709 {
7710 /* reset tx dma channel and purge TxFifo */
7711 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7712 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7713 usc_loopmode_send_done( info );
7714 }
7715
7716 /* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7717 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7718 * we must clear CMR:13 to begin repeating TxData to RxData
7719 */
7720 static void usc_loopmode_insert_request( struct mgsl_struct * info )
7721 {
7722 info->loopmode_insert_requested = TRUE;
7723
7724 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7725 * begin repeating TxData on RxData (complete insertion)
7726 */
7727 usc_OutReg( info, RICR,
7728 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7729
7730 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7731 info->cmr_value |= BIT13;
7732 usc_OutReg(info, CMR, info->cmr_value);
7733 }
7734
7735 /* return 1 if station is inserted into the loop, otherwise 0
7736 */
7737 static int usc_loopmode_active( struct mgsl_struct * info)
7738 {
7739 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7740 }
7741
7742 #ifdef CONFIG_HDLC
7743
7744 /**
7745 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7746 * set encoding and frame check sequence (FCS) options
7747 *
7748 * dev pointer to network device structure
7749 * encoding serial encoding setting
7750 * parity FCS setting
7751 *
7752 * returns 0 if success, otherwise error code
7753 */
7754 static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7755 unsigned short parity)
7756 {
7757 struct mgsl_struct *info = dev_to_port(dev);
7758 unsigned char new_encoding;
7759 unsigned short new_crctype;
7760
7761 /* return error if TTY interface open */
7762 if (info->count)
7763 return -EBUSY;
7764
7765 switch (encoding)
7766 {
7767 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7768 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7769 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7770 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7771 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7772 default: return -EINVAL;
7773 }
7774
7775 switch (parity)
7776 {
7777 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7778 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7779 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7780 default: return -EINVAL;
7781 }
7782
7783 info->params.encoding = new_encoding;
7784 info->params.crc_type = new_crctype;
7785
7786 /* if network interface up, reprogram hardware */
7787 if (info->netcount)
7788 mgsl_program_hw(info);
7789
7790 return 0;
7791 }
7792
7793 /**
7794 * called by generic HDLC layer to send frame
7795 *
7796 * skb socket buffer containing HDLC frame
7797 * dev pointer to network device structure
7798 *
7799 * returns 0 if success, otherwise error code
7800 */
7801 static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
7802 {
7803 struct mgsl_struct *info = dev_to_port(dev);
7804 struct net_device_stats *stats = hdlc_stats(dev);
7805 unsigned long flags;
7806
7807 if (debug_level >= DEBUG_LEVEL_INFO)
7808 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7809
7810 /* stop sending until this frame completes */
7811 netif_stop_queue(dev);
7812
7813 /* copy data to device buffers */
7814 info->xmit_cnt = skb->len;
7815 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7816
7817 /* update network statistics */
7818 stats->tx_packets++;
7819 stats->tx_bytes += skb->len;
7820
7821 /* done with socket buffer, so free it */
7822 dev_kfree_skb(skb);
7823
7824 /* save start time for transmit timeout detection */
7825 dev->trans_start = jiffies;
7826
7827 /* start hardware transmitter if necessary */
7828 spin_lock_irqsave(&info->irq_spinlock,flags);
7829 if (!info->tx_active)
7830 usc_start_transmitter(info);
7831 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7832
7833 return 0;
7834 }
7835
7836 /**
7837 * called by network layer when interface enabled
7838 * claim resources and initialize hardware
7839 *
7840 * dev pointer to network device structure
7841 *
7842 * returns 0 if success, otherwise error code
7843 */
7844 static int hdlcdev_open(struct net_device *dev)
7845 {
7846 struct mgsl_struct *info = dev_to_port(dev);
7847 int rc;
7848 unsigned long flags;
7849
7850 if (debug_level >= DEBUG_LEVEL_INFO)
7851 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7852
7853 /* generic HDLC layer open processing */
7854 if ((rc = hdlc_open(dev)))
7855 return rc;
7856
7857 /* arbitrate between network and tty opens */
7858 spin_lock_irqsave(&info->netlock, flags);
7859 if (info->count != 0 || info->netcount != 0) {
7860 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7861 spin_unlock_irqrestore(&info->netlock, flags);
7862 return -EBUSY;
7863 }
7864 info->netcount=1;
7865 spin_unlock_irqrestore(&info->netlock, flags);
7866
7867 /* claim resources and init adapter */
7868 if ((rc = startup(info)) != 0) {
7869 spin_lock_irqsave(&info->netlock, flags);
7870 info->netcount=0;
7871 spin_unlock_irqrestore(&info->netlock, flags);
7872 return rc;
7873 }
7874
7875 /* assert DTR and RTS, apply hardware settings */
7876 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7877 mgsl_program_hw(info);
7878
7879 /* enable network layer transmit */
7880 dev->trans_start = jiffies;
7881 netif_start_queue(dev);
7882
7883 /* inform generic HDLC layer of current DCD status */
7884 spin_lock_irqsave(&info->irq_spinlock, flags);
7885 usc_get_serial_signals(info);
7886 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7887 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
7888
7889 return 0;
7890 }
7891
7892 /**
7893 * called by network layer when interface is disabled
7894 * shutdown hardware and release resources
7895 *
7896 * dev pointer to network device structure
7897 *
7898 * returns 0 if success, otherwise error code
7899 */
7900 static int hdlcdev_close(struct net_device *dev)
7901 {
7902 struct mgsl_struct *info = dev_to_port(dev);
7903 unsigned long flags;
7904
7905 if (debug_level >= DEBUG_LEVEL_INFO)
7906 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7907
7908 netif_stop_queue(dev);
7909
7910 /* shutdown adapter and release resources */
7911 shutdown(info);
7912
7913 hdlc_close(dev);
7914
7915 spin_lock_irqsave(&info->netlock, flags);
7916 info->netcount=0;
7917 spin_unlock_irqrestore(&info->netlock, flags);
7918
7919 return 0;
7920 }
7921
7922 /**
7923 * called by network layer to process IOCTL call to network device
7924 *
7925 * dev pointer to network device structure
7926 * ifr pointer to network interface request structure
7927 * cmd IOCTL command code
7928 *
7929 * returns 0 if success, otherwise error code
7930 */
7931 static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7932 {
7933 const size_t size = sizeof(sync_serial_settings);
7934 sync_serial_settings new_line;
7935 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7936 struct mgsl_struct *info = dev_to_port(dev);
7937 unsigned int flags;
7938
7939 if (debug_level >= DEBUG_LEVEL_INFO)
7940 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7941
7942 /* return error if TTY interface open */
7943 if (info->count)
7944 return -EBUSY;
7945
7946 if (cmd != SIOCWANDEV)
7947 return hdlc_ioctl(dev, ifr, cmd);
7948
7949 switch(ifr->ifr_settings.type) {
7950 case IF_GET_IFACE: /* return current sync_serial_settings */
7951
7952 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7953 if (ifr->ifr_settings.size < size) {
7954 ifr->ifr_settings.size = size; /* data size wanted */
7955 return -ENOBUFS;
7956 }
7957
7958 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7959 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7960 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7961 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7962
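		/* map the current clock source flags to a generic HDLC clock_type */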
7963 switch (flags){
7964 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7965 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7966 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7967 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7968 default: new_line.clock_type = CLOCK_DEFAULT;
7969 }
7970
7971 new_line.clock_rate = info->params.clock_speed;
7972 new_line.loopback = info->params.loopback ? 1:0;
7973
7974 if (copy_to_user(line, &new_line, size))
7975 return -EFAULT;
7976 return 0;
7977
7978 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7979
7980 if(!capable(CAP_NET_ADMIN))
7981 return -EPERM;
7982 if (copy_from_user(&new_line, line, size))
7983 return -EFAULT;
7984
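		/* translate the requested clock_type into driver clock source flags */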
7985 switch (new_line.clock_type)
7986 {
7987 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7988 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7989 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7990 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7991 case CLOCK_DEFAULT: flags = info->params.flags &
7992 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7993 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7994 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7995 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7996 default: return -EINVAL;
7997 }
7998
7999 if (new_line.loopback != 0 && new_line.loopback != 1)
8000 return -EINVAL;
8001
8002 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
8003 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
8004 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
8005 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
8006 info->params.flags |= flags;
8007
8008 info->params.loopback = new_line.loopback;
8009
8010 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
8011 info->params.clock_speed = new_line.clock_rate;
8012 else
8013 info->params.clock_speed = 0;
8014
8015 /* if network interface up, reprogram hardware */
8016 if (info->netcount)
8017 mgsl_program_hw(info);
8018 return 0;
8019
8020 default:
8021 return hdlc_ioctl(dev, ifr, cmd);
8022 }
8023 }
8024
8025 /**
8026 * called by network layer when transmit timeout is detected
8027 *
8028 * dev pointer to network device structure
8029 */
8030 static void hdlcdev_tx_timeout(struct net_device *dev)
8031 {
8032 struct mgsl_struct *info = dev_to_port(dev);
8033 struct net_device_stats *stats = hdlc_stats(dev);
8034 unsigned long flags;
8035
8036 if (debug_level >= DEBUG_LEVEL_INFO)
8037 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
8038
8039 stats->tx_errors++;
8040 stats->tx_aborted_errors++;
8041
8042 spin_lock_irqsave(&info->irq_spinlock,flags);
8043 usc_stop_transmitter(info);
8044 spin_unlock_irqrestore(&info->irq_spinlock,flags);
8045
8046 netif_wake_queue(dev);
8047 }
8048
8049 /**
8050 * called by device driver when transmit completes
8051 * reenable network layer transmit if stopped
8052 *
8053 * info pointer to device instance information
8054 */
8055 static void hdlcdev_tx_done(struct mgsl_struct *info)
8056 {
8057 if (netif_queue_stopped(info->netdev))
8058 netif_wake_queue(info->netdev);
8059 }
8060
8061 /**
8062 * called by device driver when frame received
8063 * pass frame to network layer
8064 *
8065 * info pointer to device instance information
8066 * buf pointer to buffer containing frame data
8067 * size count of data bytes in buf
8068 */
8069 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
8070 {
8071 struct sk_buff *skb = dev_alloc_skb(size);
8072 struct net_device *dev = info->netdev;
8073 struct net_device_stats *stats = hdlc_stats(dev);
8074
8075 if (debug_level >= DEBUG_LEVEL_INFO)
8076 printk("hdlcdev_rx(%s)\n",dev->name);
8077
8078 if (skb == NULL) {
8079 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
8080 stats->rx_dropped++;
8081 return;
8082 }
8083
8084 memcpy(skb_put(skb, size),buf,size);
8085
8086 skb->protocol = hdlc_type_trans(skb, info->netdev);
8087
8088 stats->rx_packets++;
8089 stats->rx_bytes += size;
8090
8091 netif_rx(skb);
8092
8093 info->netdev->last_rx = jiffies;
8094 }
8095
8096 /**
8097 * called by device driver when adding device instance
8098 * do generic HDLC initialization
8099 *
8100 * info pointer to device instance information
8101 *
8102 * returns 0 if success, otherwise error code
8103 */
8104 static int hdlcdev_init(struct mgsl_struct *info)
8105 {
8106 int rc;
8107 struct net_device *dev;
8108 hdlc_device *hdlc;
8109
8110 /* allocate and initialize network and HDLC layer objects */
8111
8112 if (!(dev = alloc_hdlcdev(info))) {
8113 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8114 return -ENOMEM;
8115 }
8116
8117 /* for network layer reporting purposes only */
8118 dev->base_addr = info->io_base;
8119 dev->irq = info->irq_level;
8120 dev->dma = info->dma_level;
8121
8122 /* network layer callbacks and settings */
8123 dev->do_ioctl = hdlcdev_ioctl;
8124 dev->open = hdlcdev_open;
8125 dev->stop = hdlcdev_close;
8126 dev->tx_timeout = hdlcdev_tx_timeout;
8127 dev->watchdog_timeo = 10*HZ;
8128 dev->tx_queue_len = 50;
8129
8130 /* generic HDLC layer callbacks and settings */
8131 hdlc = dev_to_hdlc(dev);
8132 hdlc->attach = hdlcdev_attach;
8133 hdlc->xmit = hdlcdev_xmit;
8134
8135 /* register objects with HDLC layer */
8136 if ((rc = register_hdlc_device(dev))) {
8137 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8138 free_netdev(dev);
8139 return rc;
8140 }
8141
8142 info->netdev = dev;
8143 return 0;
8144 }
8145
8146 /**
8147 * called by device driver when removing device instance
8148 * do generic HDLC cleanup
8149 *
8150 * info pointer to device instance information
8151 */
8152 static void hdlcdev_exit(struct mgsl_struct *info)
8153 {
8154 unregister_hdlc_device(info->netdev);
8155 free_netdev(info->netdev);
8156 info->netdev = NULL;
8157 }
8158
8159 #endif /* CONFIG_HDLC */
8160
8161
8162 static int __devinit synclink_init_one (struct pci_dev *dev,
8163 const struct pci_device_id *ent)
8164 {
8165 struct mgsl_struct *info;
8166
8167 if (pci_enable_device(dev)) {
8168 printk("error enabling pci device %p\n", dev);
8169 return -EIO;
8170 }
8171
8172 if (!(info = mgsl_allocate_device())) {
8173 printk("can't allocate device instance data.\n");
8174 return -EIO;
8175 }
8176
8177 /* Copy user configuration info to device instance data */
8178
8179 info->io_base = pci_resource_start(dev, 2);
8180 info->irq_level = dev->irq;
8181 info->phys_memory_base = pci_resource_start(dev, 3);
8182
8183 /* Because ioremap only works on page boundaries we must map
8184 * a larger area than is actually implemented for the LCR
8185 * memory range. We map a full page starting at the page boundary.
8186 */
8187 info->phys_lcr_base = pci_resource_start(dev, 0);
8188 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8189 info->phys_lcr_base &= ~(PAGE_SIZE-1);
8190
8191 info->bus_type = MGSL_BUS_TYPE_PCI;
8192 info->io_addr_size = 8;
8193 info->irq_flags = SA_SHIRQ;
8194
8195 if (dev->device == 0x0210) {
8196 /* Version 1 PCI9030 based universal PCI adapter */
8197 info->misc_ctrl_value = 0x007c4080;
8198 info->hw_version = 1;
8199 } else {
8200 /* Version 0 PCI9050 based 5V PCI adapter
8201 * A PCI9050 bug prevents reading LCR registers if
8202 * LCR base address bit 7 is set. Maintain shadow
8203 * value so we can write to LCR misc control reg.
8204 */
8205 info->misc_ctrl_value = 0x087e4546;
8206 info->hw_version = 0;
8207 }
8208
8209 mgsl_add_device(info);
8210
8211 return 0;
8212 }
8213
8214 static void __devexit synclink_remove_one (struct pci_dev *dev)
8215 {
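	/* intentionally empty; device cleanup is handled elsewhere (e.g. at module unload) */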
8216 }
8217