1 /*
2 * linux/drivers/char/synclink.c
3 *
4 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
5 *
6 * Device driver for Microgate SyncLink ISA and PCI
7 * high speed multiprotocol serial adapters.
8 *
9 * written by Paul Fulghum for Microgate Corporation
10 * paulkf@microgate.com
11 *
12 * Microgate and SyncLink are trademarks of Microgate Corporation
13 *
14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
15 *
16 * Original release 01/11/99
17 *
18 * This code is released under the GNU General Public License (GPL)
19 *
20 * This driver is primarily intended for use in synchronous
21 * HDLC mode. Asynchronous mode is also provided.
22 *
23 * When operating in synchronous mode, each call to mgsl_write()
24 * contains exactly one complete HDLC frame. Calling mgsl_put_char
25 * will start assembling an HDLC frame that will not be sent until
26 * mgsl_flush_chars or mgsl_write is called.
27 *
28 * Synchronous receive data is reported as complete frames. To accomplish
29 * this, the TTY flip buffer is bypassed (too small to hold largest
30 * frame and may fragment frames) and the line discipline
31 * receive entry point is called directly.
32 *
33 * This driver has been tested with a slightly modified ppp.c driver
34 * for synchronous PPP.
35 *
36 * 2000/02/16
37 * Added interface for syncppp.c driver (an alternate synchronous PPP
38 * implementation that also supports Cisco HDLC). Each device instance
39 * registers as a tty device AND a network device (if dosyncppp option
40 * is set for the device). The functionality is determined by which
41 * device interface is opened.
42 *
43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
46 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
53 * OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
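/*
 * Illustrative usage sketch (not part of this driver, error checking
 * omitted): a synchronous HDLC application configures the port through the
 * MGSL_PARAMS structure and the MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls from
 * <linux/synclink.h>, then sends exactly one frame per write().  The device
 * node name (/dev/ttySL0) and the parameter values are assumptions for the
 * example only.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	int send_one_frame(const unsigned char *frame, int len)
 *	{
 *		MGSL_PARAMS params;
 *		int fd = open("/dev/ttySL0", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *		ioctl(fd, MGSL_IOCGPARAMS, &params);	// read current settings
 *		params.mode     = MGSL_MODE_HDLC;
 *		params.encoding = HDLC_ENCODING_NRZI_SPACE;
 *		params.crc_type = HDLC_CRC_16_CCITT;
 *		ioctl(fd, MGSL_IOCSPARAMS, &params);	// apply new settings
 *		write(fd, frame, len);			// one complete HDLC frame
 *		close(fd);
 *		return 0;
 *	}
 */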
55
56 #if defined(__i386__)
57 # define BREAKPOINT() asm(" int $3");
58 #else
59 # define BREAKPOINT() { }
60 #endif
61
62 #define MAX_ISA_DEVICES 10
63 #define MAX_PCI_DEVICES 10
64 #define MAX_TOTAL_DEVICES 20
65
66 #include <linux/module.h>
67 #include <linux/errno.h>
68 #include <linux/signal.h>
69 #include <linux/sched.h>
70 #include <linux/timer.h>
71 #include <linux/interrupt.h>
72 #include <linux/pci.h>
73 #include <linux/tty.h>
74 #include <linux/tty_flip.h>
75 #include <linux/serial.h>
76 #include <linux/major.h>
77 #include <linux/string.h>
78 #include <linux/fcntl.h>
79 #include <linux/ptrace.h>
80 #include <linux/ioport.h>
81 #include <linux/mm.h>
82 #include <linux/slab.h>
83 #include <linux/delay.h>
84
85 #include <linux/netdevice.h>
86
87 #include <linux/vmalloc.h>
88 #include <linux/init.h>
89
90 #include <linux/delay.h>
91 #include <linux/ioctl.h>
92
93 #include <asm/system.h>
94 #include <asm/io.h>
95 #include <asm/irq.h>
96 #include <asm/dma.h>
97 #include <linux/bitops.h>
98 #include <asm/types.h>
99 #include <linux/termios.h>
100 #include <linux/workqueue.h>
101 #include <linux/hdlc.h>
102 #include <linux/dma-mapping.h>
103
104 #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
105 #define SYNCLINK_GENERIC_HDLC 1
106 #else
107 #define SYNCLINK_GENERIC_HDLC 0
108 #endif
109
110 #define GET_USER(error,value,addr) error = get_user(value,addr)
111 #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
112 #define PUT_USER(error,value,addr) error = put_user(value,addr)
113 #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
114
115 #include <asm/uaccess.h>
116
117 #include "linux/synclink.h"
118
119 #define RCLRVALUE 0xffff
120
121 static MGSL_PARAMS default_params = {
122 MGSL_MODE_HDLC, /* unsigned long mode */
123 0, /* unsigned char loopback; */
124 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
125 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
126 0, /* unsigned long clock_speed; */
127 0xff, /* unsigned char addr_filter; */
128 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
129 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
130 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
131 9600, /* unsigned long data_rate; */
132 8, /* unsigned char data_bits; */
133 1, /* unsigned char stop_bits; */
134 ASYNC_PARITY_NONE /* unsigned char parity; */
135 };
136
137 #define SHARED_MEM_ADDRESS_SIZE 0x40000
138 #define BUFFERLISTSIZE 4096
139 #define DMABUFFERSIZE 4096
140 #define MAXRXFRAMES 7
141
142 typedef struct _DMABUFFERENTRY
143 {
144 u32 phys_addr; /* 32-bit flat physical address of data buffer */
145 volatile u16 count; /* buffer size/data count */
146 volatile u16 status; /* Control/status field */
147 volatile u16 rcc; /* character count field */
148 u16 reserved; /* padding required by 16C32 */
149 u32 link; /* 32-bit flat link to next buffer entry */
150 char *virt_addr; /* virtual address of data buffer */
151 u32 phys_entry; /* physical address of this buffer entry */
152 dma_addr_t dma_addr;
153 } DMABUFFERENTRY, *DMAPBUFFERENTRY;
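/*
 * Illustrative sketch (assumptions, not code from this driver): the buffer
 * entries are chained through their 'link' fields so the 16C32 DMA
 * controller can walk the list without CPU intervention.  Building a
 * circular list of 'count' entries starting at flat physical address
 * 'list_phys' (assumed names) would conceptually look like:
 *
 *	unsigned int i;
 *	for (i = 0; i < count; i++) {
 *		list[i].phys_entry = list_phys + i * sizeof(DMABUFFERENTRY);
 *		list[i].link = list_phys +
 *			((i + 1) % count) * sizeof(DMABUFFERENTRY);
 *	}
 */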
154
155 /* The queue of BH actions to be performed */
156
157 #define BH_RECEIVE 1
158 #define BH_TRANSMIT 2
159 #define BH_STATUS 4
160
161 #define IO_PIN_SHUTDOWN_LIMIT 100
162
163 #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
164
165 struct _input_signal_events {
166 int ri_up;
167 int ri_down;
168 int dsr_up;
169 int dsr_down;
170 int dcd_up;
171 int dcd_down;
172 int cts_up;
173 int cts_down;
174 };
175
176 /* transmit holding buffer definitions*/
177 #define MAX_TX_HOLDING_BUFFERS 5
178 struct tx_holding_buffer {
179 int buffer_size;
180 unsigned char * buffer;
181 };
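/*
 * Note: the holding buffers act as a small queue between mgsl_write() and
 * the transmit DMA buffers; frames that cannot be loaded immediately are
 * stored at put_tx_holding_index by save_tx_buffer_request() and later
 * loaded from get_tx_holding_index by load_next_tx_holding_buffer().
 */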
182
183
184 /*
185 * Device instance data structure
186 */
187
188 struct mgsl_struct {
189 int magic;
190 int flags;
191 int count; /* count of opens */
192 int line;
193 int hw_version;
194 unsigned short close_delay;
195 unsigned short closing_wait; /* time to wait before closing */
196
197 struct mgsl_icount icount;
198
199 struct tty_struct *tty;
200 int timeout;
201 int x_char; /* xon/xoff character */
202 int blocked_open; /* # of blocked opens */
203 u16 read_status_mask;
204 u16 ignore_status_mask;
205 unsigned char *xmit_buf;
206 int xmit_head;
207 int xmit_tail;
208 int xmit_cnt;
209
210 wait_queue_head_t open_wait;
211 wait_queue_head_t close_wait;
212
213 wait_queue_head_t status_event_wait_q;
214 wait_queue_head_t event_wait_q;
215 struct timer_list tx_timer; /* HDLC transmit timeout timer */
216 struct mgsl_struct *next_device; /* device list link */
217
218 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
219 struct work_struct task; /* task structure for scheduling bh */
220
221 u32 EventMask; /* event trigger mask */
222 u32 RecordedEvents; /* pending events */
223
224 u32 max_frame_size; /* as set by device config */
225
226 u32 pending_bh;
227
228 int bh_running; /* Protection from multiple */
229 int isr_overflow;
230 int bh_requested;
231
232 int dcd_chkcount; /* check counts to prevent */
233 int cts_chkcount; /* too many IRQs if a signal */
234 int dsr_chkcount; /* is floating */
235 int ri_chkcount;
236
237 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
238 u32 buffer_list_phys;
239 dma_addr_t buffer_list_dma_addr;
240
241 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
242 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
243 unsigned int current_rx_buffer;
244
245 int num_tx_dma_buffers; /* number of tx dma frames required */
246 int tx_dma_buffers_used;
247 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
248 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
249 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
250 int current_tx_buffer; /* next tx dma buffer to be loaded */
251
252 unsigned char *intermediate_rxbuffer;
253
254 int num_tx_holding_buffers; /* number of tx holding buffer allocated */
255 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
256 int put_tx_holding_index; /* next tx holding buffer to store user request */
257 int tx_holding_count; /* number of tx holding buffers waiting */
258 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
259
260 int rx_enabled;
261 int rx_overflow;
262 int rx_rcc_underrun;
263
264 int tx_enabled;
265 int tx_active;
266 u32 idle_mode;
267
268 u16 cmr_value;
269 u16 tcsr_value;
270
271 char device_name[25]; /* device instance name */
272
273 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
274 unsigned char bus; /* expansion bus number (zero based) */
275 unsigned char function; /* PCI device number */
276
277 unsigned int io_base; /* base I/O address of adapter */
278 unsigned int io_addr_size; /* size of the I/O address range */
279 int io_addr_requested; /* nonzero if I/O address requested */
280
281 unsigned int irq_level; /* interrupt level */
282 unsigned long irq_flags;
283 int irq_requested; /* nonzero if IRQ requested */
284
285 unsigned int dma_level; /* DMA channel */
286 int dma_requested; /* nonzero if dma channel requested */
287
288 u16 mbre_bit;
289 u16 loopback_bits;
290 u16 usc_idle_mode;
291
292 MGSL_PARAMS params; /* communications parameters */
293
294 unsigned char serial_signals; /* current serial signal states */
295
296 int irq_occurred; /* for diagnostics use */
297 unsigned int init_error; /* Initialization startup error (DIAGS) */
298 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
299
300 u32 last_mem_alloc;
301 unsigned char* memory_base; /* shared memory address (PCI only) */
302 u32 phys_memory_base;
303 int shared_mem_requested;
304
305 unsigned char* lcr_base; /* local config registers (PCI only) */
306 u32 phys_lcr_base;
307 u32 lcr_offset;
308 int lcr_mem_requested;
309
310 u32 misc_ctrl_value;
311 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
312 char char_buf[MAX_ASYNC_BUFFER_SIZE];
313 BOOLEAN drop_rts_on_tx_done;
314
315 BOOLEAN loopmode_insert_requested;
316 BOOLEAN loopmode_send_done_requested;
317
318 struct _input_signal_events input_signal_events;
319
320 /* generic HDLC device parts */
321 int netcount;
322 int dosyncppp;
323 spinlock_t netlock;
324
325 #if SYNCLINK_GENERIC_HDLC
326 struct net_device *netdev;
327 #endif
328 };
329
330 #define MGSL_MAGIC 0x5401
331
332 /*
333 * The size of the serial xmit buffer is 1 page, or 4096 bytes
334 */
335 #ifndef SERIAL_XMIT_SIZE
336 #define SERIAL_XMIT_SIZE 4096
337 #endif
338
339 /*
340 * These macros define the offsets used in calculating the
341 * I/O address of the specified USC registers.
342 */
343
344
345 #define DCPIN 2 /* Bit 1 of I/O address */
346 #define SDPIN 4 /* Bit 2 of I/O address */
347
348 #define DCAR 0 /* DMA command/address register */
349 #define CCAR SDPIN /* channel command/address register */
350 #define DATAREG DCPIN + SDPIN /* serial data register */
351 #define MSBONLY 0x41
352 #define LSBONLY 0x40
353
354 /*
355 * These macros define the register address (ordinal number)
356 * used for writing address/value pairs to the USC.
357 */
358
359 #define CMR 0x02 /* Channel mode Register */
360 #define CCSR 0x04 /* Channel Command/status Register */
361 #define CCR 0x06 /* Channel Control Register */
362 #define PSR 0x08 /* Port status Register */
363 #define PCR 0x0a /* Port Control Register */
364 #define TMDR 0x0c /* Test mode Data Register */
365 #define TMCR 0x0e /* Test mode Control Register */
366 #define CMCR 0x10 /* Clock mode Control Register */
367 #define HCR 0x12 /* Hardware Configuration Register */
368 #define IVR 0x14 /* Interrupt Vector Register */
369 #define IOCR 0x16 /* Input/Output Control Register */
370 #define ICR 0x18 /* Interrupt Control Register */
371 #define DCCR 0x1a /* Daisy Chain Control Register */
372 #define MISR 0x1c /* Misc Interrupt status Register */
373 #define SICR 0x1e /* status Interrupt Control Register */
374 #define RDR 0x20 /* Receive Data Register */
375 #define RMR 0x22 /* Receive mode Register */
376 #define RCSR 0x24 /* Receive Command/status Register */
377 #define RICR 0x26 /* Receive Interrupt Control Register */
378 #define RSR 0x28 /* Receive Sync Register */
379 #define RCLR 0x2a /* Receive count Limit Register */
380 #define RCCR 0x2c /* Receive Character count Register */
381 #define TC0R 0x2e /* Time Constant 0 Register */
382 #define TDR 0x30 /* Transmit Data Register */
383 #define TMR 0x32 /* Transmit mode Register */
384 #define TCSR 0x34 /* Transmit Command/status Register */
385 #define TICR 0x36 /* Transmit Interrupt Control Register */
386 #define TSR 0x38 /* Transmit Sync Register */
387 #define TCLR 0x3a /* Transmit count Limit Register */
388 #define TCCR 0x3c /* Transmit Character count Register */
389 #define TC1R 0x3e /* Time Constant 1 Register */
390
391
392 /*
393 * MACRO DEFINITIONS FOR DMA REGISTERS
394 */
395
396 #define DCR 0x06 /* DMA Control Register (shared) */
397 #define DACR 0x08 /* DMA Array count Register (shared) */
398 #define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
399 #define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
400 #define DICR 0x18 /* DMA Interrupt Control Register (shared) */
401 #define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
402 #define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
403
404 #define TDMR 0x02 /* Transmit DMA mode Register */
405 #define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
406 #define TBCR 0x2a /* Transmit Byte count Register */
407 #define TARL 0x2c /* Transmit Address Register (low) */
408 #define TARU 0x2e /* Transmit Address Register (high) */
409 #define NTBCR 0x3a /* Next Transmit Byte count Register */
410 #define NTARL 0x3c /* Next Transmit Address Register (low) */
411 #define NTARU 0x3e /* Next Transmit Address Register (high) */
412
413 #define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
414 #define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
415 #define RBCR 0xaa /* Receive Byte count Register */
416 #define RARL 0xac /* Receive Address Register (low) */
417 #define RARU 0xae /* Receive Address Register (high) */
418 #define NRBCR 0xba /* Next Receive Byte count Register */
419 #define NRARL 0xbc /* Next Receive Address Register (low) */
420 #define NRARU 0xbe /* Next Receive Address Register (high) */
421
422
423 /*
424 * MACRO DEFINITIONS FOR MODEM STATUS BITS
425 */
426
427 #define MODEMSTATUS_DTR 0x80
428 #define MODEMSTATUS_DSR 0x40
429 #define MODEMSTATUS_RTS 0x20
430 #define MODEMSTATUS_CTS 0x10
431 #define MODEMSTATUS_RI 0x04
432 #define MODEMSTATUS_DCD 0x01
433
434
435 /*
436 * Channel Command/Address Register (CCAR) Command Codes
437 */
438
439 #define RTCmd_Null 0x0000
440 #define RTCmd_ResetHighestIus 0x1000
441 #define RTCmd_TriggerChannelLoadDma 0x2000
442 #define RTCmd_TriggerRxDma 0x2800
443 #define RTCmd_TriggerTxDma 0x3000
444 #define RTCmd_TriggerRxAndTxDma 0x3800
445 #define RTCmd_PurgeRxFifo 0x4800
446 #define RTCmd_PurgeTxFifo 0x5000
447 #define RTCmd_PurgeRxAndTxFifo 0x5800
448 #define RTCmd_LoadRcc 0x6800
449 #define RTCmd_LoadTcc 0x7000
450 #define RTCmd_LoadRccAndTcc 0x7800
451 #define RTCmd_LoadTC0 0x8800
452 #define RTCmd_LoadTC1 0x9000
453 #define RTCmd_LoadTC0AndTC1 0x9800
454 #define RTCmd_SerialDataLSBFirst 0xa000
455 #define RTCmd_SerialDataMSBFirst 0xa800
456 #define RTCmd_SelectBigEndian 0xb000
457 #define RTCmd_SelectLittleEndian 0xb800
458
459
460 /*
461 * DMA Command/Address Register (DCAR) Command Codes
462 */
463
464 #define DmaCmd_Null 0x0000
465 #define DmaCmd_ResetTxChannel 0x1000
466 #define DmaCmd_ResetRxChannel 0x1200
467 #define DmaCmd_StartTxChannel 0x2000
468 #define DmaCmd_StartRxChannel 0x2200
469 #define DmaCmd_ContinueTxChannel 0x3000
470 #define DmaCmd_ContinueRxChannel 0x3200
471 #define DmaCmd_PauseTxChannel 0x4000
472 #define DmaCmd_PauseRxChannel 0x4200
473 #define DmaCmd_AbortTxChannel 0x5000
474 #define DmaCmd_AbortRxChannel 0x5200
475 #define DmaCmd_InitTxChannel 0x7000
476 #define DmaCmd_InitRxChannel 0x7200
477 #define DmaCmd_ResetHighestDmaIus 0x8000
478 #define DmaCmd_ResetAllChannels 0x9000
479 #define DmaCmd_StartAllChannels 0xa000
480 #define DmaCmd_ContinueAllChannels 0xb000
481 #define DmaCmd_PauseAllChannels 0xc000
482 #define DmaCmd_AbortAllChannels 0xd000
483 #define DmaCmd_InitAllChannels 0xf000
484
485 #define TCmd_Null 0x0000
486 #define TCmd_ClearTxCRC 0x2000
487 #define TCmd_SelectTicrTtsaData 0x4000
488 #define TCmd_SelectTicrTxFifostatus 0x5000
489 #define TCmd_SelectTicrIntLevel 0x6000
490 #define TCmd_SelectTicrdma_level 0x7000
491 #define TCmd_SendFrame 0x8000
492 #define TCmd_SendAbort 0x9000
493 #define TCmd_EnableDleInsertion 0xc000
494 #define TCmd_DisableDleInsertion 0xd000
495 #define TCmd_ClearEofEom 0xe000
496 #define TCmd_SetEofEom 0xf000
497
498 #define RCmd_Null 0x0000
499 #define RCmd_ClearRxCRC 0x2000
500 #define RCmd_EnterHuntmode 0x3000
501 #define RCmd_SelectRicrRtsaData 0x4000
502 #define RCmd_SelectRicrRxFifostatus 0x5000
503 #define RCmd_SelectRicrIntLevel 0x6000
504 #define RCmd_SelectRicrdma_level 0x7000
505
506 /*
507 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
508 */
509
510 #define RECEIVE_STATUS BIT5
511 #define RECEIVE_DATA BIT4
512 #define TRANSMIT_STATUS BIT3
513 #define TRANSMIT_DATA BIT2
514 #define IO_PIN BIT1
515 #define MISC BIT0
516
517
518 /*
519 * Receive status Bits in Receive Command/status Register RCSR
520 */
521
522 #define RXSTATUS_SHORT_FRAME BIT8
523 #define RXSTATUS_CODE_VIOLATION BIT8
524 #define RXSTATUS_EXITED_HUNT BIT7
525 #define RXSTATUS_IDLE_RECEIVED BIT6
526 #define RXSTATUS_BREAK_RECEIVED BIT5
527 #define RXSTATUS_ABORT_RECEIVED BIT5
528 #define RXSTATUS_RXBOUND BIT4
529 #define RXSTATUS_CRC_ERROR BIT3
530 #define RXSTATUS_FRAMING_ERROR BIT3
531 #define RXSTATUS_ABORT BIT2
532 #define RXSTATUS_PARITY_ERROR BIT2
533 #define RXSTATUS_OVERRUN BIT1
534 #define RXSTATUS_DATA_AVAILABLE BIT0
535 #define RXSTATUS_ALL 0x01f6
536 #define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
537
538 /*
539 * Values for setting transmit idle mode in
540 * Transmit Control/status Register (TCSR)
541 */
542 #define IDLEMODE_FLAGS 0x0000
543 #define IDLEMODE_ALT_ONE_ZERO 0x0100
544 #define IDLEMODE_ZERO 0x0200
545 #define IDLEMODE_ONE 0x0300
546 #define IDLEMODE_ALT_MARK_SPACE 0x0500
547 #define IDLEMODE_SPACE 0x0600
548 #define IDLEMODE_MARK 0x0700
549 #define IDLEMODE_MASK 0x0700
550
551 /*
552 * IUSC revision identifiers
553 */
554 #define IUSC_SL1660 0x4d44
555 #define IUSC_PRE_SL1660 0x4553
556
557 /*
558 * Transmit status Bits in Transmit Command/status Register (TCSR)
559 */
560
561 #define TCSR_PRESERVE 0x0F00
562
563 #define TCSR_UNDERWAIT BIT11
564 #define TXSTATUS_PREAMBLE_SENT BIT7
565 #define TXSTATUS_IDLE_SENT BIT6
566 #define TXSTATUS_ABORT_SENT BIT5
567 #define TXSTATUS_EOF_SENT BIT4
568 #define TXSTATUS_EOM_SENT BIT4
569 #define TXSTATUS_CRC_SENT BIT3
570 #define TXSTATUS_ALL_SENT BIT2
571 #define TXSTATUS_UNDERRUN BIT1
572 #define TXSTATUS_FIFO_EMPTY BIT0
573 #define TXSTATUS_ALL 0x00fa
574 #define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
575
576
577 #define MISCSTATUS_RXC_LATCHED BIT15
578 #define MISCSTATUS_RXC BIT14
579 #define MISCSTATUS_TXC_LATCHED BIT13
580 #define MISCSTATUS_TXC BIT12
581 #define MISCSTATUS_RI_LATCHED BIT11
582 #define MISCSTATUS_RI BIT10
583 #define MISCSTATUS_DSR_LATCHED BIT9
584 #define MISCSTATUS_DSR BIT8
585 #define MISCSTATUS_DCD_LATCHED BIT7
586 #define MISCSTATUS_DCD BIT6
587 #define MISCSTATUS_CTS_LATCHED BIT5
588 #define MISCSTATUS_CTS BIT4
589 #define MISCSTATUS_RCC_UNDERRUN BIT3
590 #define MISCSTATUS_DPLL_NO_SYNC BIT2
591 #define MISCSTATUS_BRG1_ZERO BIT1
592 #define MISCSTATUS_BRG0_ZERO BIT0
593
594 #define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
595 #define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
596
597 #define SICR_RXC_ACTIVE BIT15
598 #define SICR_RXC_INACTIVE BIT14
599 #define SICR_RXC (BIT15+BIT14)
600 #define SICR_TXC_ACTIVE BIT13
601 #define SICR_TXC_INACTIVE BIT12
602 #define SICR_TXC (BIT13+BIT12)
603 #define SICR_RI_ACTIVE BIT11
604 #define SICR_RI_INACTIVE BIT10
605 #define SICR_RI (BIT11+BIT10)
606 #define SICR_DSR_ACTIVE BIT9
607 #define SICR_DSR_INACTIVE BIT8
608 #define SICR_DSR (BIT9+BIT8)
609 #define SICR_DCD_ACTIVE BIT7
610 #define SICR_DCD_INACTIVE BIT6
611 #define SICR_DCD (BIT7+BIT6)
612 #define SICR_CTS_ACTIVE BIT5
613 #define SICR_CTS_INACTIVE BIT4
614 #define SICR_CTS (BIT5+BIT4)
615 #define SICR_RCC_UNDERFLOW BIT3
616 #define SICR_DPLL_NO_SYNC BIT2
617 #define SICR_BRG1_ZERO BIT1
618 #define SICR_BRG0_ZERO BIT0
619
620 void usc_DisableMasterIrqBit( struct mgsl_struct *info );
621 void usc_EnableMasterIrqBit( struct mgsl_struct *info );
622 void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
623 void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
624 void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
625
626 #define usc_EnableInterrupts( a, b ) \
627 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
628
629 #define usc_DisableInterrupts( a, b ) \
630 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
631
632 #define usc_EnableMasterIrqBit(a) \
633 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
634
635 #define usc_DisableMasterIrqBit(a) \
636 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
637
638 #define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
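/*
 * Worked example (illustrative): enabling the receive data and receive
 * status interrupts expands to a single ICR write that preserves the upper
 * byte of ICR and writes the 0xc0 enable command plus the requested IRQ
 * bits in the lower byte:
 *
 *	usc_EnableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
 *	    expands to
 *	usc_OutReg( info, ICR,
 *	    (u16)((usc_InReg(info,ICR) & 0xff00) + 0xc0 + 0x30) );
 */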
639
640 /*
641 * Transmit status Bits in Transmit Control status Register (TCSR)
642 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
643 */
644
645 #define TXSTATUS_PREAMBLE_SENT BIT7
646 #define TXSTATUS_IDLE_SENT BIT6
647 #define TXSTATUS_ABORT_SENT BIT5
648 #define TXSTATUS_EOF BIT4
649 #define TXSTATUS_CRC_SENT BIT3
650 #define TXSTATUS_ALL_SENT BIT2
651 #define TXSTATUS_UNDERRUN BIT1
652 #define TXSTATUS_FIFO_EMPTY BIT0
653
654 #define DICR_MASTER BIT15
655 #define DICR_TRANSMIT BIT0
656 #define DICR_RECEIVE BIT1
657
658 #define usc_EnableDmaInterrupts(a,b) \
659 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
660
661 #define usc_DisableDmaInterrupts(a,b) \
662 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
663
664 #define usc_EnableStatusIrqs(a,b) \
665 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
666
667 #define usc_DisablestatusIrqs(a,b) \
668 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
669
670 /* Transmit status Bits in Transmit Control status Register (TCSR) */
671 /* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
672
673
674 #define DISABLE_UNCONDITIONAL 0
675 #define DISABLE_END_OF_FRAME 1
676 #define ENABLE_UNCONDITIONAL 2
677 #define ENABLE_AUTO_CTS 3
678 #define ENABLE_AUTO_DCD 3
679 #define usc_EnableTransmitter(a,b) \
680 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
681 #define usc_EnableReceiver(a,b) \
682 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
683
684 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
685 static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
686 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
687
688 static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
689 static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
690 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
691 void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
692 void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
693
694 #define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
695 #define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
696
697 #define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
698
699 static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
700 static void usc_start_receiver( struct mgsl_struct *info );
701 static void usc_stop_receiver( struct mgsl_struct *info );
702
703 static void usc_start_transmitter( struct mgsl_struct *info );
704 static void usc_stop_transmitter( struct mgsl_struct *info );
705 static void usc_set_txidle( struct mgsl_struct *info );
706 static void usc_load_txfifo( struct mgsl_struct *info );
707
708 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
709 static void usc_enable_loopback( struct mgsl_struct *info, int enable );
710
711 static void usc_get_serial_signals( struct mgsl_struct *info );
712 static void usc_set_serial_signals( struct mgsl_struct *info );
713
714 static void usc_reset( struct mgsl_struct *info );
715
716 static void usc_set_sync_mode( struct mgsl_struct *info );
717 static void usc_set_sdlc_mode( struct mgsl_struct *info );
718 static void usc_set_async_mode( struct mgsl_struct *info );
719 static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
720
721 static void usc_loopback_frame( struct mgsl_struct *info );
722
723 static void mgsl_tx_timeout(unsigned long context);
724
725
726 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
727 static void usc_loopmode_insert_request( struct mgsl_struct * info );
728 static int usc_loopmode_active( struct mgsl_struct * info);
729 static void usc_loopmode_send_done( struct mgsl_struct * info );
730
731 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
732
733 #if SYNCLINK_GENERIC_HDLC
734 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
735 static void hdlcdev_tx_done(struct mgsl_struct *info);
736 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
737 static int hdlcdev_init(struct mgsl_struct *info);
738 static void hdlcdev_exit(struct mgsl_struct *info);
739 #endif
740
741 /*
742 * Defines a BUS descriptor value for the PCI adapter
743 * local bus address ranges.
744 */
745
746 #define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
747 (0x00400020 + \
748 ((WrHold) << 30) + \
749 ((WrDly) << 28) + \
750 ((RdDly) << 26) + \
751 ((Nwdd) << 20) + \
752 ((Nwad) << 15) + \
753 ((Nxda) << 13) + \
754 ((Nrdd) << 11) + \
755 ((Nrad) << 6) )
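/*
 * Worked example (illustrative values only): a descriptor with a write
 * hold time of 1 and all other fields zero evaluates to
 *
 *	BUS_DESCRIPTOR( 1, 0, 0, 0, 0, 0, 0, 0 )
 *	    == 0x00400020 + (1 << 30)
 *	    == 0x40400020
 */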
756
757 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
758
759 /*
760 * Adapter diagnostic routines
761 */
762 static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
763 static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
764 static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
765 static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
766 static int mgsl_adapter_test( struct mgsl_struct *info );
767
768 /*
769 * device and resource management routines
770 */
771 static int mgsl_claim_resources(struct mgsl_struct *info);
772 static void mgsl_release_resources(struct mgsl_struct *info);
773 static void mgsl_add_device(struct mgsl_struct *info);
774 static struct mgsl_struct* mgsl_allocate_device(void);
775
776 /*
777 * DMA buffer manipulation functions.
778 */
779 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
780 static int mgsl_get_rx_frame( struct mgsl_struct *info );
781 static int mgsl_get_raw_rx_frame( struct mgsl_struct *info );
782 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
783 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
784 static int num_free_tx_dma_buffers(struct mgsl_struct *info);
785 static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
786 static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
787
788 /*
789 * DMA and Shared Memory buffer allocation and formatting
790 */
791 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
792 static void mgsl_free_dma_buffers(struct mgsl_struct *info);
793 static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
794 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
795 static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
796 static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
797 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
798 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
799 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
800 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
801 static int load_next_tx_holding_buffer(struct mgsl_struct *info);
802 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
803
804 /*
805 * Bottom half interrupt handlers
806 */
807 static void mgsl_bh_handler(struct work_struct *work);
808 static void mgsl_bh_receive(struct mgsl_struct *info);
809 static void mgsl_bh_transmit(struct mgsl_struct *info);
810 static void mgsl_bh_status(struct mgsl_struct *info);
811
812 /*
813 * Interrupt handler routines and dispatch table.
814 */
815 static void mgsl_isr_null( struct mgsl_struct *info );
816 static void mgsl_isr_transmit_data( struct mgsl_struct *info );
817 static void mgsl_isr_receive_data( struct mgsl_struct *info );
818 static void mgsl_isr_receive_status( struct mgsl_struct *info );
819 static void mgsl_isr_transmit_status( struct mgsl_struct *info );
820 static void mgsl_isr_io_pin( struct mgsl_struct *info );
821 static void mgsl_isr_misc( struct mgsl_struct *info );
822 static void mgsl_isr_receive_dma( struct mgsl_struct *info );
823 static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
824
825 typedef void (*isr_dispatch_func)(struct mgsl_struct *);
826
827 static isr_dispatch_func UscIsrTable[7] =
828 {
829 mgsl_isr_null,
830 mgsl_isr_misc,
831 mgsl_isr_io_pin,
832 mgsl_isr_transmit_data,
833 mgsl_isr_transmit_status,
834 mgsl_isr_receive_data,
835 mgsl_isr_receive_status
836 };
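/*
 * Note: mgsl_interrupt() derives the index into this table from the upper
 * bits of the Interrupt Vector Register and dispatches directly:
 *
 *	UscVector = usc_InReg( info, IVR ) >> 9;
 *	(*UscIsrTable[UscVector])(info);
 */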
837
838 /*
839 * ioctl call handlers
840 */
841 static int tiocmget(struct tty_struct *tty, struct file *file);
842 static int tiocmset(struct tty_struct *tty, struct file *file,
843 unsigned int set, unsigned int clear);
844 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
845 __user *user_icount);
846 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
847 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
848 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
849 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
850 static int mgsl_txenable(struct mgsl_struct * info, int enable);
851 static int mgsl_txabort(struct mgsl_struct * info);
852 static int mgsl_rxenable(struct mgsl_struct * info, int enable);
853 static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
854 static int mgsl_loopmode_send_done( struct mgsl_struct * info );
855
856 /* set non-zero on successful registration with PCI subsystem */
857 static int pci_registered;
858
859 /*
860 * Global linked list of SyncLink devices
861 */
862 static struct mgsl_struct *mgsl_device_list;
863 static int mgsl_device_count;
864
865 /*
866 * Set this param to non-zero to load eax with the
867 * .text section address and breakpoint on module load.
868 * This is useful for use with gdb and add-symbol-file command.
869 */
870 static int break_on_load;
871
872 /*
873 * Driver major number, defaults to zero to get auto
874 * assigned major number. May be forced as module parameter.
875 */
876 static int ttymajor;
877
878 /*
879 * Array of user specified options for ISA adapters.
880 */
881 static int io[MAX_ISA_DEVICES];
882 static int irq[MAX_ISA_DEVICES];
883 static int dma[MAX_ISA_DEVICES];
884 static int debug_level;
885 static int maxframe[MAX_TOTAL_DEVICES];
886 static int dosyncppp[MAX_TOTAL_DEVICES];
887 static int txdmabufs[MAX_TOTAL_DEVICES];
888 static int txholdbufs[MAX_TOTAL_DEVICES];
889
890 module_param(break_on_load, bool, 0);
891 module_param(ttymajor, int, 0);
892 module_param_array(io, int, NULL, 0);
893 module_param_array(irq, int, NULL, 0);
894 module_param_array(dma, int, NULL, 0);
895 module_param(debug_level, int, 0);
896 module_param_array(maxframe, int, NULL, 0);
897 module_param_array(dosyncppp, int, NULL, 0);
898 module_param_array(txdmabufs, int, NULL, 0);
899 module_param_array(txholdbufs, int, NULL, 0);
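/*
 * Example (illustrative option values): an ISA adapter at I/O address
 * 0x280 using IRQ 10 and DMA channel 7, limited to 4KB frames, could be
 * loaded with:
 *
 *	modprobe synclink io=0x280 irq=10 dma=7 maxframe=4096
 */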
900
901 static char *driver_name = "SyncLink serial driver";
902 static char *driver_version = "$Revision: 4.38 $";
903
904 static int synclink_init_one (struct pci_dev *dev,
905 const struct pci_device_id *ent);
906 static void synclink_remove_one (struct pci_dev *dev);
907
908 static struct pci_device_id synclink_pci_tbl[] = {
909 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
910 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
911 { 0, }, /* terminate list */
912 };
913 MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
914
915 MODULE_LICENSE("GPL");
916
917 static struct pci_driver synclink_pci_driver = {
918 .name = "synclink",
919 .id_table = synclink_pci_tbl,
920 .probe = synclink_init_one,
921 .remove = __devexit_p(synclink_remove_one),
922 };
923
924 static struct tty_driver *serial_driver;
925
926 /* number of characters left in xmit buffer before we ask for more */
927 #define WAKEUP_CHARS 256
928
929
930 static void mgsl_change_params(struct mgsl_struct *info);
931 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
932
933 /*
934 * 1st function defined in .text section. Calling this function in
935 * init_module() followed by a breakpoint allows a remote debugger
936 * (gdb) to get the .text address for the add-symbol-file command.
937 * This allows remote debugging of dynamically loadable modules.
938 */
939 static void* mgsl_get_text_ptr(void)
940 {
941 return mgsl_get_text_ptr;
942 }
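/*
 * Example debugging session (illustrative): with break_on_load set, the
 * breakpoint fires during module load and the .text address can be fed to
 * gdb (object name and address below are placeholders):
 *
 *	(gdb) add-symbol-file synclink.o <.text address from eax>
 */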
943
944 static inline int mgsl_paranoia_check(struct mgsl_struct *info,
945 char *name, const char *routine)
946 {
947 #ifdef MGSL_PARANOIA_CHECK
948 static const char *badmagic =
949 "Warning: bad magic number for mgsl struct (%s) in %s\n";
950 static const char *badinfo =
951 "Warning: null mgsl_struct for (%s) in %s\n";
952
953 if (!info) {
954 printk(badinfo, name, routine);
955 return 1;
956 }
957 if (info->magic != MGSL_MAGIC) {
958 printk(badmagic, name, routine);
959 return 1;
960 }
961 #else
962 if (!info)
963 return 1;
964 #endif
965 return 0;
966 }
967
968 /**
969 * line discipline callback wrappers
970 *
971 * The wrappers maintain line discipline references
972 * while calling into the line discipline.
973 *
974 * ldisc_receive_buf - pass receive data to line discipline
975 */
976
977 static void ldisc_receive_buf(struct tty_struct *tty,
978 const __u8 *data, char *flags, int count)
979 {
980 struct tty_ldisc *ld;
981 if (!tty)
982 return;
983 ld = tty_ldisc_ref(tty);
984 if (ld) {
985 if (ld->receive_buf)
986 ld->receive_buf(tty, data, flags, count);
987 tty_ldisc_deref(ld);
988 }
989 }
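/*
 * Illustrative sketch (framesize is an assumed local, not code from this
 * function): once a complete HDLC frame has been assembled in
 * intermediate_rxbuffer, the receive path can hand the whole frame to the
 * line discipline in a single call, bypassing the tty flip buffer:
 *
 *	ldisc_receive_buf(info->tty, info->intermediate_rxbuffer,
 *			  info->flag_buf, framesize);
 */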
990
991 /* mgsl_stop() throttle (stop) transmitter
992 *
993 * Arguments: tty pointer to tty info structure
994 * Return Value: None
995 */
996 static void mgsl_stop(struct tty_struct *tty)
997 {
998 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
999 unsigned long flags;
1000
1001 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
1002 return;
1003
1004 if ( debug_level >= DEBUG_LEVEL_INFO )
1005 printk("mgsl_stop(%s)\n",info->device_name);
1006
1007 spin_lock_irqsave(&info->irq_spinlock,flags);
1008 if (info->tx_enabled)
1009 usc_stop_transmitter(info);
1010 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1011
1012 } /* end of mgsl_stop() */
1013
1014 /* mgsl_start() release (start) transmitter
1015 *
1016 * Arguments: tty pointer to tty info structure
1017 * Return Value: None
1018 */
1019 static void mgsl_start(struct tty_struct *tty)
1020 {
1021 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1022 unsigned long flags;
1023
1024 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1025 return;
1026
1027 if ( debug_level >= DEBUG_LEVEL_INFO )
1028 printk("mgsl_start(%s)\n",info->device_name);
1029
1030 spin_lock_irqsave(&info->irq_spinlock,flags);
1031 if (!info->tx_enabled)
1032 usc_start_transmitter(info);
1033 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1034
1035 } /* end of mgsl_start() */
1036
1037 /*
1038 * Bottom half work queue access functions
1039 */
1040
1041 /* mgsl_bh_action() Return next bottom half action to perform.
1042 * Return Value: BH action code or 0 if nothing to do.
1043 */
1044 static int mgsl_bh_action(struct mgsl_struct *info)
1045 {
1046 unsigned long flags;
1047 int rc = 0;
1048
1049 spin_lock_irqsave(&info->irq_spinlock,flags);
1050
1051 if (info->pending_bh & BH_RECEIVE) {
1052 info->pending_bh &= ~BH_RECEIVE;
1053 rc = BH_RECEIVE;
1054 } else if (info->pending_bh & BH_TRANSMIT) {
1055 info->pending_bh &= ~BH_TRANSMIT;
1056 rc = BH_TRANSMIT;
1057 } else if (info->pending_bh & BH_STATUS) {
1058 info->pending_bh &= ~BH_STATUS;
1059 rc = BH_STATUS;
1060 }
1061
1062 if (!rc) {
1063 /* Mark BH routine as complete */
1064 info->bh_running = 0;
1065 info->bh_requested = 0;
1066 }
1067
1068 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1069
1070 return rc;
1071 }
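/*
 * Note: this is the bottom half side of the ISR/BH handoff.  The ISR side
 * (see mgsl_interrupt) flags work and queues the task only when the bottom
 * half is not already running or requested:
 *
 *	info->pending_bh |= BH_RECEIVE;
 *	...
 *	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
 *		schedule_work(&info->task);
 *		info->bh_requested = 1;
 *	}
 */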
1072
1073 /*
1074 * Perform bottom half processing of work items queued by ISR.
1075 */
1076 static void mgsl_bh_handler(struct work_struct *work)
1077 {
1078 struct mgsl_struct *info =
1079 container_of(work, struct mgsl_struct, task);
1080 int action;
1081
1082 if (!info)
1083 return;
1084
1085 if ( debug_level >= DEBUG_LEVEL_BH )
1086 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1087 __FILE__,__LINE__,info->device_name);
1088
1089 info->bh_running = 1;
1090
1091 while((action = mgsl_bh_action(info)) != 0) {
1092
1093 /* Process work item */
1094 if ( debug_level >= DEBUG_LEVEL_BH )
1095 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1096 __FILE__,__LINE__,action);
1097
1098 switch (action) {
1099
1100 case BH_RECEIVE:
1101 mgsl_bh_receive(info);
1102 break;
1103 case BH_TRANSMIT:
1104 mgsl_bh_transmit(info);
1105 break;
1106 case BH_STATUS:
1107 mgsl_bh_status(info);
1108 break;
1109 default:
1110 /* unknown work item ID */
1111 printk("Unknown work item ID=%08X!\n", action);
1112 break;
1113 }
1114 }
1115
1116 if ( debug_level >= DEBUG_LEVEL_BH )
1117 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1118 __FILE__,__LINE__,info->device_name);
1119 }
1120
1121 static void mgsl_bh_receive(struct mgsl_struct *info)
1122 {
1123 int (*get_rx_frame)(struct mgsl_struct *info) =
1124 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1125
1126 if ( debug_level >= DEBUG_LEVEL_BH )
1127 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1128 __FILE__,__LINE__,info->device_name);
1129
1130 do
1131 {
1132 if (info->rx_rcc_underrun) {
1133 unsigned long flags;
1134 spin_lock_irqsave(&info->irq_spinlock,flags);
1135 usc_start_receiver(info);
1136 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1137 return;
1138 }
1139 } while(get_rx_frame(info));
1140 }
1141
1142 static void mgsl_bh_transmit(struct mgsl_struct *info)
1143 {
1144 struct tty_struct *tty = info->tty;
1145 unsigned long flags;
1146
1147 if ( debug_level >= DEBUG_LEVEL_BH )
1148 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1149 __FILE__,__LINE__,info->device_name);
1150
1151 if (tty) {
1152 tty_wakeup(tty);
1153 wake_up_interruptible(&tty->write_wait);
1154 }
1155
1156 /* if transmitter idle and loopmode_send_done_requested
1157 * then start echoing RxD to TxD
1158 */
1159 spin_lock_irqsave(&info->irq_spinlock,flags);
1160 if ( !info->tx_active && info->loopmode_send_done_requested )
1161 usc_loopmode_send_done( info );
1162 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1163 }
1164
1165 static void mgsl_bh_status(struct mgsl_struct *info)
1166 {
1167 if ( debug_level >= DEBUG_LEVEL_BH )
1168 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1169 __FILE__,__LINE__,info->device_name);
1170
1171 info->ri_chkcount = 0;
1172 info->dsr_chkcount = 0;
1173 info->dcd_chkcount = 0;
1174 info->cts_chkcount = 0;
1175 }
1176
1177 /* mgsl_isr_receive_status()
1178 *
1179 * Service a receive status interrupt. The type of status
1180 * interrupt is indicated by the state of the RCSR.
1181 * This is only used for HDLC mode.
1182 *
1183 * Arguments: info pointer to device instance data
1184 * Return Value: None
1185 */
1186 static void mgsl_isr_receive_status( struct mgsl_struct *info )
1187 {
1188 u16 status = usc_InReg( info, RCSR );
1189
1190 if ( debug_level >= DEBUG_LEVEL_ISR )
1191 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1192 __FILE__,__LINE__,status);
1193
1194 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1195 info->loopmode_insert_requested &&
1196 usc_loopmode_active(info) )
1197 {
1198 ++info->icount.rxabort;
1199 info->loopmode_insert_requested = FALSE;
1200
1201 /* clear CMR:13 to start echoing RxD to TxD */
1202 info->cmr_value &= ~BIT13;
1203 usc_OutReg(info, CMR, info->cmr_value);
1204
1205 /* disable received abort irq (no longer required) */
1206 usc_OutReg(info, RICR,
1207 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1208 }
1209
1210 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1211 if (status & RXSTATUS_EXITED_HUNT)
1212 info->icount.exithunt++;
1213 if (status & RXSTATUS_IDLE_RECEIVED)
1214 info->icount.rxidle++;
1215 wake_up_interruptible(&info->event_wait_q);
1216 }
1217
1218 if (status & RXSTATUS_OVERRUN){
1219 info->icount.rxover++;
1220 usc_process_rxoverrun_sync( info );
1221 }
1222
1223 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1224 usc_UnlatchRxstatusBits( info, status );
1225
1226 } /* end of mgsl_isr_receive_status() */
1227
1228 /* mgsl_isr_transmit_status()
1229 *
1230 * Service a transmit status interrupt
1231 * HDLC mode: end of transmit frame
1232 * Async mode: all data is sent
1233 * Transmit status is indicated by bits in the TCSR.
1234 *
1235 * Arguments: info pointer to device instance data
1236 * Return Value: None
1237 */
1238 static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1239 {
1240 u16 status = usc_InReg( info, TCSR );
1241
1242 if ( debug_level >= DEBUG_LEVEL_ISR )
1243 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1244 __FILE__,__LINE__,status);
1245
1246 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1247 usc_UnlatchTxstatusBits( info, status );
1248
1249 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1250 {
1251 /* finished sending HDLC abort. This may leave */
1252 /* the TxFifo with data from the aborted frame */
1253 /* so purge the TxFifo. Also shutdown the DMA */
1254 /* channel in case there is data remaining in */
1255 /* the DMA buffer */
1256 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1257 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1258 }
1259
1260 if ( status & TXSTATUS_EOF_SENT )
1261 info->icount.txok++;
1262 else if ( status & TXSTATUS_UNDERRUN )
1263 info->icount.txunder++;
1264 else if ( status & TXSTATUS_ABORT_SENT )
1265 info->icount.txabort++;
1266 else
1267 info->icount.txunder++;
1268
1269 info->tx_active = 0;
1270 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1271 del_timer(&info->tx_timer);
1272
1273 if ( info->drop_rts_on_tx_done ) {
1274 usc_get_serial_signals( info );
1275 if ( info->serial_signals & SerialSignal_RTS ) {
1276 info->serial_signals &= ~SerialSignal_RTS;
1277 usc_set_serial_signals( info );
1278 }
1279 info->drop_rts_on_tx_done = 0;
1280 }
1281
1282 #if SYNCLINK_GENERIC_HDLC
1283 if (info->netcount)
1284 hdlcdev_tx_done(info);
1285 else
1286 #endif
1287 {
1288 if (info->tty->stopped || info->tty->hw_stopped) {
1289 usc_stop_transmitter(info);
1290 return;
1291 }
1292 info->pending_bh |= BH_TRANSMIT;
1293 }
1294
1295 } /* end of mgsl_isr_transmit_status() */
1296
1297 /* mgsl_isr_io_pin()
1298 *
1299 * Service an Input/Output pin interrupt. The type of
1300 * interrupt is indicated by bits in the MISR
1301 *
1302 * Arguments: info pointer to device instance data
1303 * Return Value: None
1304 */
1305 static void mgsl_isr_io_pin( struct mgsl_struct *info )
1306 {
1307 struct mgsl_icount *icount;
1308 u16 status = usc_InReg( info, MISR );
1309
1310 if ( debug_level >= DEBUG_LEVEL_ISR )
1311 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1312 __FILE__,__LINE__,status);
1313
1314 usc_ClearIrqPendingBits( info, IO_PIN );
1315 usc_UnlatchIostatusBits( info, status );
1316
1317 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1318 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1319 icount = &info->icount;
1320 /* update input line counters */
1321 if (status & MISCSTATUS_RI_LATCHED) {
1322 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1323 usc_DisablestatusIrqs(info,SICR_RI);
1324 icount->rng++;
1325 if ( status & MISCSTATUS_RI )
1326 info->input_signal_events.ri_up++;
1327 else
1328 info->input_signal_events.ri_down++;
1329 }
1330 if (status & MISCSTATUS_DSR_LATCHED) {
1331 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1332 usc_DisablestatusIrqs(info,SICR_DSR);
1333 icount->dsr++;
1334 if ( status & MISCSTATUS_DSR )
1335 info->input_signal_events.dsr_up++;
1336 else
1337 info->input_signal_events.dsr_down++;
1338 }
1339 if (status & MISCSTATUS_DCD_LATCHED) {
1340 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1341 usc_DisablestatusIrqs(info,SICR_DCD);
1342 icount->dcd++;
1343 if (status & MISCSTATUS_DCD) {
1344 info->input_signal_events.dcd_up++;
1345 } else
1346 info->input_signal_events.dcd_down++;
1347 #if SYNCLINK_GENERIC_HDLC
1348 if (info->netcount) {
1349 if (status & MISCSTATUS_DCD)
1350 netif_carrier_on(info->netdev);
1351 else
1352 netif_carrier_off(info->netdev);
1353 }
1354 #endif
1355 }
1356 if (status & MISCSTATUS_CTS_LATCHED)
1357 {
1358 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1359 usc_DisablestatusIrqs(info,SICR_CTS);
1360 icount->cts++;
1361 if ( status & MISCSTATUS_CTS )
1362 info->input_signal_events.cts_up++;
1363 else
1364 info->input_signal_events.cts_down++;
1365 }
1366 wake_up_interruptible(&info->status_event_wait_q);
1367 wake_up_interruptible(&info->event_wait_q);
1368
1369 if ( (info->flags & ASYNC_CHECK_CD) &&
1370 (status & MISCSTATUS_DCD_LATCHED) ) {
1371 if ( debug_level >= DEBUG_LEVEL_ISR )
1372 printk("%s CD now %s...", info->device_name,
1373 (status & MISCSTATUS_DCD) ? "on" : "off");
1374 if (status & MISCSTATUS_DCD)
1375 wake_up_interruptible(&info->open_wait);
1376 else {
1377 if ( debug_level >= DEBUG_LEVEL_ISR )
1378 printk("doing serial hangup...");
1379 if (info->tty)
1380 tty_hangup(info->tty);
1381 }
1382 }
1383
1384 if ( (info->flags & ASYNC_CTS_FLOW) &&
1385 (status & MISCSTATUS_CTS_LATCHED) ) {
1386 if (info->tty->hw_stopped) {
1387 if (status & MISCSTATUS_CTS) {
1388 if ( debug_level >= DEBUG_LEVEL_ISR )
1389 printk("CTS tx start...");
1390 if (info->tty)
1391 info->tty->hw_stopped = 0;
1392 usc_start_transmitter(info);
1393 info->pending_bh |= BH_TRANSMIT;
1394 return;
1395 }
1396 } else {
1397 if (!(status & MISCSTATUS_CTS)) {
1398 if ( debug_level >= DEBUG_LEVEL_ISR )
1399 printk("CTS tx stop...");
1400 if (info->tty)
1401 info->tty->hw_stopped = 1;
1402 usc_stop_transmitter(info);
1403 }
1404 }
1405 }
1406 }
1407
1408 info->pending_bh |= BH_STATUS;
1409
1410 /* for diagnostics set IRQ flag */
1411 if ( status & MISCSTATUS_TXC_LATCHED ){
1412 usc_OutReg( info, SICR,
1413 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1414 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1415 info->irq_occurred = 1;
1416 }
1417
1418 } /* end of mgsl_isr_io_pin() */
1419
1420 /* mgsl_isr_transmit_data()
1421 *
1422 * Service a transmit data interrupt (async mode only).
1423 *
1424 * Arguments: info pointer to device instance data
1425 * Return Value: None
1426 */
1427 static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1428 {
1429 if ( debug_level >= DEBUG_LEVEL_ISR )
1430 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1431 __FILE__,__LINE__,info->xmit_cnt);
1432
1433 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1434
1435 if (info->tty->stopped || info->tty->hw_stopped) {
1436 usc_stop_transmitter(info);
1437 return;
1438 }
1439
1440 if ( info->xmit_cnt )
1441 usc_load_txfifo( info );
1442 else
1443 info->tx_active = 0;
1444
1445 if (info->xmit_cnt < WAKEUP_CHARS)
1446 info->pending_bh |= BH_TRANSMIT;
1447
1448 } /* end of mgsl_isr_transmit_data() */
1449
1450 /* mgsl_isr_receive_data()
1451 *
1452 * Service a receive data interrupt. This occurs
1453 * when operating in asynchronous interrupt transfer mode.
1454 * The receive data FIFO is flushed to the receive data buffers.
1455 *
1456 * Arguments: info pointer to device instance data
1457 * Return Value: None
1458 */
1459 static void mgsl_isr_receive_data( struct mgsl_struct *info )
1460 {
1461 int Fifocount;
1462 u16 status;
1463 int work = 0;
1464 unsigned char DataByte;
1465 struct tty_struct *tty = info->tty;
1466 struct mgsl_icount *icount = &info->icount;
1467
1468 if ( debug_level >= DEBUG_LEVEL_ISR )
1469 printk("%s(%d):mgsl_isr_receive_data\n",
1470 __FILE__,__LINE__);
1471
1472 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1473
1474 /* select FIFO status for RICR readback */
1475 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1476
1477 /* clear the Wordstatus bit so that status readback */
1478 /* only reflects the status of this byte */
1479 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1480
1481 /* flush the receive FIFO */
1482
1483 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1484 int flag;
1485
1486 /* read one byte from RxFIFO */
1487 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1488 info->io_base + CCAR );
1489 DataByte = inb( info->io_base + CCAR );
1490
1491 /* get the status of the received byte */
1492 status = usc_InReg(info, RCSR);
1493 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1494 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1495 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1496
1497 icount->rx++;
1498
1499 flag = 0;
1500 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1501 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1502 printk("rxerr=%04X\n",status);
1503 /* update error statistics */
1504 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1505 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1506 icount->brk++;
1507 } else if (status & RXSTATUS_PARITY_ERROR)
1508 icount->parity++;
1509 else if (status & RXSTATUS_FRAMING_ERROR)
1510 icount->frame++;
1511 else if (status & RXSTATUS_OVERRUN) {
1512 /* must issue purge fifo cmd before */
1513 /* 16C32 accepts more receive chars */
1514 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1515 icount->overrun++;
1516 }
1517
1518 /* discard char if tty control flags say so */
1519 if (status & info->ignore_status_mask)
1520 continue;
1521
1522 status &= info->read_status_mask;
1523
1524 if (status & RXSTATUS_BREAK_RECEIVED) {
1525 flag = TTY_BREAK;
1526 if (info->flags & ASYNC_SAK)
1527 do_SAK(tty);
1528 } else if (status & RXSTATUS_PARITY_ERROR)
1529 flag = TTY_PARITY;
1530 else if (status & RXSTATUS_FRAMING_ERROR)
1531 flag = TTY_FRAME;
1532 } /* end of if (error) */
1533 work += tty_insert_flip_char(tty, DataByte, flag);
1534 if (status & RXSTATUS_OVERRUN) {
1535 /* Overrun is special, since it's
1536 * reported immediately, and doesn't
1537 * affect the current character
1538 */
1539 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1540 }
1541 }
1542
1543 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1544 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1545 __FILE__,__LINE__,icount->rx,icount->brk,
1546 icount->parity,icount->frame,icount->overrun);
1547 }
1548
1549 if(work)
1550 tty_flip_buffer_push(tty);
1551 }
1552
1553 /* mgsl_isr_misc()
1554 *
1555 * Service a miscellaneous interrupt source.
1556 *
1557 * Arguments: info pointer to device extension (instance data)
1558 * Return Value: None
1559 */
1560 static void mgsl_isr_misc( struct mgsl_struct *info )
1561 {
1562 u16 status = usc_InReg( info, MISR );
1563
1564 if ( debug_level >= DEBUG_LEVEL_ISR )
1565 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1566 __FILE__,__LINE__,status);
1567
1568 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1569 (info->params.mode == MGSL_MODE_HDLC)) {
1570
1571 /* turn off receiver and rx DMA */
1572 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1573 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1574 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1575 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1576 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1577
1578 /* schedule BH handler to restart receiver */
1579 info->pending_bh |= BH_RECEIVE;
1580 info->rx_rcc_underrun = 1;
1581 }
1582
1583 usc_ClearIrqPendingBits( info, MISC );
1584 usc_UnlatchMiscstatusBits( info, status );
1585
1586 } /* end of mgsl_isr_misc() */
1587
1588 /* mgsl_isr_null()
1589 *
1590 * Services undefined interrupt vectors from the
1591 * USC. (hence this function SHOULD never be called)
1592 *
1593 * Arguments: info pointer to device extension (instance data)
1594 * Return Value: None
1595 */
1596 static void mgsl_isr_null( struct mgsl_struct *info )
1597 {
1598
1599 } /* end of mgsl_isr_null() */
1600
1601 /* mgsl_isr_receive_dma()
1602 *
1603 * Service a receive DMA channel interrupt.
1604 * For this driver there are two sources of receive DMA interrupts
1605 * as identified in the Receive DMA mode Register (RDMR):
1606 *
1607 * BIT3 EOA/EOL End of List, all receive buffers in receive
1608 * buffer list have been filled (no more free buffers
1609 * available). The DMA controller has shut down.
1610 *
1611 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1612 * DMA buffer is terminated in response to completion
1613 * of a good frame or a frame with errors. The status
1614 * of the frame is stored in the buffer entry in the
1615 * list of receive buffer entries.
1616 *
1617 * Arguments: info pointer to device instance data
1618 * Return Value: None
1619 */
1620 static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1621 {
1622 u16 status;
1623
1624 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1625 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1626
1627 /* Read the receive DMA status to identify interrupt type. */
1628 /* This also clears the status bits. */
1629 status = usc_InDmaReg( info, RDMR );
1630
1631 if ( debug_level >= DEBUG_LEVEL_ISR )
1632 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1633 __FILE__,__LINE__,info->device_name,status);
1634
1635 info->pending_bh |= BH_RECEIVE;
1636
1637 if ( status & BIT3 ) {
1638 info->rx_overflow = 1;
1639 info->icount.buf_overrun++;
1640 }
1641
1642 } /* end of mgsl_isr_receive_dma() */
1643
1644 /* mgsl_isr_transmit_dma()
1645 *
1646 * This function services a transmit DMA channel interrupt.
1647 *
1648 * For this driver there is one source of transmit DMA interrupts
1649 * as identified in the Transmit DMA Mode Register (TDMR):
1650 *
1651 * BIT2 EOB End of Buffer. This interrupt occurs when a
1652 * transmit DMA buffer has been emptied.
1653 *
1654 * The driver maintains enough transmit DMA buffers to hold at least
1655 * one max frame size transmit frame. When operating in a buffered
1656 * transmit mode, there may be enough transmit DMA buffers to hold at
1657 * least two or more max frame size frames. On an EOB condition,
1658 * determine if there are any queued transmit buffers and copy into
1659 * transmit DMA buffers if we have room.
1660 *
1661 * Arguments: info pointer to device instance data
1662 * Return Value: None
1663 */
1664 static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1665 {
1666 u16 status;
1667
1668 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1669 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1670
1671 /* Read the transmit DMA status to identify interrupt type. */
1672 /* This also clears the status bits. */
1673
1674 status = usc_InDmaReg( info, TDMR );
1675
1676 if ( debug_level >= DEBUG_LEVEL_ISR )
1677 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1678 __FILE__,__LINE__,info->device_name,status);
1679
1680 if ( status & BIT2 ) {
1681 --info->tx_dma_buffers_used;
1682
1683 /* if there are transmit frames queued,
1684 * try to load the next one
1685 */
1686 if ( load_next_tx_holding_buffer(info) ) {
1687 /* if call returns non-zero value, we have
1688 * at least one free tx holding buffer
1689 */
1690 info->pending_bh |= BH_TRANSMIT;
1691 }
1692 }
1693
1694 } /* end of mgsl_isr_transmit_dma() */
1695
1696 /* mgsl_interrupt()
1697 *
1698 * Interrupt service routine entry point.
1699 *
1700 * Arguments:
1701 *
1702 * irq interrupt number that caused interrupt
1703 * dev_id device ID supplied during interrupt registration
1704 *
1705 * Return Value: None
1706 */
1707 static irqreturn_t mgsl_interrupt(int irq, void *dev_id)
1708 {
1709 struct mgsl_struct * info;
1710 u16 UscVector;
1711 u16 DmaVector;
1712
1713 if ( debug_level >= DEBUG_LEVEL_ISR )
1714 printk("%s(%d):mgsl_interrupt(%d)entry.\n",
1715 __FILE__,__LINE__,irq);
1716
1717 info = (struct mgsl_struct *)dev_id;
1718 if (!info)
1719 return IRQ_NONE;
1720
1721 spin_lock(&info->irq_spinlock);
1722
1723 for(;;) {
1724 /* Read the interrupt vectors from hardware. */
1725 UscVector = usc_InReg(info, IVR) >> 9;
1726 DmaVector = usc_InDmaReg(info, DIVR);
1727
1728 if ( debug_level >= DEBUG_LEVEL_ISR )
1729 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1730 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1731
1732 if ( !UscVector && !DmaVector )
1733 break;
1734
1735 /* Dispatch interrupt vector */
1736 if ( UscVector )
1737 (*UscIsrTable[UscVector])(info);
1738 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1739 mgsl_isr_transmit_dma(info);
1740 else
1741 mgsl_isr_receive_dma(info);
1742
1743 if ( info->isr_overflow ) {
1744 printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
1745 __FILE__,__LINE__,info->device_name, irq);
1746 usc_DisableMasterIrqBit(info);
1747 usc_DisableDmaInterrupts(info,DICR_MASTER);
1748 break;
1749 }
1750 }
1751
1752 /* Request bottom half processing if there's something
1753 * for it to do and the bh is not already running
1754 */
1755
1756 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1757 if ( debug_level >= DEBUG_LEVEL_ISR )
1758 printk("%s(%d):%s queueing bh task.\n",
1759 __FILE__,__LINE__,info->device_name);
1760 schedule_work(&info->task);
1761 info->bh_requested = 1;
1762 }
1763
1764 spin_unlock(&info->irq_spinlock);
1765
1766 if ( debug_level >= DEBUG_LEVEL_ISR )
1767 printk("%s(%d):mgsl_interrupt(%d)exit.\n",
1768 __FILE__,__LINE__,irq);
1769 return IRQ_HANDLED;
1770 } /* end of mgsl_interrupt() */
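
/*
 * Usage sketch (hedged; the driver's actual IRQ claim lives in its resource
 * allocation code, and the flags value of 0 below is only an illustrative
 * assumption): a handler with this signature is installed and removed with
 * request_irq()/free_irq(), passing the device extension as dev_id.
 *
 *	if (request_irq(info->irq_level, mgsl_interrupt, 0,
 *			info->device_name, info) < 0)
 *		return -EBUSY;			// could not claim the IRQ
 *	...
 *	free_irq(info->irq_level, info);	// when releasing resources
 */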
1771
1772 /* startup()
1773 *
1774 * Initialize and start device.
1775 *
1776 * Arguments: info pointer to device instance data
1777 * Return Value: 0 if success, otherwise error code
1778 */
1779 static int startup(struct mgsl_struct * info)
1780 {
1781 int retval = 0;
1782
1783 if ( debug_level >= DEBUG_LEVEL_INFO )
1784 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1785
1786 if (info->flags & ASYNC_INITIALIZED)
1787 return 0;
1788
1789 if (!info->xmit_buf) {
1790 /* allocate a page of memory for a transmit buffer */
1791 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1792 if (!info->xmit_buf) {
1793 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1794 __FILE__,__LINE__,info->device_name);
1795 return -ENOMEM;
1796 }
1797 }
1798
1799 info->pending_bh = 0;
1800
1801 memset(&info->icount, 0, sizeof(info->icount));
1802
1803 init_timer(&info->tx_timer);
1804 info->tx_timer.data = (unsigned long)info;
1805 info->tx_timer.function = mgsl_tx_timeout;
1806
1807 /* Allocate and claim adapter resources */
1808 retval = mgsl_claim_resources(info);
1809
1810 /* perform existence check and diagnostics */
1811 if ( !retval )
1812 retval = mgsl_adapter_test(info);
1813
1814 if ( retval ) {
1815 if (capable(CAP_SYS_ADMIN) && info->tty)
1816 set_bit(TTY_IO_ERROR, &info->tty->flags);
1817 mgsl_release_resources(info);
1818 return retval;
1819 }
1820
1821 /* program hardware for current parameters */
1822 mgsl_change_params(info);
1823
1824 if (info->tty)
1825 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1826
1827 info->flags |= ASYNC_INITIALIZED;
1828
1829 return 0;
1830
1831 } /* end of startup() */
1832
1833 /* shutdown()
1834 *
1835 * Called by mgsl_close() and mgsl_hangup() to shut down the hardware
1836 *
1837 * Arguments: info pointer to device instance data
1838 * Return Value: None
1839 */
1840 static void shutdown(struct mgsl_struct * info)
1841 {
1842 unsigned long flags;
1843
1844 if (!(info->flags & ASYNC_INITIALIZED))
1845 return;
1846
1847 if (debug_level >= DEBUG_LEVEL_INFO)
1848 printk("%s(%d):mgsl_shutdown(%s)\n",
1849 __FILE__,__LINE__, info->device_name );
1850
1851 /* clear status wait queue because status changes */
1852 /* can't happen after shutting down the hardware */
1853 wake_up_interruptible(&info->status_event_wait_q);
1854 wake_up_interruptible(&info->event_wait_q);
1855
1856 del_timer(&info->tx_timer);
1857
1858 if (info->xmit_buf) {
1859 free_page((unsigned long) info->xmit_buf);
1860 info->xmit_buf = NULL;
1861 }
1862
1863 spin_lock_irqsave(&info->irq_spinlock,flags);
1864 usc_DisableMasterIrqBit(info);
1865 usc_stop_receiver(info);
1866 usc_stop_transmitter(info);
1867 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1868 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1869 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1870
1871 /* Disable DMAEN (Port 7, Bit 14) */
1872 /* This disconnects the DMA request signal from the ISA bus */
1873 /* on the ISA adapter. This has no effect for the PCI adapter */
1874 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1875
1876 /* Disable INTEN (Port 6, Bit12) */
1877 /* This disconnects the IRQ request signal to the ISA bus */
1878 /* on the ISA adapter. This has no effect for the PCI adapter */
1879 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1880
1881 if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
1882 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1883 usc_set_serial_signals(info);
1884 }
1885
1886 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1887
1888 mgsl_release_resources(info);
1889
1890 if (info->tty)
1891 set_bit(TTY_IO_ERROR, &info->tty->flags);
1892
1893 info->flags &= ~ASYNC_INITIALIZED;
1894
1895 } /* end of shutdown() */
1896
1897 static void mgsl_program_hw(struct mgsl_struct *info)
1898 {
1899 unsigned long flags;
1900
1901 spin_lock_irqsave(&info->irq_spinlock,flags);
1902
1903 usc_stop_receiver(info);
1904 usc_stop_transmitter(info);
1905 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1906
1907 if (info->params.mode == MGSL_MODE_HDLC ||
1908 info->params.mode == MGSL_MODE_RAW ||
1909 info->netcount)
1910 usc_set_sync_mode(info);
1911 else
1912 usc_set_async_mode(info);
1913
1914 usc_set_serial_signals(info);
1915
1916 info->dcd_chkcount = 0;
1917 info->cts_chkcount = 0;
1918 info->ri_chkcount = 0;
1919 info->dsr_chkcount = 0;
1920
1921 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1922 usc_EnableInterrupts(info, IO_PIN);
1923 usc_get_serial_signals(info);
1924
1925 if (info->netcount || info->tty->termios->c_cflag & CREAD)
1926 usc_start_receiver(info);
1927
1928 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1929 }
1930
1931 /* Reconfigure adapter based on new parameters
1932 */
1933 static void mgsl_change_params(struct mgsl_struct *info)
1934 {
1935 unsigned cflag;
1936 int bits_per_char;
1937
1938 if (!info->tty || !info->tty->termios)
1939 return;
1940
1941 if (debug_level >= DEBUG_LEVEL_INFO)
1942 printk("%s(%d):mgsl_change_params(%s)\n",
1943 __FILE__,__LINE__, info->device_name );
1944
1945 cflag = info->tty->termios->c_cflag;
1946
1947 /* if B0 rate (hangup) specified then negate DTR and RTS */
1948 /* otherwise assert DTR and RTS */
1949 if (cflag & CBAUD)
1950 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1951 else
1952 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1953
1954 /* byte size and parity */
1955
1956 switch (cflag & CSIZE) {
1957 case CS5: info->params.data_bits = 5; break;
1958 case CS6: info->params.data_bits = 6; break;
1959 case CS7: info->params.data_bits = 7; break;
1960 case CS8: info->params.data_bits = 8; break;
1961 /* Never happens, but GCC is too dumb to figure it out */
1962 default: info->params.data_bits = 7; break;
1963 }
1964
1965 if (cflag & CSTOPB)
1966 info->params.stop_bits = 2;
1967 else
1968 info->params.stop_bits = 1;
1969
1970 info->params.parity = ASYNC_PARITY_NONE;
1971 if (cflag & PARENB) {
1972 if (cflag & PARODD)
1973 info->params.parity = ASYNC_PARITY_ODD;
1974 else
1975 info->params.parity = ASYNC_PARITY_EVEN;
1976 #ifdef CMSPAR
1977 if (cflag & CMSPAR)
1978 info->params.parity = ASYNC_PARITY_SPACE;
1979 #endif
1980 }
1981
1982 /* calculate number of jiffies to transmit a full
1983 * FIFO (32 bytes) at specified data rate
1984 */
1985 bits_per_char = info->params.data_bits +
1986 info->params.stop_bits + 1;
1987
1988 /* if port data rate is set to 460800 or less then
1989 * allow tty settings to override, otherwise keep the
1990 * current data rate.
1991 */
1992 if (info->params.data_rate <= 460800)
1993 info->params.data_rate = tty_get_baud_rate(info->tty);
1994
1995 if ( info->params.data_rate ) {
1996 info->timeout = (32*HZ*bits_per_char) /
1997 info->params.data_rate;
1998 }
1999 info->timeout += HZ/50; /* Add .02 seconds of slop */
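	/* Worked example (assuming HZ=100): with 8 data bits and 1 stop bit,
	 * bits_per_char = 8 + 1 + 1 = 10. At 9600bps the 32 byte FIFO takes
	 * (32 * 100 * 10) / 9600 = 3 jiffies, plus HZ/50 = 2 jiffies of slop,
	 * giving info->timeout = 5 jiffies (about 50ms).
	 */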
2000
2001 if (cflag & CRTSCTS)
2002 info->flags |= ASYNC_CTS_FLOW;
2003 else
2004 info->flags &= ~ASYNC_CTS_FLOW;
2005
2006 if (cflag & CLOCAL)
2007 info->flags &= ~ASYNC_CHECK_CD;
2008 else
2009 info->flags |= ASYNC_CHECK_CD;
2010
2011 /* process tty input control flags */
2012
2013 info->read_status_mask = RXSTATUS_OVERRUN;
2014 if (I_INPCK(info->tty))
2015 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2016 if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
2017 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
2018
2019 if (I_IGNPAR(info->tty))
2020 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2021 if (I_IGNBRK(info->tty)) {
2022 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
2023 /* If ignoring parity and break indicators, ignore
2024 * overruns too. (For real raw support).
2025 */
2026 if (I_IGNPAR(info->tty))
2027 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2028 }
2029
2030 mgsl_program_hw(info);
2031
2032 } /* end of mgsl_change_params() */
2033
2034 /* mgsl_put_char()
2035 *
2036 * Add a character to the transmit buffer.
2037 *
2038 * Arguments: tty pointer to tty information structure
2039 * ch character to add to transmit buffer
2040 *
2041 * Return Value: None
2042 */
2043 static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2044 {
2045 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2046 unsigned long flags;
2047
2048 if ( debug_level >= DEBUG_LEVEL_INFO ) {
2049 printk( "%s(%d):mgsl_put_char(%d) on %s\n",
2050 __FILE__,__LINE__,ch,info->device_name);
2051 }
2052
2053 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2054 return;
2055
2056 if (!tty || !info->xmit_buf)
2057 return;
2058
2059 spin_lock_irqsave(&info->irq_spinlock,flags);
2060
2061 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
2062
2063 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2064 info->xmit_buf[info->xmit_head++] = ch;
2065 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2066 info->xmit_cnt++;
2067 }
2068 }
2069
2070 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2071
2072 } /* end of mgsl_put_char() */
2073
2074 /* mgsl_flush_chars()
2075 *
2076 * Enable transmitter so remaining characters in the
2077 * transmit buffer are sent.
2078 *
2079 * Arguments: tty pointer to tty information structure
2080 * Return Value: None
2081 */
2082 static void mgsl_flush_chars(struct tty_struct *tty)
2083 {
2084 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2085 unsigned long flags;
2086
2087 if ( debug_level >= DEBUG_LEVEL_INFO )
2088 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2089 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2090
2091 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2092 return;
2093
2094 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2095 !info->xmit_buf)
2096 return;
2097
2098 if ( debug_level >= DEBUG_LEVEL_INFO )
2099 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2100 __FILE__,__LINE__,info->device_name );
2101
2102 spin_lock_irqsave(&info->irq_spinlock,flags);
2103
2104 if (!info->tx_active) {
2105 if ( (info->params.mode == MGSL_MODE_HDLC ||
2106 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2107 /* operating in synchronous (frame oriented) mode */
2108 /* copy data from circular xmit_buf to */
2109 /* transmit DMA buffer. */
2110 mgsl_load_tx_dma_buffer(info,
2111 info->xmit_buf,info->xmit_cnt);
2112 }
2113 usc_start_transmitter(info);
2114 }
2115
2116 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2117
2118 } /* end of mgsl_flush_chars() */
2119
2120 /* mgsl_write()
2121 *
2122 * Send a block of data
2123 *
2124 * Arguments:
2125 *
2126 * tty pointer to tty information structure
2127 * buf pointer to buffer containing send data
2128 * count size of send data in bytes
2129 *
2130 * Return Value: number of characters written
2131 */
2132 static int mgsl_write(struct tty_struct * tty,
2133 const unsigned char *buf, int count)
2134 {
2135 int c, ret = 0;
2136 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2137 unsigned long flags;
2138
2139 if ( debug_level >= DEBUG_LEVEL_INFO )
2140 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2141 __FILE__,__LINE__,info->device_name,count);
2142
2143 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2144 goto cleanup;
2145
2146 if (!tty || !info->xmit_buf)
2147 goto cleanup;
2148
2149 if ( info->params.mode == MGSL_MODE_HDLC ||
2150 info->params.mode == MGSL_MODE_RAW ) {
2151 /* operating in synchronous (frame oriented) mode */
2153 if (info->tx_active) {
2154
2155 if ( info->params.mode == MGSL_MODE_HDLC ) {
2156 ret = 0;
2157 goto cleanup;
2158 }
2159 /* transmitter is actively sending data -
2160 * if we have multiple transmit dma and
2161 * holding buffers, attempt to queue this
2162 * frame for transmission at a later time.
2163 */
2164 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2165 /* no tx holding buffers available */
2166 ret = 0;
2167 goto cleanup;
2168 }
2169
2170 /* queue transmit frame request */
2171 ret = count;
2172 save_tx_buffer_request(info,buf,count);
2173
2174 /* if we have sufficient tx dma buffers,
2175 * load the next buffered tx request
2176 */
2177 spin_lock_irqsave(&info->irq_spinlock,flags);
2178 load_next_tx_holding_buffer(info);
2179 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2180 goto cleanup;
2181 }
2182
2183 /* if operating in HDLC LoopMode and the adapter */
2184 /* has yet to be inserted into the loop, we can't */
2185 /* transmit */
2186
2187 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2188 !usc_loopmode_active(info) )
2189 {
2190 ret = 0;
2191 goto cleanup;
2192 }
2193
2194 if ( info->xmit_cnt ) {
2195 /* Send data accumulated from mgsl_put_char() calls */
2196 /* as a frame and wait before accepting more data. */
2197 ret = 0;
2198
2199 /* copy data from circular xmit_buf to */
2200 /* transmit DMA buffer. */
2201 mgsl_load_tx_dma_buffer(info,
2202 info->xmit_buf,info->xmit_cnt);
2203 if ( debug_level >= DEBUG_LEVEL_INFO )
2204 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2205 __FILE__,__LINE__,info->device_name);
2206 } else {
2207 if ( debug_level >= DEBUG_LEVEL_INFO )
2208 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2209 __FILE__,__LINE__,info->device_name);
2210 ret = count;
2211 info->xmit_cnt = count;
2212 mgsl_load_tx_dma_buffer(info,buf,count);
2213 }
2214 } else {
2215 while (1) {
2216 spin_lock_irqsave(&info->irq_spinlock,flags);
2217 c = min_t(int, count,
2218 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2219 SERIAL_XMIT_SIZE - info->xmit_head));
2220 if (c <= 0) {
2221 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2222 break;
2223 }
2224 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2225 info->xmit_head = ((info->xmit_head + c) &
2226 (SERIAL_XMIT_SIZE-1));
2227 info->xmit_cnt += c;
2228 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2229 buf += c;
2230 count -= c;
2231 ret += c;
2232 }
2233 }
2234
2235 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2236 spin_lock_irqsave(&info->irq_spinlock,flags);
2237 if (!info->tx_active)
2238 usc_start_transmitter(info);
2239 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2240 }
2241 cleanup:
2242 if ( debug_level >= DEBUG_LEVEL_INFO )
2243 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2244 __FILE__,__LINE__,info->device_name,ret);
2245
2246 return ret;
2247
2248 } /* end of mgsl_write() */
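
/*
 * Usage sketch (user space, hedged): in HDLC or RAW mode each write() call
 * carries exactly one frame. The device node name below is an assumption;
 * the actual name depends on how the tty driver is registered.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	ssize_t send_frame(const unsigned char *frame, size_t len)
 *	{
 *		ssize_t rc = -1;
 *		int fd = open("/dev/ttySL0", O_RDWR);	// assumed node name
 *		if (fd >= 0) {
 *			rc = write(fd, frame, len);	// one call == one frame
 *			close(fd);
 *		}
 *		return rc;
 *	}
 */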
2249
2250 /* mgsl_write_room()
2251 *
2252 * Return the count of free bytes in transmit buffer
2253 *
2254 * Arguments: tty pointer to tty info structure
2255 * Return Value: count of free bytes in the transmit buffer
2256 */
2257 static int mgsl_write_room(struct tty_struct *tty)
2258 {
2259 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2260 int ret;
2261
2262 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2263 return 0;
2264 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2265 if (ret < 0)
2266 ret = 0;
2267
2268 if (debug_level >= DEBUG_LEVEL_INFO)
2269 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2270 __FILE__,__LINE__, info->device_name,ret );
2271
2272 if ( info->params.mode == MGSL_MODE_HDLC ||
2273 info->params.mode == MGSL_MODE_RAW ) {
2274 /* operating in synchronous (frame oriented) mode */
2275 if ( info->tx_active )
2276 return 0;
2277 else
2278 return HDLC_MAX_FRAME_SIZE;
2279 }
2280
2281 return ret;
2282
2283 } /* end of mgsl_write_room() */
2284
2285 /* mgsl_chars_in_buffer()
2286 *
2287 * Return the count of bytes in transmit buffer
2288 *
2289 * Arguments: tty pointer to tty info structure
2290 * Return Value: count of bytes in the transmit buffer
2291 */
2292 static int mgsl_chars_in_buffer(struct tty_struct *tty)
2293 {
2294 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2295
2296 if (debug_level >= DEBUG_LEVEL_INFO)
2297 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2298 __FILE__,__LINE__, info->device_name );
2299
2300 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2301 return 0;
2302
2303 if (debug_level >= DEBUG_LEVEL_INFO)
2304 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2305 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2306
2307 if ( info->params.mode == MGSL_MODE_HDLC ||
2308 info->params.mode == MGSL_MODE_RAW ) {
2309 /* operating in synchronous (frame oriented) mode */
2310 if ( info->tx_active )
2311 return info->max_frame_size;
2312 else
2313 return 0;
2314 }
2315
2316 return info->xmit_cnt;
2317 } /* end of mgsl_chars_in_buffer() */
2318
2319 /* mgsl_flush_buffer()
2320 *
2321 * Discard all data in the send buffer
2322 *
2323 * Arguments: tty pointer to tty info structure
2324 * Return Value: None
2325 */
2326 static void mgsl_flush_buffer(struct tty_struct *tty)
2327 {
2328 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2329 unsigned long flags;
2330
2331 if (debug_level >= DEBUG_LEVEL_INFO)
2332 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2333 __FILE__,__LINE__, info->device_name );
2334
2335 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2336 return;
2337
2338 spin_lock_irqsave(&info->irq_spinlock,flags);
2339 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2340 del_timer(&info->tx_timer);
2341 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2342
2343 wake_up_interruptible(&tty->write_wait);
2344 tty_wakeup(tty);
2345 }
2346
2347 /* mgsl_send_xchar()
2348 *
2349 * Send a high-priority XON/XOFF character
2350 *
2351 * Arguments: tty pointer to tty info structure
2352 * ch character to send
2353 * Return Value: None
2354 */
2355 static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2356 {
2357 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2358 unsigned long flags;
2359
2360 if (debug_level >= DEBUG_LEVEL_INFO)
2361 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2362 __FILE__,__LINE__, info->device_name, ch );
2363
2364 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2365 return;
2366
2367 info->x_char = ch;
2368 if (ch) {
2369 /* Make sure transmit interrupts are on */
2370 spin_lock_irqsave(&info->irq_spinlock,flags);
2371 if (!info->tx_enabled)
2372 usc_start_transmitter(info);
2373 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2374 }
2375 } /* end of mgsl_send_xchar() */
2376
2377 /* mgsl_throttle()
2378 *
2379 * Signal remote device to throttle send data (our receive data)
2380 *
2381 * Arguments: tty pointer to tty info structure
2382 * Return Value: None
2383 */
2384 static void mgsl_throttle(struct tty_struct * tty)
2385 {
2386 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2387 unsigned long flags;
2388
2389 if (debug_level >= DEBUG_LEVEL_INFO)
2390 printk("%s(%d):mgsl_throttle(%s) entry\n",
2391 __FILE__,__LINE__, info->device_name );
2392
2393 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2394 return;
2395
2396 if (I_IXOFF(tty))
2397 mgsl_send_xchar(tty, STOP_CHAR(tty));
2398
2399 if (tty->termios->c_cflag & CRTSCTS) {
2400 spin_lock_irqsave(&info->irq_spinlock,flags);
2401 info->serial_signals &= ~SerialSignal_RTS;
2402 usc_set_serial_signals(info);
2403 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2404 }
2405 } /* end of mgsl_throttle() */
2406
2407 /* mgsl_unthrottle()
2408 *
2409 * Signal remote device to stop throttling send data (our receive data)
2410 *
2411 * Arguments: tty pointer to tty info structure
2412 * Return Value: None
2413 */
2414 static void mgsl_unthrottle(struct tty_struct * tty)
2415 {
2416 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2417 unsigned long flags;
2418
2419 if (debug_level >= DEBUG_LEVEL_INFO)
2420 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2421 __FILE__,__LINE__, info->device_name );
2422
2423 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2424 return;
2425
2426 if (I_IXOFF(tty)) {
2427 if (info->x_char)
2428 info->x_char = 0;
2429 else
2430 mgsl_send_xchar(tty, START_CHAR(tty));
2431 }
2432
2433 if (tty->termios->c_cflag & CRTSCTS) {
2434 spin_lock_irqsave(&info->irq_spinlock,flags);
2435 info->serial_signals |= SerialSignal_RTS;
2436 usc_set_serial_signals(info);
2437 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2438 }
2439
2440 } /* end of mgsl_unthrottle() */
2441
2442 /* mgsl_get_stats()
2443 *
2444 * get the current serial statistics (counters)
2445 *
2446 * Arguments: info pointer to device instance data
2447 * user_icount pointer to buffer to hold returned stats
2448 *
2449 * Return Value: 0 if success, otherwise error code
2450 */
2451 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2452 {
2453 int err;
2454
2455 if (debug_level >= DEBUG_LEVEL_INFO)
2456 printk("%s(%d):mgsl_get_params(%s)\n",
2457 __FILE__,__LINE__, info->device_name);
2458
2459 if (!user_icount) {
2460 memset(&info->icount, 0, sizeof(info->icount));
2461 } else {
2462 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2463 if (err)
2464 return -EFAULT;
2465 }
2466
2467 return 0;
2468
2469 } /* end of mgsl_get_stats() */
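
/*
 * Usage sketch (user space, hedged): the counters are read back through the
 * MGSL_IOCGSTATS ioctl; passing a NULL pointer clears them instead. fd is an
 * already opened port; the structure and ioctl code are assumed to come from
 * <linux/synclink.h>.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	struct mgsl_icount icount;
 *	if (ioctl(fd, MGSL_IOCGSTATS, &icount) == 0)
 *		printf("txok=%u rxok=%u\n", icount.txok, icount.rxok);
 *	ioctl(fd, MGSL_IOCGSTATS, 0);	// NULL argument resets the counters
 */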
2470
2471 /* mgsl_get_params()
2472 *
2473 * get the current serial parameters information
2474 *
2475 * Arguments: info pointer to device instance data
2476 * user_params pointer to buffer to hold returned params
2477 *
2478 * Return Value: 0 if success, otherwise error code
2479 */
2480 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2481 {
2482 int err;
2483 if (debug_level >= DEBUG_LEVEL_INFO)
2484 printk("%s(%d):mgsl_get_params(%s)\n",
2485 __FILE__,__LINE__, info->device_name);
2486
2487 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2488 if (err) {
2489 if ( debug_level >= DEBUG_LEVEL_INFO )
2490 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2491 __FILE__,__LINE__,info->device_name);
2492 return -EFAULT;
2493 }
2494
2495 return 0;
2496
2497 } /* end of mgsl_get_params() */
2498
2499 /* mgsl_set_params()
2500 *
2501 * set the serial parameters
2502 *
2503 * Arguments:
2504 *
2505 * info pointer to device instance data
2506 * new_params user buffer containing new serial params
2507 *
2508 * Return Value: 0 if success, otherwise error code
2509 */
2510 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2511 {
2512 unsigned long flags;
2513 MGSL_PARAMS tmp_params;
2514 int err;
2515
2516 if (debug_level >= DEBUG_LEVEL_INFO)
2517 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2518 info->device_name );
2519 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2520 if (err) {
2521 if ( debug_level >= DEBUG_LEVEL_INFO )
2522 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2523 __FILE__,__LINE__,info->device_name);
2524 return -EFAULT;
2525 }
2526
2527 spin_lock_irqsave(&info->irq_spinlock,flags);
2528 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2529 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2530
2531 mgsl_change_params(info);
2532
2533 return 0;
2534
2535 } /* end of mgsl_set_params() */
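
/*
 * Usage sketch (user space, hedged): parameters are usually changed with a
 * read-modify-write of the MGSL_PARAMS block on an already opened port fd.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	MGSL_PARAMS params;
 *	if (ioctl(fd, MGSL_IOCGPARAMS, &params) == 0) {
 *		params.mode = MGSL_MODE_HDLC;	// select HDLC framing
 *		ioctl(fd, MGSL_IOCSPARAMS, &params);
 *	}
 */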
2536
2537 /* mgsl_get_txidle()
2538 *
2539 * get the current transmit idle mode
2540 *
2541 * Arguments: info pointer to device instance data
2542 * idle_mode pointer to buffer to hold returned idle mode
2543 *
2544 * Return Value: 0 if success, otherwise error code
2545 */
2546 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2547 {
2548 int err;
2549
2550 if (debug_level >= DEBUG_LEVEL_INFO)
2551 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2552 __FILE__,__LINE__, info->device_name, info->idle_mode);
2553
2554 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2555 if (err) {
2556 if ( debug_level >= DEBUG_LEVEL_INFO )
2557 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2558 __FILE__,__LINE__,info->device_name);
2559 return -EFAULT;
2560 }
2561
2562 return 0;
2563
2564 } /* end of mgsl_get_txidle() */
2565
2566 /* mgsl_set_txidle() service ioctl to set transmit idle mode
2567 *
2568 * Arguments: info pointer to device instance data
2569 * idle_mode new idle mode
2570 *
2571 * Return Value: 0 if success, otherwise error code
2572 */
2573 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2574 {
2575 unsigned long flags;
2576
2577 if (debug_level >= DEBUG_LEVEL_INFO)
2578 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2579 info->device_name, idle_mode );
2580
2581 spin_lock_irqsave(&info->irq_spinlock,flags);
2582 info->idle_mode = idle_mode;
2583 usc_set_txidle( info );
2584 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2585 return 0;
2586
2587 } /* end of mgsl_set_txidle() */
2588
2589 /* mgsl_txenable()
2590 *
2591 * enable or disable the transmitter
2592 *
2593 * Arguments:
2594 *
2595 * info pointer to device instance data
2596 * enable 1 = enable, 0 = disable
2597 *
2598 * Return Value: 0 if success, otherwise error code
2599 */
2600 static int mgsl_txenable(struct mgsl_struct * info, int enable)
2601 {
2602 unsigned long flags;
2603
2604 if (debug_level >= DEBUG_LEVEL_INFO)
2605 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2606 info->device_name, enable);
2607
2608 spin_lock_irqsave(&info->irq_spinlock,flags);
2609 if ( enable ) {
2610 if ( !info->tx_enabled ) {
2611
2612 usc_start_transmitter(info);
2613 /*--------------------------------------------------
2614 * if HDLC/SDLC Loop mode, attempt to insert the
2615 * station in the 'loop' by setting CMR:13. Upon
2616 * receipt of the next GoAhead (RxAbort) sequence,
2617 * the OnLoop indicator (CCSR:7) should go active
2618 * to indicate that we are on the loop
2619 *--------------------------------------------------*/
2620 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2621 usc_loopmode_insert_request( info );
2622 }
2623 } else {
2624 if ( info->tx_enabled )
2625 usc_stop_transmitter(info);
2626 }
2627 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2628 return 0;
2629
2630 } /* end of mgsl_txenable() */
2631
2632 /* mgsl_txabort() abort transmission of the current HDLC frame
2633 *
2634 * Arguments: info pointer to device instance data
2635 * Return Value: 0 if success, otherwise error code
2636 */
2637 static int mgsl_txabort(struct mgsl_struct * info)
2638 {
2639 unsigned long flags;
2640
2641 if (debug_level >= DEBUG_LEVEL_INFO)
2642 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2643 info->device_name);
2644
2645 spin_lock_irqsave(&info->irq_spinlock,flags);
2646 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2647 {
2648 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2649 usc_loopmode_cancel_transmit( info );
2650 else
2651 usc_TCmd(info,TCmd_SendAbort);
2652 }
2653 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2654 return 0;
2655
2656 } /* end of mgsl_txabort() */
2657
2658 /* mgsl_rxenable() enable or disable the receiver
2659 *
2660 * Arguments: info pointer to device instance data
2661 * enable 1 = enable, 0 = disable
2662 * Return Value: 0 if success, otherwise error code
2663 */
2664 static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2665 {
2666 unsigned long flags;
2667
2668 if (debug_level >= DEBUG_LEVEL_INFO)
2669 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2670 info->device_name, enable);
2671
2672 spin_lock_irqsave(&info->irq_spinlock,flags);
2673 if ( enable ) {
2674 if ( !info->rx_enabled )
2675 usc_start_receiver(info);
2676 } else {
2677 if ( info->rx_enabled )
2678 usc_stop_receiver(info);
2679 }
2680 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2681 return 0;
2682
2683 } /* end of mgsl_rxenable() */
2684
2685 /* mgsl_wait_event() wait for specified event to occur
2686 *
2687 * Arguments: info pointer to device instance data
2688 * mask pointer to bitmask of events to wait for
2689 * Return Value: 0 if successful, with the bit mask updated to
2690 * the events that triggered;
2691 * otherwise error code
2692 */
2693 static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2694 {
2695 unsigned long flags;
2696 int s;
2697 int rc=0;
2698 struct mgsl_icount cprev, cnow;
2699 int events;
2700 int mask;
2701 struct _input_signal_events oldsigs, newsigs;
2702 DECLARE_WAITQUEUE(wait, current);
2703
2704 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2705 if (rc) {
2706 return -EFAULT;
2707 }
2708
2709 if (debug_level >= DEBUG_LEVEL_INFO)
2710 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2711 info->device_name, mask);
2712
2713 spin_lock_irqsave(&info->irq_spinlock,flags);
2714
2715 /* return immediately if state matches requested events */
2716 usc_get_serial_signals(info);
2717 s = info->serial_signals;
2718 events = mask &
2719 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2720 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2721 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2722 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2723 if (events) {
2724 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2725 goto exit;
2726 }
2727
2728 /* save current irq counts */
2729 cprev = info->icount;
2730 oldsigs = info->input_signal_events;
2731
2732 /* enable hunt and idle irqs if needed */
2733 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2734 u16 oldreg = usc_InReg(info,RICR);
2735 u16 newreg = oldreg +
2736 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2737 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2738 if (oldreg != newreg)
2739 usc_OutReg(info, RICR, newreg);
2740 }
2741
2742 set_current_state(TASK_INTERRUPTIBLE);
2743 add_wait_queue(&info->event_wait_q, &wait);
2744
2745 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2746
2747
2748 for(;;) {
2749 schedule();
2750 if (signal_pending(current)) {
2751 rc = -ERESTARTSYS;
2752 break;
2753 }
2754
2755 /* get current irq counts */
2756 spin_lock_irqsave(&info->irq_spinlock,flags);
2757 cnow = info->icount;
2758 newsigs = info->input_signal_events;
2759 set_current_state(TASK_INTERRUPTIBLE);
2760 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2761
2762 /* if no change, wait aborted for some reason */
2763 if (newsigs.dsr_up == oldsigs.dsr_up &&
2764 newsigs.dsr_down == oldsigs.dsr_down &&
2765 newsigs.dcd_up == oldsigs.dcd_up &&
2766 newsigs.dcd_down == oldsigs.dcd_down &&
2767 newsigs.cts_up == oldsigs.cts_up &&
2768 newsigs.cts_down == oldsigs.cts_down &&
2769 newsigs.ri_up == oldsigs.ri_up &&
2770 newsigs.ri_down == oldsigs.ri_down &&
2771 cnow.exithunt == cprev.exithunt &&
2772 cnow.rxidle == cprev.rxidle) {
2773 rc = -EIO;
2774 break;
2775 }
2776
2777 events = mask &
2778 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2779 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2780 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2781 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2782 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2783 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2784 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2785 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2786 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2787 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2788 if (events)
2789 break;
2790
2791 cprev = cnow;
2792 oldsigs = newsigs;
2793 }
2794
2795 remove_wait_queue(&info->event_wait_q, &wait);
2796 set_current_state(TASK_RUNNING);
2797
2798 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2799 spin_lock_irqsave(&info->irq_spinlock,flags);
2800 if (!waitqueue_active(&info->event_wait_q)) {
2801 /* disable exit hunt mode/idle rcvd IRQs */
2802 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2803 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2804 }
2805 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2806 }
2807 exit:
2808 if ( rc == 0 )
2809 PUT_USER(rc, events, mask_ptr);
2810
2811 return rc;
2812
2813 } /* end of mgsl_wait_event() */
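
/*
 * Usage sketch (user space, hedged): waiting for DCD to go active with
 * MGSL_IOCWAITEVENT on an open port fd. The mask is passed by pointer and,
 * on success, is overwritten with the event bits that actually occurred.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	int events = MgslEvent_DcdActive;
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0) {
 *		// events now holds the triggering event bits
 *	}
 */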
2814
2815 static int modem_input_wait(struct mgsl_struct *info,int arg)
2816 {
2817 unsigned long flags;
2818 int rc;
2819 struct mgsl_icount cprev, cnow;
2820 DECLARE_WAITQUEUE(wait, current);
2821
2822 /* save current irq counts */
2823 spin_lock_irqsave(&info->irq_spinlock,flags);
2824 cprev = info->icount;
2825 add_wait_queue(&info->status_event_wait_q, &wait);
2826 set_current_state(TASK_INTERRUPTIBLE);
2827 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2828
2829 for(;;) {
2830 schedule();
2831 if (signal_pending(current)) {
2832 rc = -ERESTARTSYS;
2833 break;
2834 }
2835
2836 /* get new irq counts */
2837 spin_lock_irqsave(&info->irq_spinlock,flags);
2838 cnow = info->icount;
2839 set_current_state(TASK_INTERRUPTIBLE);
2840 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2841
2842 /* if no change, wait aborted for some reason */
2843 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2844 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2845 rc = -EIO;
2846 break;
2847 }
2848
2849 /* check for change in caller specified modem input */
2850 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2851 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2852 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2853 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2854 rc = 0;
2855 break;
2856 }
2857
2858 cprev = cnow;
2859 }
2860 remove_wait_queue(&info->status_event_wait_q, &wait);
2861 set_current_state(TASK_RUNNING);
2862 return rc;
2863 }
2864
2865 /* return the state of the serial control and status signals
2866 */
2867 static int tiocmget(struct tty_struct *tty, struct file *file)
2868 {
2869 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2870 unsigned int result;
2871 unsigned long flags;
2872
2873 spin_lock_irqsave(&info->irq_spinlock,flags);
2874 usc_get_serial_signals(info);
2875 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2876
2877 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2878 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2879 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2880 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2881 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2882 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2883
2884 if (debug_level >= DEBUG_LEVEL_INFO)
2885 printk("%s(%d):%s tiocmget() value=%08X\n",
2886 __FILE__,__LINE__, info->device_name, result );
2887 return result;
2888 }
2889
2890 /* set modem control signals (DTR/RTS)
2891 */
2892 static int tiocmset(struct tty_struct *tty, struct file *file,
2893 unsigned int set, unsigned int clear)
2894 {
2895 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2896 unsigned long flags;
2897
2898 if (debug_level >= DEBUG_LEVEL_INFO)
2899 printk("%s(%d):%s tiocmset(%x,%x)\n",
2900 __FILE__,__LINE__,info->device_name, set, clear);
2901
2902 if (set & TIOCM_RTS)
2903 info->serial_signals |= SerialSignal_RTS;
2904 if (set & TIOCM_DTR)
2905 info->serial_signals |= SerialSignal_DTR;
2906 if (clear & TIOCM_RTS)
2907 info->serial_signals &= ~SerialSignal_RTS;
2908 if (clear & TIOCM_DTR)
2909 info->serial_signals &= ~SerialSignal_DTR;
2910
2911 spin_lock_irqsave(&info->irq_spinlock,flags);
2912 usc_set_serial_signals(info);
2913 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2914
2915 return 0;
2916 }
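
/*
 * Usage sketch (user space, hedged): these handlers back the standard
 * modem-control ioctls, so applications can use TIOCMGET/TIOCMBIS/TIOCMBIC
 * (or TIOCMSET) on the open port fd.
 *
 *	#include <sys/ioctl.h>
 *	#include <termios.h>
 *
 *	int mcr = 0;
 *	ioctl(fd, TIOCMGET, &mcr);	// read current signal states
 *	mcr = TIOCM_DTR | TIOCM_RTS;
 *	ioctl(fd, TIOCMBIS, &mcr);	// assert DTR and RTS
 */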
2917
2918 /* mgsl_break() Set or clear transmit break condition
2919 *
2920 * Arguments: tty pointer to tty instance data
2921 * break_state -1=set break condition, 0=clear
2922 * Return Value: None
2923 */
2924 static void mgsl_break(struct tty_struct *tty, int break_state)
2925 {
2926 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2927 unsigned long flags;
2928
2929 if (debug_level >= DEBUG_LEVEL_INFO)
2930 printk("%s(%d):mgsl_break(%s,%d)\n",
2931 __FILE__,__LINE__, info->device_name, break_state);
2932
2933 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2934 return;
2935
2936 spin_lock_irqsave(&info->irq_spinlock,flags);
2937 if (break_state == -1)
2938 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2939 else
2940 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2941 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2942
2943 } /* end of mgsl_break() */
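
/*
 * Usage sketch (user space, hedged): the break handler is normally reached
 * through the standard termios call on an open port fd rather than a
 * private ioctl.
 *
 *	#include <termios.h>
 *
 *	tcsendbreak(fd, 0);	// transmit a break for the default duration
 */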
2944
2945 /* mgsl_ioctl() Service an IOCTL request
2946 *
2947 * Arguments:
2948 *
2949 * tty pointer to tty instance data
2950 * file pointer to associated file object for device
2951 * cmd IOCTL command code
2952 * arg command argument/context
2953 *
2954 * Return Value: 0 if success, otherwise error code
2955 */
2956 static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2957 unsigned int cmd, unsigned long arg)
2958 {
2959 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2960
2961 if (debug_level >= DEBUG_LEVEL_INFO)
2962 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2963 info->device_name, cmd );
2964
2965 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2966 return -ENODEV;
2967
2968 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2969 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
2970 if (tty->flags & (1 << TTY_IO_ERROR))
2971 return -EIO;
2972 }
2973
2974 return mgsl_ioctl_common(info, cmd, arg);
2975 }
2976
2977 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2978 {
2979 int error;
2980 struct mgsl_icount cnow; /* kernel counter temps */
2981 void __user *argp = (void __user *)arg;
2982 struct serial_icounter_struct __user *p_cuser; /* user space */
2983 unsigned long flags;
2984
2985 switch (cmd) {
2986 case MGSL_IOCGPARAMS:
2987 return mgsl_get_params(info, argp);
2988 case MGSL_IOCSPARAMS:
2989 return mgsl_set_params(info, argp);
2990 case MGSL_IOCGTXIDLE:
2991 return mgsl_get_txidle(info, argp);
2992 case MGSL_IOCSTXIDLE:
2993 return mgsl_set_txidle(info,(int)arg);
2994 case MGSL_IOCTXENABLE:
2995 return mgsl_txenable(info,(int)arg);
2996 case MGSL_IOCRXENABLE:
2997 return mgsl_rxenable(info,(int)arg);
2998 case MGSL_IOCTXABORT:
2999 return mgsl_txabort(info);
3000 case MGSL_IOCGSTATS:
3001 return mgsl_get_stats(info, argp);
3002 case MGSL_IOCWAITEVENT:
3003 return mgsl_wait_event(info, argp);
3004 case MGSL_IOCLOOPTXDONE:
3005 return mgsl_loopmode_send_done(info);
3006 /* Wait for modem input (DCD,RI,DSR,CTS) change
3007 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3008 */
3009 case TIOCMIWAIT:
3010 return modem_input_wait(info,(int)arg);
3011
3012 /*
3013 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
3014 * Return: write counters to the user passed counter struct
3015 * NB: both 1->0 and 0->1 transitions are counted except for
3016 * RI where only 0->1 is counted.
3017 */
3018 case TIOCGICOUNT:
3019 spin_lock_irqsave(&info->irq_spinlock,flags);
3020 cnow = info->icount;
3021 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3022 p_cuser = argp;
3023 PUT_USER(error,cnow.cts, &p_cuser->cts);
3024 if (error) return error;
3025 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
3026 if (error) return error;
3027 PUT_USER(error,cnow.rng, &p_cuser->rng);
3028 if (error) return error;
3029 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
3030 if (error) return error;
3031 PUT_USER(error,cnow.rx, &p_cuser->rx);
3032 if (error) return error;
3033 PUT_USER(error,cnow.tx, &p_cuser->tx);
3034 if (error) return error;
3035 PUT_USER(error,cnow.frame, &p_cuser->frame);
3036 if (error) return error;
3037 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
3038 if (error) return error;
3039 PUT_USER(error,cnow.parity, &p_cuser->parity);
3040 if (error) return error;
3041 PUT_USER(error,cnow.brk, &p_cuser->brk);
3042 if (error) return error;
3043 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
3044 if (error) return error;
3045 return 0;
3046 default:
3047 return -ENOIOCTLCMD;
3048 }
3049 return 0;
3050 }
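
/*
 * Usage sketch (user space, hedged): the TIOCGICOUNT case above fills the
 * standard serial_icounter_struct one field at a time; fd is an already
 * opened port.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/serial.h>
 *
 *	struct serial_icounter_struct ic;
 *	if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
 *		printf("dcd=%d cts=%d buf_overrun=%d\n",
 *		       ic.dcd, ic.cts, ic.buf_overrun);
 */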
3051
3052 /* mgsl_set_termios()
3053 *
3054 * Set new termios settings
3055 *
3056 * Arguments:
3057 *
3058 * tty pointer to tty structure
3059 * termios pointer to buffer to hold returned old termios
3060 *
3061 * Return Value: None
3062 */
3063 static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3064 {
3065 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
3066 unsigned long flags;
3067
3068 if (debug_level >= DEBUG_LEVEL_INFO)
3069 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3070 tty->driver->name );
3071
3072 /* just return if nothing has changed */
3073 if ((tty->termios->c_cflag == old_termios->c_cflag)
3074 && (RELEVANT_IFLAG(tty->termios->c_iflag)
3075 == RELEVANT_IFLAG(old_termios->c_iflag)))
3076 return;
3077
3078 mgsl_change_params(info);
3079
3080 /* Handle transition to B0 status */
3081 if (old_termios->c_cflag & CBAUD &&
3082 !(tty->termios->c_cflag & CBAUD)) {
3083 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3084 spin_lock_irqsave(&info->irq_spinlock,flags);
3085 usc_set_serial_signals(info);
3086 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3087 }
3088
3089 /* Handle transition away from B0 status */
3090 if (!(old_termios->c_cflag & CBAUD) &&
3091 tty->termios->c_cflag & CBAUD) {
3092 info->serial_signals |= SerialSignal_DTR;
3093 if (!(tty->termios->c_cflag & CRTSCTS) ||
3094 !test_bit(TTY_THROTTLED, &tty->flags)) {
3095 info->serial_signals |= SerialSignal_RTS;
3096 }
3097 spin_lock_irqsave(&info->irq_spinlock,flags);
3098 usc_set_serial_signals(info);
3099 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3100 }
3101
3102 /* Handle turning off CRTSCTS */
3103 if (old_termios->c_cflag & CRTSCTS &&
3104 !(tty->termios->c_cflag & CRTSCTS)) {
3105 tty->hw_stopped = 0;
3106 mgsl_start(tty);
3107 }
3108
3109 } /* end of mgsl_set_termios() */
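
/*
 * Usage sketch (user space, hedged): asynchronous line settings reach this
 * entry point through the normal termios path on an open port fd.
 *
 *	#include <termios.h>
 *
 *	struct termios tio;
 *	tcgetattr(fd, &tio);
 *	cfsetospeed(&tio, B9600);	// <= 460800, so the tty rate is used
 *	cfsetispeed(&tio, B9600);
 *	tio.c_cflag |= CRTSCTS;		// enable CTS flow control
 *	tcsetattr(fd, TCSANOW, &tio);
 */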
3110
3111 /* mgsl_close()
3112 *
3113 * Called when port is closed. Wait for remaining data to be
3114 * sent. Disable port and free resources.
3115 *
3116 * Arguments:
3117 *
3118 * tty pointer to open tty structure
3119 * filp pointer to open file object
3120 *
3121 * Return Value: None
3122 */
3123 static void mgsl_close(struct tty_struct *tty, struct file * filp)
3124 {
3125 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3126
3127 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3128 return;
3129
3130 if (debug_level >= DEBUG_LEVEL_INFO)
3131 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3132 __FILE__,__LINE__, info->device_name, info->count);
3133
3134 if (!info->count)
3135 return;
3136
3137 if (tty_hung_up_p(filp))
3138 goto cleanup;
3139
3140 if ((tty->count == 1) && (info->count != 1)) {
3141 /*
3142 * tty->count is 1 and the tty structure will be freed.
3143 * info->count should be one in this case.
3144 * if it's not, correct it so that the port is shut down.
3145 */
3146 printk("mgsl_close: bad refcount; tty->count is 1, "
3147 "info->count is %d\n", info->count);
3148 info->count = 1;
3149 }
3150
3151 info->count--;
3152
3153 /* if at least one open remaining, leave hardware active */
3154 if (info->count)
3155 goto cleanup;
3156
3157 info->flags |= ASYNC_CLOSING;
3158
3159 /* set tty->closing to notify line discipline to
3160 * only process XON/XOFF characters. Only the N_TTY
3161 * discipline appears to use this (ppp does not).
3162 */
3163 tty->closing = 1;
3164
3165 /* wait for transmit data to clear all layers */
3166
3167 if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
3168 if (debug_level >= DEBUG_LEVEL_INFO)
3169 printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
3170 __FILE__,__LINE__, info->device_name );
3171 tty_wait_until_sent(tty, info->closing_wait);
3172 }
3173
3174 if (info->flags & ASYNC_INITIALIZED)
3175 mgsl_wait_until_sent(tty, info->timeout);
3176
3177 if (tty->driver->flush_buffer)
3178 tty->driver->flush_buffer(tty);
3179
3180 tty_ldisc_flush(tty);
3181
3182 shutdown(info);
3183
3184 tty->closing = 0;
3185 info->tty = NULL;
3186
3187 if (info->blocked_open) {
3188 if (info->close_delay) {
3189 msleep_interruptible(jiffies_to_msecs(info->close_delay));
3190 }
3191 wake_up_interruptible(&info->open_wait);
3192 }
3193
3194 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
3195
3196 wake_up_interruptible(&info->close_wait);
3197
3198 cleanup:
3199 if (debug_level >= DEBUG_LEVEL_INFO)
3200 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3201 tty->driver->name, info->count);
3202
3203 } /* end of mgsl_close() */
3204
3205 /* mgsl_wait_until_sent()
3206 *
3207 * Wait until the transmitter is empty.
3208 *
3209 * Arguments:
3210 *
3211 * tty pointer to tty info structure
3212 * timeout time to wait for send completion
3213 *
3214 * Return Value: None
3215 */
3216 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3217 {
3218 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3219 unsigned long orig_jiffies, char_time;
3220
3221 if (!info )
3222 return;
3223
3224 if (debug_level >= DEBUG_LEVEL_INFO)
3225 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3226 __FILE__,__LINE__, info->device_name );
3227
3228 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3229 return;
3230
3231 if (!(info->flags & ASYNC_INITIALIZED))
3232 goto exit;
3233
3234 orig_jiffies = jiffies;
3235
3236 /* Set check interval to 1/5 of estimated time to
3237 * send a character, and make it at least 1. The check
3238 * interval should also be less than the timeout.
3239 * Note: use tight timings here to satisfy the NIST-PCTS.
3240 */
3241
3242 if ( info->params.data_rate ) {
3243 char_time = info->timeout/(32 * 5);
3244 if (!char_time)
3245 char_time++;
3246 } else
3247 char_time = 1;
3248
3249 if (timeout)
3250 char_time = min_t(unsigned long, char_time, timeout);
3251
3252 if ( info->params.mode == MGSL_MODE_HDLC ||
3253 info->params.mode == MGSL_MODE_RAW ) {
3254 while (info->tx_active) {
3255 msleep_interruptible(jiffies_to_msecs(char_time));
3256 if (signal_pending(current))
3257 break;
3258 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3259 break;
3260 }
3261 } else {
3262 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3263 info->tx_enabled) {
3264 msleep_interruptible(jiffies_to_msecs(char_time));
3265 if (signal_pending(current))
3266 break;
3267 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3268 break;
3269 }
3270 }
3271
3272 exit:
3273 if (debug_level >= DEBUG_LEVEL_INFO)
3274 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3275 __FILE__,__LINE__, info->device_name );
3276
3277 } /* end of mgsl_wait_until_sent() */
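
/*
 * Usage sketch (user space, hedged): the standard drain call on an open
 * port fd ends up in this handler via the tty layer.
 *
 *	#include <termios.h>
 *
 *	tcdrain(fd);	// block until the transmitter is empty
 */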
3278
3279 /* mgsl_hangup()
3280 *
3281 * Called by tty_hangup() when a hangup is signaled.
3282 * This is the same as closing all open files for the port.
3283 *
3284 * Arguments: tty pointer to associated tty object
3285 * Return Value: None
3286 */
3287 static void mgsl_hangup(struct tty_struct *tty)
3288 {
3289 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3290
3291 if (debug_level >= DEBUG_LEVEL_INFO)
3292 printk("%s(%d):mgsl_hangup(%s)\n",
3293 __FILE__,__LINE__, info->device_name );
3294
3295 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3296 return;
3297
3298 mgsl_flush_buffer(tty);
3299 shutdown(info);
3300
3301 info->count = 0;
3302 info->flags &= ~ASYNC_NORMAL_ACTIVE;
3303 info->tty = NULL;
3304
3305 wake_up_interruptible(&info->open_wait);
3306
3307 } /* end of mgsl_hangup() */
3308
3309 /* block_til_ready()
3310 *
3311 * Block the current process until the specified port
3312 * is ready to be opened.
3313 *
3314 * Arguments:
3315 *
3316 * tty pointer to tty info structure
3317 * filp pointer to open file object
3318 * info pointer to device instance data
3319 *
3320 * Return Value: 0 if success, otherwise error code
3321 */
3322 static int block_til_ready(struct tty_struct *tty, struct file * filp,
3323 struct mgsl_struct *info)
3324 {
3325 DECLARE_WAITQUEUE(wait, current);
3326 int retval;
3327 int do_clocal = 0, extra_count = 0;
3328 unsigned long flags;
3329
3330 if (debug_level >= DEBUG_LEVEL_INFO)
3331 printk("%s(%d):block_til_ready on %s\n",
3332 __FILE__,__LINE__, tty->driver->name );
3333
3334 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3335 /* nonblock mode is set or port is not enabled */
3336 info->flags |= ASYNC_NORMAL_ACTIVE;
3337 return 0;
3338 }
3339
3340 if (tty->termios->c_cflag & CLOCAL)
3341 do_clocal = 1;
3342
3343 /* Wait for carrier detect and the line to become
3344 * free (i.e., not in use by the callout). While we are in
3345 * this loop, info->count is dropped by one, so that
3346 * mgsl_close() knows when to free things. We restore it upon
3347 * exit, either normal or abnormal.
3348 */
3349
3350 retval = 0;
3351 add_wait_queue(&info->open_wait, &wait);
3352
3353 if (debug_level >= DEBUG_LEVEL_INFO)
3354 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3355 __FILE__,__LINE__, tty->driver->name, info->count );
3356
3357 spin_lock_irqsave(&info->irq_spinlock, flags);
3358 if (!tty_hung_up_p(filp)) {
3359 extra_count = 1;
3360 info->count--;
3361 }
3362 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3363 info->blocked_open++;
3364
3365 while (1) {
3366 if (tty->termios->c_cflag & CBAUD) {
3367 spin_lock_irqsave(&info->irq_spinlock,flags);
3368 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3369 usc_set_serial_signals(info);
3370 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3371 }
3372
3373 set_current_state(TASK_INTERRUPTIBLE);
3374
3375 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
3376 retval = (info->flags & ASYNC_HUP_NOTIFY) ?
3377 -EAGAIN : -ERESTARTSYS;
3378 break;
3379 }
3380
3381 spin_lock_irqsave(&info->irq_spinlock,flags);
3382 usc_get_serial_signals(info);
3383 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3384
3385 if (!(info->flags & ASYNC_CLOSING) &&
3386 (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
3387 break;
3388 }
3389
3390 if (signal_pending(current)) {
3391 retval = -ERESTARTSYS;
3392 break;
3393 }
3394
3395 if (debug_level >= DEBUG_LEVEL_INFO)
3396 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3397 __FILE__,__LINE__, tty->driver->name, info->count );
3398
3399 schedule();
3400 }
3401
3402 set_current_state(TASK_RUNNING);
3403 remove_wait_queue(&info->open_wait, &wait);
3404
3405 if (extra_count)
3406 info->count++;
3407 info->blocked_open--;
3408
3409 if (debug_level >= DEBUG_LEVEL_INFO)
3410 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3411 __FILE__,__LINE__, tty->driver->name, info->count );
3412
3413 if (!retval)
3414 info->flags |= ASYNC_NORMAL_ACTIVE;
3415
3416 return retval;
3417
3418 } /* end of block_til_ready() */
3419
3420 /* mgsl_open()
3421 *
3422 * Called when a port is opened. Init and enable port.
3423 * Perform serial-specific initialization for the tty structure.
3424 *
3425 * Arguments: tty pointer to tty info structure
3426 * filp associated file pointer
3427 *
3428 * Return Value: 0 if success, otherwise error code
3429 */
3430 static int mgsl_open(struct tty_struct *tty, struct file * filp)
3431 {
3432 struct mgsl_struct *info;
3433 int retval, line;
3434 unsigned long flags;
3435
3436 /* verify range of specified line number */
3437 line = tty->index;
3438 if ((line < 0) || (line >= mgsl_device_count)) {
3439 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3440 __FILE__,__LINE__,line);
3441 return -ENODEV;
3442 }
3443
3444 /* find the info structure for the specified line */
3445 info = mgsl_device_list;
3446 while(info && info->line != line)
3447 info = info->next_device;
3448 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3449 return -ENODEV;
3450
3451 tty->driver_data = info;
3452 info->tty = tty;
3453
3454 if (debug_level >= DEBUG_LEVEL_INFO)
3455 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3456 __FILE__,__LINE__,tty->driver->name, info->count);
3457
3458 /* If port is closing, signal caller to try again */
3459 if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
3460 if (info->flags & ASYNC_CLOSING)
3461 interruptible_sleep_on(&info->close_wait);
3462 retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
3463 -EAGAIN : -ERESTARTSYS);
3464 goto cleanup;
3465 }
3466
3467 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3468
3469 spin_lock_irqsave(&info->netlock, flags);
3470 if (info->netcount) {
3471 retval = -EBUSY;
3472 spin_unlock_irqrestore(&info->netlock, flags);
3473 goto cleanup;
3474 }
3475 info->count++;
3476 spin_unlock_irqrestore(&info->netlock, flags);
3477
3478 if (info->count == 1) {
3479 /* 1st open on this device, init hardware */
3480 retval = startup(info);
3481 if (retval < 0)
3482 goto cleanup;
3483 }
3484
3485 retval = block_til_ready(tty, filp, info);
3486 if (retval) {
3487 if (debug_level >= DEBUG_LEVEL_INFO)
3488 printk("%s(%d):block_til_ready(%s) returned %d\n",
3489 __FILE__,__LINE__, info->device_name, retval);
3490 goto cleanup;
3491 }
3492
3493 if (debug_level >= DEBUG_LEVEL_INFO)
3494 printk("%s(%d):mgsl_open(%s) success\n",
3495 __FILE__,__LINE__, info->device_name);
3496 retval = 0;
3497
3498 cleanup:
3499 if (retval) {
3500 if (tty->count == 1)
3501 info->tty = NULL; /* tty layer will release tty struct */
3502 if(info->count)
3503 info->count--;
3504 }
3505
3506 return retval;
3507
3508 } /* end of mgsl_open() */
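
/*
 * Usage sketch (user space, hedged): opening with O_NONBLOCK skips the
 * carrier wait in block_til_ready(); the device node name is an assumption.
 *
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/ttySL0", O_RDWR | O_NONBLOCK);
 *	if (fd < 0)
 *		perror("open /dev/ttySL0");
 */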
3509
3510 /*
3511 * /proc fs routines....
3512 */
3513
3514 static inline int line_info(char *buf, struct mgsl_struct *info)
3515 {
3516 char stat_buf[30];
3517 int ret;
3518 unsigned long flags;
3519
3520 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3521 ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3522 info->device_name, info->io_base, info->irq_level,
3523 info->phys_memory_base, info->phys_lcr_base);
3524 } else {
3525 ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
3526 info->device_name, info->io_base,
3527 info->irq_level, info->dma_level);
3528 }
3529
3530 /* output current serial signal states */
3531 spin_lock_irqsave(&info->irq_spinlock,flags);
3532 usc_get_serial_signals(info);
3533 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3534
3535 stat_buf[0] = 0;
3536 stat_buf[1] = 0;
3537 if (info->serial_signals & SerialSignal_RTS)
3538 strcat(stat_buf, "|RTS");
3539 if (info->serial_signals & SerialSignal_CTS)
3540 strcat(stat_buf, "|CTS");
3541 if (info->serial_signals & SerialSignal_DTR)
3542 strcat(stat_buf, "|DTR");
3543 if (info->serial_signals & SerialSignal_DSR)
3544 strcat(stat_buf, "|DSR");
3545 if (info->serial_signals & SerialSignal_DCD)
3546 strcat(stat_buf, "|CD");
3547 if (info->serial_signals & SerialSignal_RI)
3548 strcat(stat_buf, "|RI");
3549
3550 if (info->params.mode == MGSL_MODE_HDLC ||
3551 info->params.mode == MGSL_MODE_RAW ) {
3552 ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
3553 info->icount.txok, info->icount.rxok);
3554 if (info->icount.txunder)
3555 ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
3556 if (info->icount.txabort)
3557 ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
3558 if (info->icount.rxshort)
3559 ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
3560 if (info->icount.rxlong)
3561 ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
3562 if (info->icount.rxover)
3563 ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
3564 if (info->icount.rxcrc)
3565 ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
3566 } else {
3567 ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
3568 info->icount.tx, info->icount.rx);
3569 if (info->icount.frame)
3570 ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
3571 if (info->icount.parity)
3572 ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
3573 if (info->icount.brk)
3574 ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
3575 if (info->icount.overrun)
3576 ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
3577 }
3578
3579 /* Append serial signal status to end */
3580 ret += sprintf(buf+ret, " %s\n", stat_buf+1);
3581
3582 ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3583 info->tx_active,info->bh_requested,info->bh_running,
3584 info->pending_bh);
3585
3586 spin_lock_irqsave(&info->irq_spinlock,flags);
3587 {
3588 u16 Tcsr = usc_InReg( info, TCSR );
3589 u16 Tdmr = usc_InDmaReg( info, TDMR );
3590 u16 Ticr = usc_InReg( info, TICR );
3591 u16 Rscr = usc_InReg( info, RCSR );
3592 u16 Rdmr = usc_InDmaReg( info, RDMR );
3593 u16 Ricr = usc_InReg( info, RICR );
3594 u16 Icr = usc_InReg( info, ICR );
3595 u16 Dccr = usc_InReg( info, DCCR );
3596 u16 Tmr = usc_InReg( info, TMR );
3597 u16 Tccr = usc_InReg( info, TCCR );
3598 u16 Ccar = inw( info->io_base + CCAR );
3599 ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3600 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3601 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3602 }
3603 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3604
3605 return ret;
3606
3607 } /* end of line_info() */
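/*
 * Illustrative only: for a hypothetical PCI adapter running in HDLC mode
 * (all addresses, IRQ and counters made up for the example), the first two
 * lines produced above would look roughly like:
 *
 *   ttySL0:PCI io:E800 irq:10 mem:D0000000 lcr:D8000000 HDLC txok:42 rxok:17 RTS|DTR|CD
 *   txactive=0 bh_req=0 bh_run=0 pending_bh=0
 *
 * followed by the two-line USC register dump.
 */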
3608
3609 /* mgsl_read_proc()
3610 *
3611 * Called to print information about devices
3612 *
3613 * Arguments:
3614 * page page of memory to hold returned info
3615 * start where data for this read begins within page
3616 * off offset into the virtual file
3617 * count maximum number of bytes to return
3618 * eof set to 1 when info for all devices has been output
3619 * data unused
3620 *
3621 * Return Value: number of bytes written to page
3622 */
3623 static int mgsl_read_proc(char *page, char **start, off_t off, int count,
3624 int *eof, void *data)
3625 {
3626 int len = 0, l;
3627 off_t begin = 0;
3628 struct mgsl_struct *info;
3629
3630 len += sprintf(page, "synclink driver:%s\n", driver_version);
3631
3632 info = mgsl_device_list;
3633 while( info ) {
3634 l = line_info(page + len, info);
3635 len += l;
3636 if (len+begin > off+count)
3637 goto done;
3638 if (len+begin < off) {
3639 begin += len;
3640 len = 0;
3641 }
3642 info = info->next_device;
3643 }
3644
3645 *eof = 1;
3646 done:
3647 if (off >= len+begin)
3648 return 0;
3649 *start = page + (off-begin);
3650 return ((count < begin+len-off) ? count : begin+len-off);
3651
3652 } /* end of mgsl_read_proc() */
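/*
 * Note on the legacy read_proc contract used above (a rough sketch, not a
 * definitive description): the caller re-invokes this routine with an
 * increasing 'off' until 0 is returned. For example, with three devices
 * producing ~200 bytes each and a request of off=0, count=512, the loop
 * stops once len+begin exceeds off+count, *start is set to page+(off-begin)
 * and 512 bytes are returned; the next call arrives with off=512 to fetch
 * the remainder.
 */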
3653
3654 /* mgsl_allocate_dma_buffers()
3655 *
3656 * Allocate and format DMA buffers (ISA adapter)
3657 * or format shared memory buffers (PCI adapter).
3658 *
3659 * Arguments: info pointer to device instance data
3660 * Return Value: 0 if success, otherwise error
3661 */
3662 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3663 {
3664 unsigned short BuffersPerFrame;
3665
3666 info->last_mem_alloc = 0;
3667
3668 /* Calculate the number of DMA buffers necessary to hold the */
3669 /* largest allowable frame size. Note: If the max frame size is */
3670 /* not an even multiple of the DMA buffer size then we need to */
3671 /* round the buffer count per frame up one. */
3672
3673 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3674 if ( info->max_frame_size % DMABUFFERSIZE )
3675 BuffersPerFrame++;
3676
3677 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3678 /*
3679 * The PCI adapter has 256KBytes of shared memory to use.
3680 * This is 64 PAGE_SIZE buffers.
3681 *
3682 * The first page is used for padding at this time so the
3683 * buffer list does not begin at offset 0 of the PCI
3684 * adapter's shared memory.
3685 *
3686 * The 2nd page is used for the buffer list. A 4K buffer
3687 * list can hold 128 DMA_BUFFER structures at 32 bytes
3688 * each.
3689 *
3690 * This leaves 62 4K pages.
3691 *
3692 * The next N pages are used for transmit frame(s). We
3693 * reserve enough 4K page blocks to hold the required
3694 * number of transmit dma buffers (num_tx_dma_buffers),
3695 * each of MaxFrameSize size.
3696 *
3697 * Of the remaining pages (62-N), determine how many can
3698 * be used to receive full MaxFrameSize inbound frames
3699 */
3700 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3701 info->rx_buffer_count = 62 - info->tx_buffer_count;
3702 } else {
3703 /* Calculate the number of PAGE_SIZE buffers needed for */
3704 /* receive and transmit DMA buffers. */
3705
3706
3707 /* Calculate the number of DMA buffers necessary to */
3708 /* hold 7 max size receive frames and one max size transmit frame. */
3709 /* The receive buffer count is bumped by one so we avoid an */
3710 /* End of List condition if all receive buffers are used when */
3711 /* using linked list DMA buffers. */
3712
3713 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3714 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3715
3716 /*
3717 * limit total TxBuffers & RxBuffers to 62 4K total
3718 * (ala PCI Allocation)
3719 */
3720
3721 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3722 info->rx_buffer_count = 62 - info->tx_buffer_count;
3723
3724 }
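/*
 * Worked example (illustrative, assuming DMABUFFERSIZE is one 4K page and
 * the defaults max_frame_size=4096, num_tx_dma_buffers=1, MAXRXFRAMES=7):
 *
 *   BuffersPerFrame = 4096/4096 = 1
 *   PCI: tx_buffer_count = 1*1 = 1, rx_buffer_count = 62 - 1 = 61
 *   ISA: tx_buffer_count = 1*1 = 1, rx_buffer_count = 1*7 + 6 = 13
 */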
3725
3726 if ( debug_level >= DEBUG_LEVEL_INFO )
3727 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3728 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3729
3730 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3731 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3732 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3733 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3734 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3735 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3736 return -ENOMEM;
3737 }
3738
3739 mgsl_reset_rx_dma_buffers( info );
3740 mgsl_reset_tx_dma_buffers( info );
3741
3742 return 0;
3743
3744 } /* end of mgsl_allocate_dma_buffers() */
3745
3746 /*
3747 * mgsl_alloc_buffer_list_memory()
3748 *
3749 * Allocate a common DMA buffer for use as the
3750 * receive and transmit buffer lists.
3751 *
3752 * A buffer list is a set of buffer entries where each entry contains
3753 * a pointer to an actual buffer and a pointer to the next buffer entry
3754 * (plus some other info about the buffer).
3755 *
3756 * The buffer entries for a list are built to form a circular list so
3757 * that when the entire list has been traversed you start back at the
3758 * beginning.
3759 *
3760 * This function allocates memory for just the buffer entries.
3761 * The links (pointer to next entry) are filled in with the physical
3762 * address of the next entry so the adapter can navigate the list
3763 * using bus master DMA. The pointers to the actual buffers are filled
3764 * out later when the actual buffers are allocated.
3765 *
3766 * Arguments: info pointer to device instance data
3767 * Return Value: 0 if success, otherwise error
3768 */
3769 static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3770 {
3771 unsigned int i;
3772
3773 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3774 /* PCI adapter uses shared memory. */
3775 info->buffer_list = info->memory_base + info->last_mem_alloc;
3776 info->buffer_list_phys = info->last_mem_alloc;
3777 info->last_mem_alloc += BUFFERLISTSIZE;
3778 } else {
3779 /* ISA adapter uses system memory. */
3780 /* The buffer lists are allocated as a common buffer that both */
3781 /* the processor and adapter can access. This allows the driver to */
3782 /* inspect portions of the buffer while other portions are being */
3783 /* updated by the adapter using Bus Master DMA. */
3784
3785 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3786 if (info->buffer_list == NULL)
3787 return -ENOMEM;
3788 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3789 }
3790
3791 /* We got the memory for the buffer entry lists. */
3792 /* Initialize the memory block to all zeros. */
3793 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3794
3795 /* Save virtual address pointers to the receive and */
3796 /* transmit buffer lists. (Receive 1st). These pointers will */
3797 /* be used by the processor to access the lists. */
3798 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3799 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3800 info->tx_buffer_list += info->rx_buffer_count;
3801
3802 /*
3803 * Build the links for the buffer entry lists such that
3804 * two circular lists are built. (Transmit and Receive).
3805 *
3806 * Note: the links are physical addresses
3807 * which are read by the adapter to determine the next
3808 * buffer entry to use.
3809 */
3810
3811 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3812 /* calculate and store physical address of this buffer entry */
3813 info->rx_buffer_list[i].phys_entry =
3814 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3815
3816 /* calculate and store physical address of */
3817 /* next entry in circular list of entries */
3818
3819 info->rx_buffer_list[i].link = info->buffer_list_phys;
3820
3821 if ( i < info->rx_buffer_count - 1 )
3822 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3823 }
3824
3825 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3826 /* calculate and store physical address of this buffer entry */
3827 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3828 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3829
3830 /* calculate and store physical address of */
3831 /* next entry in circular list of entries */
3832
3833 info->tx_buffer_list[i].link = info->buffer_list_phys +
3834 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3835
3836 if ( i < info->tx_buffer_count - 1 )
3837 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3838 }
3839
3840 return 0;
3841
3842 } /* end of mgsl_alloc_buffer_list_memory() */
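/*
 * Resulting link layout (illustrative, for a hypothetical rx_buffer_count
 * of 3):
 *
 *   rx[0].link = buffer_list_phys + 1*sizeof(DMABUFFERENTRY)
 *   rx[1].link = buffer_list_phys + 2*sizeof(DMABUFFERENTRY)
 *   rx[2].link = buffer_list_phys                  (wraps back to rx[0])
 *
 * The transmit entries follow the same pattern starting at
 * buffer_list_phys + rx_buffer_count*sizeof(DMABUFFERENTRY).
 */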
3843
3844 /* Free DMA buffers allocated for use as the
3845 * receive and transmit buffer lists.
3846 * Warning:
3847 *
3848 * The data transfer buffers associated with the buffer list
3849 * MUST be freed before freeing the buffer list itself because
3850 * the buffer list contains the information necessary to free
3851 * the individual buffers!
3852 */
3853 static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3854 {
3855 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3856 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3857
3858 info->buffer_list = NULL;
3859 info->rx_buffer_list = NULL;
3860 info->tx_buffer_list = NULL;
3861
3862 } /* end of mgsl_free_buffer_list_memory() */
3863
3864 /*
3865 * mgsl_alloc_frame_memory()
3866 *
3867 * Allocate the frame DMA buffers used by the specified buffer list.
3868 * Each DMA buffer will be one memory page in size. This is necessary
3869 * because memory can fragment enough that it may be impossible to
3870 * allocate contiguous pages.
3871 *
3872 * Arguments:
3873 *
3874 * info pointer to device instance data
3875 * BufferList pointer to list of buffer entries
3876 * Buffercount count of buffer entries in buffer list
3877 *
3878 * Return Value: 0 if success, otherwise -ENOMEM
3879 */
3880 static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3881 {
3882 int i;
3883 u32 phys_addr;
3884
3885 /* Allocate page sized buffers for the receive buffer list */
3886
3887 for ( i = 0; i < Buffercount; i++ ) {
3888 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3889 /* PCI adapter uses shared memory buffers. */
3890 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3891 phys_addr = info->last_mem_alloc;
3892 info->last_mem_alloc += DMABUFFERSIZE;
3893 } else {
3894 /* ISA adapter uses system memory. */
3895 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3896 if (BufferList[i].virt_addr == NULL)
3897 return -ENOMEM;
3898 phys_addr = (u32)(BufferList[i].dma_addr);
3899 }
3900 BufferList[i].phys_addr = phys_addr;
3901 }
3902
3903 return 0;
3904
3905 } /* end of mgsl_alloc_frame_memory() */
3906
3907 /*
3908 * mgsl_free_frame_memory()
3909 *
3910 * Free the buffers associated with
3911 * each buffer entry of a buffer list.
3912 *
3913 * Arguments:
3914 *
3915 * info pointer to device instance data
3916 * BufferList pointer to list of buffer entries
3917 * Buffercount count of buffer entries in buffer list
3918 *
3919 * Return Value: None
3920 */
3921 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3922 {
3923 int i;
3924
3925 if ( BufferList ) {
3926 for ( i = 0 ; i < Buffercount ; i++ ) {
3927 if ( BufferList[i].virt_addr ) {
3928 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3929 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3930 BufferList[i].virt_addr = NULL;
3931 }
3932 }
3933 }
3934
3935 } /* end of mgsl_free_frame_memory() */
3936
3937 /* mgsl_free_dma_buffers()
3938 *
3939 * Free DMA buffers
3940 *
3941 * Arguments: info pointer to device instance data
3942 * Return Value: None
3943 */
3944 static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3945 {
3946 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3947 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3948 mgsl_free_buffer_list_memory( info );
3949
3950 } /* end of mgsl_free_dma_buffers() */
3951
3952
3953 /*
3954 * mgsl_alloc_intermediate_rxbuffer_memory()
3955 *
3956 * Allocate a buffer large enough to hold max_frame_size. This buffer
3957 * is used to pass an assembled frame to the line discipline.
3958 *
3959 * Arguments:
3960 *
3961 * info pointer to device instance data
3962 *
3963 * Return Value: 0 if success, otherwise -ENOMEM
3964 */
3965 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3966 {
3967 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3968 if ( info->intermediate_rxbuffer == NULL )
3969 return -ENOMEM;
3970
3971 return 0;
3972
3973 } /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3974
3975 /*
3976 * mgsl_free_intermediate_rxbuffer_memory()
3977 *
3978 * Free the intermediate receive (assembled frame) buffer.
3979 * Arguments:
3980 *
3981 * info pointer to device instance data
3982 *
3983 * Return Value: None
3984 */
3985 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3986 {
3987 kfree(info->intermediate_rxbuffer);
3988 info->intermediate_rxbuffer = NULL;
3989
3990 } /* end of mgsl_free_intermediate_rxbuffer_memory() */
3991
3992 /*
3993 * mgsl_alloc_intermediate_txbuffer_memory()
3994 *
3995 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3996 * This buffer is used to load transmit frames into the adapter's dma transfer
3997 * buffers when there is sufficient space.
3998 *
3999 * Arguments:
4000 *
4001 * info pointer to device instance data
4002 *
4003 * Return Value: 0 if success, otherwise -ENOMEM
4004 */
4005 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
4006 {
4007 int i;
4008
4009 if ( debug_level >= DEBUG_LEVEL_INFO )
4010 printk("%s %s(%d) allocating %d tx holding buffers\n",
4011 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
4012
4013 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
4014
4015 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
4016 info->tx_holding_buffers[i].buffer =
4017 kmalloc(info->max_frame_size, GFP_KERNEL);
4018 if ( info->tx_holding_buffers[i].buffer == NULL )
4019 return -ENOMEM;
4020 }
4021
4022 return 0;
4023
4024 } /* end of mgsl_alloc_intermediate_txbuffer_memory() */
4025
4026 /*
4027 * mgsl_free_intermediate_txbuffer_memory()
4028 *
4029 * Free the transmit holding buffers and reset the holding ring state.
4030 * Arguments:
4031 *
4032 * info pointer to device instance data
4033 *
4034 * Return Value: None
4035 */
4036 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
4037 {
4038 int i;
4039
4040 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
4041 kfree(info->tx_holding_buffers[i].buffer);
4042 info->tx_holding_buffers[i].buffer = NULL;
4043 }
4044
4045 info->get_tx_holding_index = 0;
4046 info->put_tx_holding_index = 0;
4047 info->tx_holding_count = 0;
4048
4049 } /* end of mgsl_free_intermediate_txbuffer_memory() */
4050
4051
4052 /*
4053 * load_next_tx_holding_buffer()
4054 *
4055 * attempts to load the next buffered tx request into the
4056 * tx dma buffers
4057 *
4058 * Arguments:
4059 *
4060 * info pointer to device instance data
4061 *
4062 * Return Value: 1 if next buffered tx request loaded
4063 * into adapter's tx dma buffer,
4064 * 0 otherwise
4065 */
4066 static int load_next_tx_holding_buffer(struct mgsl_struct *info)
4067 {
4068 int ret = 0;
4069
4070 if ( info->tx_holding_count ) {
4071 /* determine if we have enough tx dma buffers
4072 * to accommodate the next tx frame
4073 */
4074 struct tx_holding_buffer *ptx =
4075 &info->tx_holding_buffers[info->get_tx_holding_index];
4076 int num_free = num_free_tx_dma_buffers(info);
4077 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4078 if ( ptx->buffer_size % DMABUFFERSIZE )
4079 ++num_needed;
4080
4081 if (num_needed <= num_free) {
4082 info->xmit_cnt = ptx->buffer_size;
4083 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4084
4085 --info->tx_holding_count;
4086 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4087 info->get_tx_holding_index=0;
4088
4089 /* restart transmit timer */
4090 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4091
4092 ret = 1;
4093 }
4094 }
4095
4096 return ret;
4097 }
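/*
 * Worked example (illustrative, assuming a DMABUFFERSIZE of 4096): a queued
 * frame of 5000 bytes needs num_needed = 5000/4096 + 1 = 2 dma buffers, so
 * it is only loaded once num_free_tx_dma_buffers() reports at least 2 free
 * entries; otherwise it stays in the holding buffer for a later attempt.
 */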
4098
4099 /*
4100 * save_tx_buffer_request()
4101 *
4102 * attempt to store transmit frame request for later transmission
4103 *
4104 * Arguments:
4105 *
4106 * info pointer to device instance data
4107 * Buffer pointer to buffer containing frame to load
4108 * BufferSize size in bytes of frame in Buffer
4109 *
4110 * Return Value: 1 if able to store, 0 otherwise
4111 */
4112 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4113 {
4114 struct tx_holding_buffer *ptx;
4115
4116 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4117 return 0; /* all buffers in use */
4118 }
4119
4120 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4121 ptx->buffer_size = BufferSize;
4122 memcpy( ptx->buffer, Buffer, BufferSize);
4123
4124 ++info->tx_holding_count;
4125 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4126 info->put_tx_holding_index=0;
4127
4128 return 1;
4129 }
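/*
 * The holding buffers form a simple ring: put_tx_holding_index is the next
 * slot to store into, get_tx_holding_index the next frame to load, and
 * tx_holding_count the occupancy. With a hypothetical
 * num_tx_holding_buffers of 3, three saves fill slots 0, 1 and 2; a fourth
 * save returns 0 until load_next_tx_holding_buffer() drains an entry.
 */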
4130
4131 static int mgsl_claim_resources(struct mgsl_struct *info)
4132 {
4133 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4134 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4135 __FILE__,__LINE__,info->device_name, info->io_base);
4136 return -ENODEV;
4137 }
4138 info->io_addr_requested = 1;
4139
4140 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4141 info->device_name, info ) < 0 ) {
4142 printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n",
4143 __FILE__,__LINE__,info->device_name, info->irq_level );
4144 goto errout;
4145 }
4146 info->irq_requested = 1;
4147
4148 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4149 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4150 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4151 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4152 goto errout;
4153 }
4154 info->shared_mem_requested = 1;
4155 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4156 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4157 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4158 goto errout;
4159 }
4160 info->lcr_mem_requested = 1;
4161
4162 info->memory_base = ioremap(info->phys_memory_base,0x40000);
4163 if (!info->memory_base) {
4164 printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n",
4165 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4166 goto errout;
4167 }
4168
4169 if ( !mgsl_memory_test(info) ) {
4170 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4171 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4172 goto errout;
4173 }
4174
4175 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE);
4176 if (!info->lcr_base) {
4177 printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n", __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4178 goto errout;
4179 }
4180 info->lcr_base += info->lcr_offset; /* apply offset only after checking the mapping */
4181
4182 } else {
4183 /* claim DMA channel */
4184
4185 if (request_dma(info->dma_level,info->device_name) < 0){
4186 printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n",
4187 __FILE__,__LINE__,info->device_name, info->dma_level );
4188 mgsl_release_resources( info );
4189 return -ENODEV;
4190 }
4191 info->dma_requested = 1;
4192
4193 /* ISA adapter uses bus master DMA */
4194 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4195 enable_dma(info->dma_level);
4196 }
4197
4198 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4199 printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n",
4200 __FILE__,__LINE__,info->device_name, info->dma_level );
4201 goto errout;
4202 }
4203
4204 return 0;
4205 errout:
4206 mgsl_release_resources(info);
4207 return -ENODEV;
4208
4209 } /* end of mgsl_claim_resources() */
4210
4211 static void mgsl_release_resources(struct mgsl_struct *info)
4212 {
4213 if ( debug_level >= DEBUG_LEVEL_INFO )
4214 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4215 __FILE__,__LINE__,info->device_name );
4216
4217 if ( info->irq_requested ) {
4218 free_irq(info->irq_level, info);
4219 info->irq_requested = 0;
4220 }
4221 if ( info->dma_requested ) {
4222 disable_dma(info->dma_level);
4223 free_dma(info->dma_level);
4224 info->dma_requested = 0;
4225 }
4226 mgsl_free_dma_buffers(info);
4227 mgsl_free_intermediate_rxbuffer_memory(info);
4228 mgsl_free_intermediate_txbuffer_memory(info);
4229
4230 if ( info->io_addr_requested ) {
4231 release_region(info->io_base,info->io_addr_size);
4232 info->io_addr_requested = 0;
4233 }
4234 if ( info->shared_mem_requested ) {
4235 release_mem_region(info->phys_memory_base,0x40000);
4236 info->shared_mem_requested = 0;
4237 }
4238 if ( info->lcr_mem_requested ) {
4239 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4240 info->lcr_mem_requested = 0;
4241 }
4242 if (info->memory_base){
4243 iounmap(info->memory_base);
4244 info->memory_base = NULL;
4245 }
4246 if (info->lcr_base){
4247 iounmap(info->lcr_base - info->lcr_offset);
4248 info->lcr_base = NULL;
4249 }
4250
4251 if ( debug_level >= DEBUG_LEVEL_INFO )
4252 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4253 __FILE__,__LINE__,info->device_name );
4254
4255 } /* end of mgsl_release_resources() */
4256
4257 /* mgsl_add_device()
4258 *
4259 * Add the specified device instance data structure to the
4260 * global linked list of devices and increment the device count.
4261 *
4262 * Arguments: info pointer to device instance data
4263 * Return Value: None
4264 */
4265 static void mgsl_add_device( struct mgsl_struct *info )
4266 {
4267 info->next_device = NULL;
4268 info->line = mgsl_device_count;
4269 sprintf(info->device_name,"ttySL%d",info->line);
4270
4271 if (info->line < MAX_TOTAL_DEVICES) {
4272 if (maxframe[info->line])
4273 info->max_frame_size = maxframe[info->line];
4274 info->dosyncppp = dosyncppp[info->line];
4275
4276 if (txdmabufs[info->line]) {
4277 info->num_tx_dma_buffers = txdmabufs[info->line];
4278 if (info->num_tx_dma_buffers < 1)
4279 info->num_tx_dma_buffers = 1;
4280 }
4281
4282 if (txholdbufs[info->line]) {
4283 info->num_tx_holding_buffers = txholdbufs[info->line];
4284 if (info->num_tx_holding_buffers < 1)
4285 info->num_tx_holding_buffers = 1;
4286 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4287 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4288 }
4289 }
4290
4291 mgsl_device_count++;
4292
4293 if ( !mgsl_device_list )
4294 mgsl_device_list = info;
4295 else {
4296 struct mgsl_struct *current_dev = mgsl_device_list;
4297 while( current_dev->next_device )
4298 current_dev = current_dev->next_device;
4299 current_dev->next_device = info;
4300 }
4301
4302 if ( info->max_frame_size < 4096 )
4303 info->max_frame_size = 4096;
4304 else if ( info->max_frame_size > 65535 )
4305 info->max_frame_size = 65535;
4306
4307 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4308 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4309 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4310 info->phys_memory_base, info->phys_lcr_base,
4311 info->max_frame_size );
4312 } else {
4313 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4314 info->device_name, info->io_base, info->irq_level, info->dma_level,
4315 info->max_frame_size );
4316 }
4317
4318 #if SYNCLINK_GENERIC_HDLC
4319 hdlcdev_init(info);
4320 #endif
4321
4322 } /* end of mgsl_add_device() */
4323
4324 /* mgsl_allocate_device()
4325 *
4326 * Allocate and initialize a device instance structure
4327 *
4328 * Arguments: none
4329 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4330 */
4331 static struct mgsl_struct* mgsl_allocate_device(void)
4332 {
4333 struct mgsl_struct *info;
4334
4335 info = (struct mgsl_struct *)kmalloc(sizeof(struct mgsl_struct),
4336 GFP_KERNEL);
4337
4338 if (!info) {
4339 printk("Error can't allocate device instance data\n");
4340 } else {
4341 memset(info, 0, sizeof(struct mgsl_struct));
4342 info->magic = MGSL_MAGIC;
4343 INIT_WORK(&info->task, mgsl_bh_handler);
4344 info->max_frame_size = 4096;
4345 info->close_delay = 5*HZ/10;
4346 info->closing_wait = 30*HZ;
4347 init_waitqueue_head(&info->open_wait);
4348 init_waitqueue_head(&info->close_wait);
4349 init_waitqueue_head(&info->status_event_wait_q);
4350 init_waitqueue_head(&info->event_wait_q);
4351 spin_lock_init(&info->irq_spinlock);
4352 spin_lock_init(&info->netlock);
4353 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4354 info->idle_mode = HDLC_TXIDLE_FLAGS;
4355 info->num_tx_dma_buffers = 1;
4356 info->num_tx_holding_buffers = 0;
4357 }
4358
4359 return info;
4360
4361 } /* end of mgsl_allocate_device()*/
4362
4363 static const struct tty_operations mgsl_ops = {
4364 .open = mgsl_open,
4365 .close = mgsl_close,
4366 .write = mgsl_write,
4367 .put_char = mgsl_put_char,
4368 .flush_chars = mgsl_flush_chars,
4369 .write_room = mgsl_write_room,
4370 .chars_in_buffer = mgsl_chars_in_buffer,
4371 .flush_buffer = mgsl_flush_buffer,
4372 .ioctl = mgsl_ioctl,
4373 .throttle = mgsl_throttle,
4374 .unthrottle = mgsl_unthrottle,
4375 .send_xchar = mgsl_send_xchar,
4376 .break_ctl = mgsl_break,
4377 .wait_until_sent = mgsl_wait_until_sent,
4378 .read_proc = mgsl_read_proc,
4379 .set_termios = mgsl_set_termios,
4380 .stop = mgsl_stop,
4381 .start = mgsl_start,
4382 .hangup = mgsl_hangup,
4383 .tiocmget = tiocmget,
4384 .tiocmset = tiocmset,
4385 };
4386
4387 /*
4388 * perform tty device initialization
4389 */
4390 static int mgsl_init_tty(void)
4391 {
4392 int rc;
4393
4394 serial_driver = alloc_tty_driver(128);
4395 if (!serial_driver)
4396 return -ENOMEM;
4397
4398 serial_driver->owner = THIS_MODULE;
4399 serial_driver->driver_name = "synclink";
4400 serial_driver->name = "ttySL";
4401 serial_driver->major = ttymajor;
4402 serial_driver->minor_start = 64;
4403 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4404 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4405 serial_driver->init_termios = tty_std_termios;
4406 serial_driver->init_termios.c_cflag =
4407 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4408 serial_driver->init_termios.c_ispeed = 9600;
4409 serial_driver->init_termios.c_ospeed = 9600;
4410 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4411 tty_set_operations(serial_driver, &mgsl_ops);
4412 if ((rc = tty_register_driver(serial_driver)) < 0) {
4413 printk("%s(%d):Couldn't register serial driver\n",
4414 __FILE__,__LINE__);
4415 put_tty_driver(serial_driver);
4416 serial_driver = NULL;
4417 return rc;
4418 }
4419
4420 printk("%s %s, tty major#%d\n",
4421 driver_name, driver_version,
4422 serial_driver->major);
4423 return 0;
4424 }
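/*
 * From user space the registered ports appear as /dev/ttySL0, /dev/ttySL1,
 * ... (driver name "ttySL", minors starting at 64). A minimal, purely
 * illustrative open from an application might look like:
 *
 *   int fd = open("/dev/ttySL0", O_RDWR | O_NOCTTY);
 *   if (fd < 0)
 *           perror("open /dev/ttySL0");
 */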
4425
4426 /* enumerate user specified ISA adapters
4427 */
4428 static void mgsl_enum_isa_devices(void)
4429 {
4430 struct mgsl_struct *info;
4431 int i;
4432
4433 /* Check for user specified ISA devices */
4434
4435 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4436 if ( debug_level >= DEBUG_LEVEL_INFO )
4437 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4438 io[i], irq[i], dma[i] );
4439
4440 info = mgsl_allocate_device();
4441 if ( !info ) {
4442 /* error allocating device instance data */
4443 if ( debug_level >= DEBUG_LEVEL_ERROR )
4444 printk( "can't allocate device instance data.\n");
4445 continue;
4446 }
4447
4448 /* Copy user configuration info to device instance data */
4449 info->io_base = (unsigned int)io[i];
4450 info->irq_level = (unsigned int)irq[i];
4451 info->irq_level = irq_canonicalize(info->irq_level);
4452 info->dma_level = (unsigned int)dma[i];
4453 info->bus_type = MGSL_BUS_TYPE_ISA;
4454 info->io_addr_size = 16;
4455 info->irq_flags = 0;
4456
4457 mgsl_add_device( info );
4458 }
4459 }
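/*
 * ISA adapters are only probed when the io=/irq=/dma= module parameters
 * are supplied. A hypothetical load line for two adapters (values are
 * illustrative only) would be:
 *
 *   modprobe synclink io=0x300,0x340 irq=10,11 dma=5,6
 */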
4460
4461 static void synclink_cleanup(void)
4462 {
4463 int rc;
4464 struct mgsl_struct *info;
4465 struct mgsl_struct *tmp;
4466
4467 printk("Unloading %s: %s\n", driver_name, driver_version);
4468
4469 if (serial_driver) {
4470 if ((rc = tty_unregister_driver(serial_driver)))
4471 printk("%s(%d) failed to unregister tty driver err=%d\n",
4472 __FILE__,__LINE__,rc);
4473 put_tty_driver(serial_driver);
4474 }
4475
4476 info = mgsl_device_list;
4477 while(info) {
4478 #if SYNCLINK_GENERIC_HDLC
4479 hdlcdev_exit(info);
4480 #endif
4481 mgsl_release_resources(info);
4482 tmp = info;
4483 info = info->next_device;
4484 kfree(tmp);
4485 }
4486
4487 if (pci_registered)
4488 pci_unregister_driver(&synclink_pci_driver);
4489 }
4490
4491 static int __init synclink_init(void)
4492 {
4493 int rc;
4494
4495 if (break_on_load) {
4496 mgsl_get_text_ptr();
4497 BREAKPOINT();
4498 }
4499
4500 printk("%s %s\n", driver_name, driver_version);
4501
4502 mgsl_enum_isa_devices();
4503 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4504 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4505 else
4506 pci_registered = 1;
4507
4508 if ((rc = mgsl_init_tty()) < 0)
4509 goto error;
4510
4511 return 0;
4512
4513 error:
4514 synclink_cleanup();
4515 return rc;
4516 }
4517
4518 static void __exit synclink_exit(void)
4519 {
4520 synclink_cleanup();
4521 }
4522
4523 module_init(synclink_init);
4524 module_exit(synclink_exit);
4525
4526 /*
4527 * usc_RTCmd()
4528 *
4529 * Issue a USC Receive/Transmit command to the
4530 * Channel Command/Address Register (CCAR).
4531 *
4532 * Notes:
4533 *
4534 * The command is encoded in the most significant 5 bits <15..11>
4535 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4536 * and Bits <6..0> must be written as zeros.
4537 *
4538 * Arguments:
4539 *
4540 * info pointer to device information structure
4541 * Cmd command mask (use symbolic macros)
4542 *
4543 * Return Value:
4544 *
4545 * None
4546 */
4547 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4548 {
4549 /* output command to CCAR in bits <15..11> */
4550 /* preserve bits <10..7>, bits <6..0> must be zero */
4551
4552 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4553
4554 /* Read to flush write to CCAR */
4555 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4556 inw( info->io_base + CCAR );
4557
4558 } /* end of usc_RTCmd() */
4559
4560 /*
4561 * usc_DmaCmd()
4562 *
4563 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4564 *
4565 * Arguments:
4566 *
4567 * info pointer to device information structure
4568 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4569 *
4570 * Return Value:
4571 *
4572 * None
4573 */
4574 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4575 {
4576 /* write command mask to DCAR */
4577 outw( Cmd + info->mbre_bit, info->io_base );
4578
4579 /* Read to flush write to DCAR */
4580 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4581 inw( info->io_base );
4582
4583 } /* end of usc_DmaCmd() */
4584
4585 /*
4586 * usc_OutDmaReg()
4587 *
4588 * Write a 16-bit value to a USC DMA register
4589 *
4590 * Arguments:
4591 *
4592 * info pointer to device info structure
4593 * RegAddr register address (number) for write
4594 * RegValue 16-bit value to write to register
4595 *
4596 * Return Value:
4597 *
4598 * None
4599 *
4600 */
4601 static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4602 {
4603 /* Note: The DCAR is located at the adapter base address */
4604 /* Note: must preserve state of BIT8 in DCAR */
4605
4606 outw( RegAddr + info->mbre_bit, info->io_base );
4607 outw( RegValue, info->io_base );
4608
4609 /* Read to flush write to DCAR */
4610 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4611 inw( info->io_base );
4612
4613 } /* end of usc_OutDmaReg() */
4614
4615 /*
4616 * usc_InDmaReg()
4617 *
4618 * Read a 16-bit value from a DMA register
4619 *
4620 * Arguments:
4621 *
4622 * info pointer to device info structure
4623 * RegAddr register address (number) to read from
4624 *
4625 * Return Value:
4626 *
4627 * The 16-bit value read from register
4628 *
4629 */
4630 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4631 {
4632 /* Note: The DCAR is located at the adapter base address */
4633 /* Note: must preserve state of BIT8 in DCAR */
4634
4635 outw( RegAddr + info->mbre_bit, info->io_base );
4636 return inw( info->io_base );
4637
4638 } /* end of usc_InDmaReg() */
4639
4640 /*
4641 *
4642 * usc_OutReg()
4643 *
4644 * Write a 16-bit value to a USC serial channel register
4645 *
4646 * Arguments:
4647 *
4648 * info pointer to device info structure
4649 * RegAddr register address (number) to write to
4650 * RegValue 16-bit value to write to register
4651 *
4652 * Return Value:
4653 *
4654 * None
4655 *
4656 */
4657 static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4658 {
4659 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4660 outw( RegValue, info->io_base + CCAR );
4661
4662 /* Read to flush write to CCAR */
4663 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4664 inw( info->io_base + CCAR );
4665
4666 } /* end of usc_OutReg() */
4667
4668 /*
4669 * usc_InReg()
4670 *
4671 * Reads a 16-bit value from a USC serial channel register
4672 *
4673 * Arguments:
4674 *
4675 * info pointer to device extension
4676 * RegAddr register address (number) to read from
4677 *
4678 * Return Value:
4679 *
4680 * 16-bit value read from register
4681 */
4682 static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4683 {
4684 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4685 return inw( info->io_base + CCAR );
4686
4687 } /* end of usc_InReg() */
4688
4689 /* usc_set_sdlc_mode()
4690 *
4691 * Set up the adapter for SDLC DMA communications.
4692 *
4693 * Arguments: info pointer to device instance data
4694 * Return Value: NONE
4695 */
4696 static void usc_set_sdlc_mode( struct mgsl_struct *info )
4697 {
4698 u16 RegValue;
4699 int PreSL1660;
4700
4701 /*
4702 * determine if the IUSC on the adapter is pre-SL1660. If
4703 * not, take advantage of the UnderWait feature of more
4704 * modern chips. If an underrun occurs and this bit is set,
4705 * the transmitter will idle the programmed idle pattern
4706 * until the driver has time to service the underrun. Otherwise,
4707 * the dma controller may get the cycles previously requested
4708 * and begin transmitting queued tx data.
4709 */
4710 usc_OutReg(info,TMCR,0x1f);
4711 RegValue=usc_InReg(info,TMDR);
4712 if ( RegValue == IUSC_PRE_SL1660 )
4713 PreSL1660 = 1;
4714 else
4715 PreSL1660 = 0;
4716
4717
4718 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4719 {
4720 /*
4721 ** Channel Mode Register (CMR)
4722 **
4723 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4724 ** <13> 0 0 = Transmit Disabled (initially)
4725 ** <12> 0 1 = Consecutive Idles share common 0
4726 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4727 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4728 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4729 **
4730 ** 1000 1110 0000 0110 = 0x8e06
4731 */
4732 RegValue = 0x8e06;
4733
4734 /*--------------------------------------------------
4735 * ignore user options for UnderRun Actions and
4736 * preambles
4737 *--------------------------------------------------*/
4738 }
4739 else
4740 {
4741 /* Channel mode Register (CMR)
4742 *
4743 * <15..14> 00 Tx Sub modes, Underrun Action
4744 * <13> 0 1 = Send Preamble before opening flag
4745 * <12> 0 1 = Consecutive Idles share common 0
4746 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4747 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4748 * <3..0> 0110 Receiver mode = HDLC/SDLC
4749 *
4750 * 0000 0110 0000 0110 = 0x0606
4751 */
4752 if (info->params.mode == MGSL_MODE_RAW) {
4753 RegValue = 0x0001; /* Set Receive mode = external sync */
4754
4755 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4756 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4757
4758 /*
4759 * TxSubMode:
4760 * CMR <15> 0 Don't send CRC on Tx Underrun
4761 * CMR <14> x undefined
4762 * CMR <13> 0 Send preamble before opening sync
4763 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4764 *
4765 * TxMode:
4766 * CMR <11..8> 0100 MonoSync
4767 *
4768 * 0000 0100 xxxx xxxx = 0x04xx
4769 */
4770 RegValue |= 0x0400;
4771 }
4772 else {
4773
4774 RegValue = 0x0606;
4775
4776 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4777 RegValue |= BIT14;
4778 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4779 RegValue |= BIT15;
4780 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4781 RegValue |= BIT15 + BIT14;
4782 }
4783
4784 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4785 RegValue |= BIT13;
4786 }
4787
4788 if ( info->params.mode == MGSL_MODE_HDLC &&
4789 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4790 RegValue |= BIT12;
4791
4792 if ( info->params.addr_filter != 0xff )
4793 {
4794 /* set up receive address filtering */
4795 usc_OutReg( info, RSR, info->params.addr_filter );
4796 RegValue |= BIT4;
4797 }
4798
4799 usc_OutReg( info, CMR, RegValue );
4800 info->cmr_value = RegValue;
4801
4802 /* Receiver mode Register (RMR)
4803 *
4804 * <15..13> 000 encoding
4805 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4806 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4807 * <9> 0 1 = Include Receive chars in CRC
4808 * <8> 1 1 = Use Abort/PE bit as abort indicator
4809 * <7..6> 00 Even parity
4810 * <5> 0 parity disabled
4811 * <4..2> 000 Receive Char Length = 8 bits
4812 * <1..0> 00 Disable Receiver
4813 *
4814 * 0000 0101 0000 0000 = 0x0500
4815 */
4816
4817 RegValue = 0x0500;
4818
4819 switch ( info->params.encoding ) {
4820 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4821 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4822 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4823 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4824 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4825 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4826 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4827 }
4828
4829 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4830 RegValue |= BIT9;
4831 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4832 RegValue |= ( BIT12 | BIT10 | BIT9 );
4833
4834 usc_OutReg( info, RMR, RegValue );
4835
4836 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4837 /* When an opening flag of an SDLC frame is recognized the */
4838 /* Receive Character count (RCC) is loaded with the value in */
4839 /* RCLR. The RCC is decremented for each received byte. The */
4840 /* value of RCC is stored after the closing flag of the frame */
4841 /* allowing the frame size to be computed. */
4842
4843 usc_OutReg( info, RCLR, RCLRVALUE );
4844
4845 usc_RCmd( info, RCmd_SelectRicrdma_level );
4846
4847 /* Receive Interrupt Control Register (RICR)
4848 *
4849 * <15..8> ? RxFIFO DMA Request Level
4850 * <7> 0 Exited Hunt IA (Interrupt Arm)
4851 * <6> 0 Idle Received IA
4852 * <5> 0 Break/Abort IA
4853 * <4> 0 Rx Bound IA
4854 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4855 * <2> 0 Abort/PE IA
4856 * <1> 1 Rx Overrun IA
4857 * <0> 0 Select TC0 value for readback
4858 *
4859 * 0000 0000 0000 1010 = 0x000a
4860 */
4861
4862 /* Carry over the Exit Hunt and Idle Received bits */
4863 /* in case they have been armed by usc_ArmEvents. */
4864
4865 RegValue = usc_InReg( info, RICR ) & 0xc0;
4866
4867 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4868 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4869 else
4870 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4871
4872 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4873
4874 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4875 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4876
4877 /* Transmit mode Register (TMR)
4878 *
4879 * <15..13> 000 encoding
4880 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4881 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4882 * <9> 0 1 = Tx CRC Enabled
4883 * <8> 0 1 = Append CRC to end of transmit frame
4884 * <7..6> 00 Transmit parity Even
4885 * <5> 0 Transmit parity Disabled
4886 * <4..2> 000 Tx Char Length = 8 bits
4887 * <1..0> 00 Disable Transmitter
4888 *
4889 * 0000 0100 0000 0000 = 0x0400
4890 */
4891
4892 RegValue = 0x0400;
4893
4894 switch ( info->params.encoding ) {
4895 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4896 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4897 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4898 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4899 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4900 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4901 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4902 }
4903
4904 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4905 RegValue |= BIT9 + BIT8;
4906 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4907 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4908
4909 usc_OutReg( info, TMR, RegValue );
4910
4911 usc_set_txidle( info );
4912
4913
4914 usc_TCmd( info, TCmd_SelectTicrdma_level );
4915
4916 /* Transmit Interrupt Control Register (TICR)
4917 *
4918 * <15..8> ? Transmit FIFO DMA Level
4919 * <7> 0 Present IA (Interrupt Arm)
4920 * <6> 0 Idle Sent IA
4921 * <5> 1 Abort Sent IA
4922 * <4> 1 EOF/EOM Sent IA
4923 * <3> 0 CRC Sent IA
4924 * <2> 1 1 = Wait for SW Trigger to Start Frame
4925 * <1> 1 Tx Underrun IA
4926 * <0> 0 TC0 constant on read back
4927 *
4928 * 0000 0000 0011 0110 = 0x0036
4929 */
4930
4931 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4932 usc_OutReg( info, TICR, 0x0736 );
4933 else
4934 usc_OutReg( info, TICR, 0x1436 );
4935
4936 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4937 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4938
4939 /*
4940 ** Transmit Command/Status Register (TCSR)
4941 **
4942 ** <15..12> 0000 TCmd
4943 ** <11> 0/1 UnderWait
4944 ** <10..08> 000 TxIdle
4945 ** <7> x PreSent
4946 ** <6> x IdleSent
4947 ** <5> x AbortSent
4948 ** <4> x EOF/EOM Sent
4949 ** <3> x CRC Sent
4950 ** <2> x All Sent
4951 ** <1> x TxUnder
4952 ** <0> x TxEmpty
4953 **
4954 ** 0000 0000 0000 0000 = 0x0000
4955 */
4956 info->tcsr_value = 0;
4957
4958 if ( !PreSL1660 )
4959 info->tcsr_value |= TCSR_UNDERWAIT;
4960
4961 usc_OutReg( info, TCSR, info->tcsr_value );
4962
4963 /* Clock mode Control Register (CMCR)
4964 *
4965 * <15..14> 00 counter 1 Source = Disabled
4966 * <13..12> 00 counter 0 Source = Disabled
4967 * <11..10> 11 BRG1 Input is TxC Pin
4968 * <9..8> 11 BRG0 Input is TxC Pin
4969 * <7..6> 01 DPLL Input is BRG1 Output
4970 * <5..3> XXX TxCLK comes from Port 0
4971 * <2..0> XXX RxCLK comes from Port 1
4972 *
4973 * 0000 1111 0111 0111 = 0x0f77
4974 */
4975
4976 RegValue = 0x0f40;
4977
4978 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4979 RegValue |= 0x0003; /* RxCLK from DPLL */
4980 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4981 RegValue |= 0x0004; /* RxCLK from BRG0 */
4982 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4983 RegValue |= 0x0006; /* RxCLK from TXC Input */
4984 else
4985 RegValue |= 0x0007; /* RxCLK from Port1 */
4986
4987 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4988 RegValue |= 0x0018; /* TxCLK from DPLL */
4989 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4990 RegValue |= 0x0020; /* TxCLK from BRG0 */
4991 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4992 RegValue |= 0x0038; /* TxCLK from RxC Input */
4993 else
4994 RegValue |= 0x0030; /* TxCLK from Port0 */
4995
4996 usc_OutReg( info, CMCR, RegValue );
4997
4998
4999 /* Hardware Configuration Register (HCR)
5000 *
5001 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
5002 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
5003 * <12> 0 CVOK:0=report code violation in biphase
5004 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
5005 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
5006 * <7..6> 00 reserved
5007 * <5> 0 BRG1 mode:0=continuous,1=single cycle
5008 * <4> X BRG1 Enable
5009 * <3..2> 00 reserved
5010 * <1> 0 BRG0 mode:0=continuous,1=single cycle
5011 * <0> 0 BRG0 Enable
5012 */
5013
5014 RegValue = 0x0000;
5015
5016 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
5017 u32 XtalSpeed;
5018 u32 DpllDivisor;
5019 u16 Tc;
5020
5021 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
5022 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
5023
5024 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5025 XtalSpeed = 11059200;
5026 else
5027 XtalSpeed = 14745600;
5028
5029 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
5030 DpllDivisor = 16;
5031 RegValue |= BIT10;
5032 }
5033 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
5034 DpllDivisor = 8;
5035 RegValue |= BIT11;
5036 }
5037 else
5038 DpllDivisor = 32;
5039
5040 /* Tc = (Xtal/Speed) - 1 */
5041 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5042 /* then rounding up gives a more precise time constant. Instead */
5043 /* of rounding up and then subtracting 1 we just don't subtract */
5044 /* the one in this case. */
5045
5046 /*--------------------------------------------------
5047 * ejz: for DPLL mode, application should use the
5048 * same clock speed as the partner system, even
5049 * though clocking is derived from the input RxData.
5050 * In case the user uses a 0 for the clock speed,
5051 * default to 0xffffffff and don't try to divide by
5052 * zero
5053 *--------------------------------------------------*/
5054 if ( info->params.clock_speed )
5055 {
5056 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
5057 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
5058 / info->params.clock_speed) )
5059 Tc--;
5060 }
5061 else
5062 Tc = -1;
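/*
 * Worked example (illustrative): with the ISA XtalSpeed of 14745600, the
 * default DpllDivisor of 32 and a hypothetical clock_speed of 9600,
 * 14745600/32 = 460800 and 460800/9600 = 48 with no remainder, so the
 * time constant is decremented to Tc = 47.
 */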
5063
5064
5065 /* Write 16-bit Time Constant for BRG1 */
5066 usc_OutReg( info, TC1R, Tc );
5067
5068 RegValue |= BIT4; /* enable BRG1 */
5069
5070 switch ( info->params.encoding ) {
5071 case HDLC_ENCODING_NRZ:
5072 case HDLC_ENCODING_NRZB:
5073 case HDLC_ENCODING_NRZI_MARK:
5074 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5075 case HDLC_ENCODING_BIPHASE_MARK:
5076 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5077 case HDLC_ENCODING_BIPHASE_LEVEL:
5078 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5079 }
5080 }
5081
5082 usc_OutReg( info, HCR, RegValue );
5083
5084
5085 /* Channel Control/status Register (CCSR)
5086 *
5087 * <15> X RCC FIFO Overflow status (RO)
5088 * <14> X RCC FIFO Not Empty status (RO)
5089 * <13> 0 1 = Clear RCC FIFO (WO)
5090 * <12> X DPLL Sync (RW)
5091 * <11> X DPLL 2 Missed Clocks status (RO)
5092 * <10> X DPLL 1 Missed Clock status (RO)
5093 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5094 * <7> X SDLC Loop On status (RO)
5095 * <6> X SDLC Loop Send status (RO)
5096 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5097 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5098 * <1..0> 00 reserved
5099 *
5100 * 0000 0000 0010 0000 = 0x0020
5101 */
5102
5103 usc_OutReg( info, CCSR, 0x1020 );
5104
5105
5106 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5107 usc_OutReg( info, SICR,
5108 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5109 }
5110
5111
5112 /* enable Master Interrupt Enable bit (MIE) */
5113 usc_EnableMasterIrqBit( info );
5114
5115 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5116 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5117
5118 /* arm RCC underflow interrupt */
5119 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5120 usc_EnableInterrupts(info, MISC);
5121
5122 info->mbre_bit = 0;
5123 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5124 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5125 info->mbre_bit = BIT8;
5126 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5127
5128 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5129 /* Enable DMAEN (Port 7, Bit 14) */
5130 /* This connects the DMA request signal to the ISA bus */
5131 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5132 }
5133
5134 /* DMA Control Register (DCR)
5135 *
5136 * <15..14> 10 Priority mode = Alternating Tx/Rx
5137 * 01 Rx has priority
5138 * 00 Tx has priority
5139 *
5140 * <13> 1 Enable Priority Preempt per DCR<15..14>
5141 * (WARNING DCR<11..10> must be 00 when this is 1)
5142 * 0 Choose activate channel per DCR<11..10>
5143 *
5144 * <12> 0 Little Endian for Array/List
5145 * <11..10> 00 Both Channels can use each bus grant
5146 * <9..6> 0000 reserved
5147 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5148 * <4> 0 1 = drive D/C and S/D pins
5149 * <3> 1 1 = Add one wait state to all DMA cycles.
5150 * <2> 0 1 = Strobe /UAS on every transfer.
5151 * <1..0> 11 Addr incrementing only affects LS24 bits
5152 *
5153 * 0110 0000 0000 1011 = 0x600b
5154 */
5155
5156 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5157 /* PCI adapter does not need DMA wait state */
5158 usc_OutDmaReg( info, DCR, 0xa00b );
5159 }
5160 else
5161 usc_OutDmaReg( info, DCR, 0x800b );
5162
5163
5164 /* Receive DMA mode Register (RDMR)
5165 *
5166 * <15..14> 11 DMA mode = Linked List Buffer mode
5167 * <13> 1 RSBinA/L = store Rx status Block in Array/List entry
5168 * <12> 1 Clear count of List Entry after fetching
5169 * <11..10> 00 Address mode = Increment
5170 * <9> 1 Terminate Buffer on RxBound
5171 * <8> 0 Bus Width = 16bits
5172 * <7..0> ? status Bits (write as 0s)
5173 *
5174 * 1111 0010 0000 0000 = 0xf200
5175 */
5176
5177 usc_OutDmaReg( info, RDMR, 0xf200 );
5178
5179
5180 /* Transmit DMA mode Register (TDMR)
5181 *
5182 * <15..14> 11 DMA mode = Linked List Buffer mode
5183 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5184 * <12> 1 Clear count of List Entry after fetching
5185 * <11..10> 00 Address mode = Increment
5186 * <9> 1 Terminate Buffer on end of frame
5187 * <8> 0 Bus Width = 16bits
5188 * <7..0> ? status Bits (Read Only so write as 0)
5189 *
5190 * 1111 0010 0000 0000 = 0xf200
5191 */
5192
5193 usc_OutDmaReg( info, TDMR, 0xf200 );
5194
5195
5196 /* DMA Interrupt Control Register (DICR)
5197 *
5198 * <15> 1 DMA Interrupt Enable
5199 * <14> 0 1 = Disable IEO from USC
5200 * <13> 0 1 = Don't provide vector during IntAck
5201 * <12> 1 1 = Include status in Vector
5202 * <10..2> 0 reserved, Must be 0s
5203 * <1> 0 1 = Rx DMA Interrupt Enabled
5204 * <0> 0 1 = Tx DMA Interrupt Enabled
5205 *
5206 * 1001 0000 0000 0000 = 0x9000
5207 */
5208
5209 usc_OutDmaReg( info, DICR, 0x9000 );
5210
5211 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5212 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5213 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5214
5215 /* Channel Control Register (CCR)
5216 *
5217 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5218 * <13> 0 Trigger Tx on SW Command Disabled
5219 * <12> 0 Flag Preamble Disabled
5220 * <11..10> 00 Preamble Length
5221 * <9..8> 00 Preamble Pattern
5222 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5223 * <5> 0 Trigger Rx on SW Command Disabled
5224 * <4..0> 0 reserved
5225 *
5226 * 1000 0000 1000 0000 = 0x8080
5227 */
5228
5229 RegValue = 0x8080;
5230
5231 switch ( info->params.preamble_length ) {
5232 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5233 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5234 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5235 }
5236
5237 switch ( info->params.preamble ) {
5238 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5239 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5240 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5241 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5242 }
5243
5244 usc_OutReg( info, CCR, RegValue );
5245
5246
5247 /*
5248 * Burst/Dwell Control Register
5249 *
5250 * <15..8> 0x20 Maximum number of transfers per bus grant
5251 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5252 */
5253
5254 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5255 /* don't limit bus occupancy on PCI adapter */
5256 usc_OutDmaReg( info, BDCR, 0x0000 );
5257 }
5258 else
5259 usc_OutDmaReg( info, BDCR, 0x2000 );
5260
5261 usc_stop_transmitter(info);
5262 usc_stop_receiver(info);
5263
5264 } /* end of usc_set_sdlc_mode() */
5265
5266 /* usc_enable_loopback()
5267 *
5268 * Set the 16C32 for internal loopback mode.
5269 * The TxCLK and RxCLK signals are generated from the BRG0 and
5270 * the TxD is looped back to the RxD internally.
5271 *
5272 * Arguments: info pointer to device instance data
5273 * enable 1 = enable loopback, 0 = disable
5274 * Return Value: None
5275 */
5276 static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5277 {
5278 if (enable) {
5279 /* blank external TXD output */
5280 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5281
5282 /* Clock mode Control Register (CMCR)
5283 *
5284 * <15..14> 00 counter 1 Disabled
5285 * <13..12> 00 counter 0 Disabled
5286 * <11..10> 11 BRG1 Input is TxC Pin
5287 * <9..8> 11 BRG0 Input is TxC Pin
5288 * <7..6> 01 DPLL Input is BRG1 Output
5289 * <5..3> 100 TxCLK comes from BRG0
5290 * <2..0> 100 RxCLK comes from BRG0
5291 *
5292 * 0000 1111 0110 0100 = 0x0f64
5293 */
5294
5295 usc_OutReg( info, CMCR, 0x0f64 );
5296
5297 /* Write 16-bit Time Constant for BRG0 */
5298 /* use clock speed if available, otherwise use 8 for diagnostics */
5299 if (info->params.clock_speed) {
5300 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5301 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5302 else
5303 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5304 } else
5305 usc_OutReg(info, TC0R, (u16)8);
5306
5307 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5308 mode = Continuous Set Bit 0 to enable BRG0. */
5309 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5310
5311 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5312 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5313
5314 /* set Internal Data loopback mode */
5315 info->loopback_bits = 0x300;
5316 outw( 0x0300, info->io_base + CCAR );
5317 } else {
5318 /* enable external TXD output */
5319 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5320
5321 /* clear Internal Data loopback mode */
5322 info->loopback_bits = 0;
5323 outw( 0,info->io_base + CCAR );
5324 }
5325
5326 } /* end of usc_enable_loopback() */
5327
5328 /* usc_enable_aux_clock()
5329 *
5330 * Enable the AUX clock output at the specified frequency.
5331 *
5332 * Arguments:
5333 *
5334 * info pointer to device extension
5335 * data_rate data rate of clock in bits per second
5336 * A data rate of 0 disables the AUX clock.
5337 *
5338 * Return Value: None
5339 */
5340 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5341 {
5342 u32 XtalSpeed;
5343 u16 Tc;
5344
5345 if ( data_rate ) {
5346 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5347 XtalSpeed = 11059200;
5348 else
5349 XtalSpeed = 14745600;
5350
5351
5352 /* Tc = (Xtal/Speed) - 1 */
5353 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5354 /* then rounding up gives a more precise time constant. Instead */
5355 /* of rounding up and then subtracting 1 we just don't subtract */
5356 /* the one in this case. */
5357
5358
5359 Tc = (u16)(XtalSpeed/data_rate);
5360 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5361 Tc--;
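/* Worked example of the rounding rule above (illustrative values only):
 * with XtalSpeed = 14745600 and data_rate = 10000, Tc starts at 1474 and
 * the remainder is 5600; 2*5600 >= 10000 so Tc is left at 1474 (round up).
 * With data_rate = 38400 the division is exact, so Tc-- gives the usual
 * (Xtal/Speed) - 1 = 383. */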
5362
5363 /* Write 16-bit Time Constant for BRG0 */
5364 usc_OutReg( info, TC0R, Tc );
5365
5366 /*
5367 * Hardware Configuration Register (HCR)
5368 * Clear Bit 1, BRG0 mode = Continuous
5369 * Set Bit 0 to enable BRG0.
5370 */
5371
5372 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5373
5374 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5375 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5376 } else {
5377 /* data rate == 0 so turn off BRG0 */
5378 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5379 }
5380
5381 } /* end of usc_enable_aux_clock() */
5382
5383 /*
5384 *
5385 * usc_process_rxoverrun_sync()
5386 *
5387 * This function processes a receive overrun by resetting the
5388 * receive DMA buffers and issuing a Purge Rx FIFO command
5389 * to allow the receiver to continue receiving.
5390 *
5391 * Arguments:
5392 *
5393 * info pointer to device extension
5394 *
5395 * Return Value: None
5396 */
5397 static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5398 {
5399 int start_index;
5400 int end_index;
5401 int frame_start_index;
5402 int start_of_frame_found = FALSE;
5403 int end_of_frame_found = FALSE;
5404 int reprogram_dma = FALSE;
5405
5406 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5407 u32 phys_addr;
5408
5409 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5410 usc_RCmd( info, RCmd_EnterHuntmode );
5411 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5412
5413 /* current_rx_buffer points to the 1st buffer of the next */
5414 /* possibly available receive frame. */
5415
5416 frame_start_index = start_index = end_index = info->current_rx_buffer;
5417
5418 /* Search for an unfinished string of buffers. This means */
5419 /* that a receive frame started (at least one buffer with */
5420 /* count set to zero) but there is no terminating buffer */
5421 /* (status set to non-zero). */
5422
5423 while( !buffer_list[end_index].count )
5424 {
5425 /* Count field has been reset to zero by 16C32. */
5426 /* This buffer is currently in use. */
5427
5428 if ( !start_of_frame_found )
5429 {
5430 start_of_frame_found = TRUE;
5431 frame_start_index = end_index;
5432 end_of_frame_found = FALSE;
5433 }
5434
5435 if ( buffer_list[end_index].status )
5436 {
5437 /* Status field has been set by 16C32. */
5438 /* This is the last buffer of a received frame. */
5439
5440 /* We want to leave the buffers for this frame intact. */
5441 /* Move on to next possible frame. */
5442
5443 start_of_frame_found = FALSE;
5444 end_of_frame_found = TRUE;
5445 }
5446
5447 /* advance to next buffer entry in linked list */
5448 end_index++;
5449 if ( end_index == info->rx_buffer_count )
5450 end_index = 0;
5451
5452 if ( start_index == end_index )
5453 {
5454 /* The entire list has been searched with all Counts == 0 and */
5455 /* all Status == 0. The receive buffers are */
5456 /* completely screwed, reset all receive buffers! */
5457 mgsl_reset_rx_dma_buffers( info );
5458 frame_start_index = 0;
5459 start_of_frame_found = FALSE;
5460 reprogram_dma = TRUE;
5461 break;
5462 }
5463 }
5464
5465 if ( start_of_frame_found && !end_of_frame_found )
5466 {
5467 /* There is an unfinished string of receive DMA buffers */
5468 /* as a result of the receiver overrun. */
5469
5470 /* Reset the buffers for the unfinished frame */
5471 /* and reprogram the receive DMA controller to start */
5472 /* at the 1st buffer of unfinished frame. */
5473
5474 start_index = frame_start_index;
5475
5476 do
5477 {
5478 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5479
5480 /* Adjust index for wrap around. */
5481 if ( start_index == info->rx_buffer_count )
5482 start_index = 0;
5483
5484 } while( start_index != end_index );
5485
5486 reprogram_dma = TRUE;
5487 }
5488
5489 if ( reprogram_dma )
5490 {
5491 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5492 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5493 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5494
5495 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5496
5497 /* This empties the receive FIFO and loads the RCC with RCLR */
5498 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5499
5500 /* program 16C32 with physical address of 1st DMA buffer entry */
5501 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5502 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5503 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5504
5505 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5506 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5507 usc_EnableInterrupts( info, RECEIVE_STATUS );
5508
5509 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5510 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5511
5512 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5513 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5514 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5515 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5516 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5517 else
5518 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5519 }
5520 else
5521 {
5522 /* This empties the receive FIFO and loads the RCC with RCLR */
5523 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5524 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5525 }
5526
5527 } /* end of usc_process_rxoverrun_sync() */
5528
5529 /* usc_stop_receiver()
5530 *
5531 * Disable USC receiver
5532 *
5533 * Arguments: info pointer to device instance data
5534 * Return Value: None
5535 */
5536 static void usc_stop_receiver( struct mgsl_struct *info )
5537 {
5538 if (debug_level >= DEBUG_LEVEL_ISR)
5539 printk("%s(%d):usc_stop_receiver(%s)\n",
5540 __FILE__,__LINE__, info->device_name );
5541
5542 /* Disable receive DMA channel. */
5543 /* This also disables receive DMA channel interrupts */
5544 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5545
5546 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5547 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5548 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5549
5550 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5551
5552 /* This empties the receive FIFO and loads the RCC with RCLR */
5553 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5554 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5555
5556 info->rx_enabled = 0;
5557 info->rx_overflow = 0;
5558 info->rx_rcc_underrun = 0;
5559
5560 } /* end of usc_stop_receiver() */
5561
5562 /* usc_start_receiver()
5563 *
5564 * Enable the USC receiver
5565 *
5566 * Arguments: info pointer to device instance data
5567 * Return Value: None
5568 */
5569 static void usc_start_receiver( struct mgsl_struct *info )
5570 {
5571 u32 phys_addr;
5572
5573 if (debug_level >= DEBUG_LEVEL_ISR)
5574 printk("%s(%d):usc_start_receiver(%s)\n",
5575 __FILE__,__LINE__, info->device_name );
5576
5577 mgsl_reset_rx_dma_buffers( info );
5578 usc_stop_receiver( info );
5579
5580 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5581 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5582
5583 if ( info->params.mode == MGSL_MODE_HDLC ||
5584 info->params.mode == MGSL_MODE_RAW ) {
5585 /* DMA mode Transfers */
5586 /* Program the DMA controller. */
5587 /* Enable the DMA controller end of buffer interrupt. */
5588
5589 /* program 16C32 with physical address of 1st DMA buffer entry */
5590 phys_addr = info->rx_buffer_list[0].phys_entry;
5591 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5592 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5593
5594 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5595 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5596 usc_EnableInterrupts( info, RECEIVE_STATUS );
5597
5598 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5599 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5600
5601 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5602 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5603 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5604 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5605 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5606 else
5607 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5608 } else {
5609 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5610 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5611 usc_EnableInterrupts(info, RECEIVE_DATA);
5612
5613 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5614 usc_RCmd( info, RCmd_EnterHuntmode );
5615
5616 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5617 }
5618
5619 usc_OutReg( info, CCSR, 0x1020 );
5620
5621 info->rx_enabled = 1;
5622
5623 } /* end of usc_start_receiver() */
5624
5625 /* usc_start_transmitter()
5626 *
5627 * Enable the USC transmitter and send a transmit frame if
5628 * one is loaded in the DMA buffers.
5629 *
5630 * Arguments: info pointer to device instance data
5631 * Return Value: None
5632 */
5633 static void usc_start_transmitter( struct mgsl_struct *info )
5634 {
5635 u32 phys_addr;
5636 unsigned int FrameSize;
5637
5638 if (debug_level >= DEBUG_LEVEL_ISR)
5639 printk("%s(%d):usc_start_transmitter(%s)\n",
5640 __FILE__,__LINE__, info->device_name );
5641
5642 if ( info->xmit_cnt ) {
5643
5644 /* If auto RTS enabled and RTS is inactive, then assert */
5645 /* RTS and set a flag indicating that the driver should */
5646 /* negate RTS when the transmission completes. */
5647
5648 info->drop_rts_on_tx_done = 0;
5649
5650 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5651 usc_get_serial_signals( info );
5652 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5653 info->serial_signals |= SerialSignal_RTS;
5654 usc_set_serial_signals( info );
5655 info->drop_rts_on_tx_done = 1;
5656 }
5657 }
5658
5659
5660 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5661 if ( !info->tx_active ) {
5662 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5663 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5664 usc_EnableInterrupts(info, TRANSMIT_DATA);
5665 usc_load_txfifo(info);
5666 }
5667 } else {
5668 /* Disable transmit DMA controller while programming. */
5669 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5670
5671 /* Transmit DMA buffer is loaded, so program USC */
5672 /* to send the frame contained in the buffers. */
5673
5674 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5675
5676 /* if operating in Raw sync mode, reset the rcc component
5677 * of the tx dma buffer entry, otherwise, the serial controller
5678 * will send a closing sync char after this count.
5679 */
5680 if ( info->params.mode == MGSL_MODE_RAW )
5681 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5682
5683 /* Program the Transmit Character Length Register (TCLR) */
5684 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5685 usc_OutReg( info, TCLR, (u16)FrameSize );
5686
5687 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5688
5689 /* Program the address of the 1st DMA Buffer Entry in linked list */
5690 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5691 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5692 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5693
5694 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5695 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5696 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5697
5698 if ( info->params.mode == MGSL_MODE_RAW &&
5699 info->num_tx_dma_buffers > 1 ) {
5700 /* When running external sync mode, attempt to 'stream' transmit */
5701 /* by filling tx dma buffers as they become available. To do this */
5702 /* we need to enable Tx DMA EOB Status interrupts : */
5703 /* */
5704 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5705 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5706
5707 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5708 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5709 }
5710
5711 /* Initialize Transmit DMA Channel */
5712 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5713
5714 usc_TCmd( info, TCmd_SendFrame );
5715
5716 info->tx_timer.expires = jiffies + msecs_to_jiffies(5000);
5717 add_timer(&info->tx_timer);
5718 }
5719 info->tx_active = 1;
5720 }
5721
5722 if ( !info->tx_enabled ) {
5723 info->tx_enabled = 1;
5724 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5725 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5726 else
5727 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5728 }
5729
5730 } /* end of usc_start_transmitter() */
5731
5732 /* usc_stop_transmitter()
5733 *
5734 * Stops the transmitter and DMA
5735 *
5736 * Arguments: info pointer to device instance data
5737 * Return Value: None
5738 */
5739 static void usc_stop_transmitter( struct mgsl_struct *info )
5740 {
5741 if (debug_level >= DEBUG_LEVEL_ISR)
5742 printk("%s(%d):usc_stop_transmitter(%s)\n",
5743 __FILE__,__LINE__, info->device_name );
5744
5745 del_timer(&info->tx_timer);
5746
5747 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5748 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5749 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5750
5751 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5752 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5753 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5754
5755 info->tx_enabled = 0;
5756 info->tx_active = 0;
5757
5758 } /* end of usc_stop_transmitter() */
5759
5760 /* usc_load_txfifo()
5761 *
5762 * Fill the transmit FIFO until the FIFO is full or
5763 * there is no more data to load.
5764 *
5765 * Arguments: info pointer to device extension (instance data)
5766 * Return Value: None
5767 */
5768 static void usc_load_txfifo( struct mgsl_struct *info )
5769 {
5770 int Fifocount;
5771 u8 TwoBytes[2];
5772
5773 if ( !info->xmit_cnt && !info->x_char )
5774 return;
5775
5776 /* Select transmit FIFO status readback in TICR */
5777 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5778
5779 /* load the Transmit FIFO until FIFOs full or all data sent */
5780
5781 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5782 /* there is more space in the transmit FIFO and */
5783 /* there is more data in transmit buffer */
5784
5785 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5786 /* write a 16-bit word from transmit buffer to 16C32 */
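/* The two queued bytes go out as a single 16-bit write to the transmit
 * data register; usc_reset() selects little endian byte ordering on the
 * USC (RTCmd_SelectLittleEndian), so TwoBytes[0] should be the first
 * byte transmitted (an assumption based on that setting). */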
5787
5788 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5789 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5790 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5791 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5792
5793 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5794
5795 info->xmit_cnt -= 2;
5796 info->icount.tx += 2;
5797 } else {
5798 /* only 1 byte left to transmit or 1 FIFO slot left */
5799
5800 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5801 info->io_base + CCAR );
5802
5803 if (info->x_char) {
5804 /* transmit pending high priority char */
5805 outw( info->x_char,info->io_base + CCAR );
5806 info->x_char = 0;
5807 } else {
5808 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5809 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5810 info->xmit_cnt--;
5811 }
5812 info->icount.tx++;
5813 }
5814 }
5815
5816 } /* end of usc_load_txfifo() */
5817
5818 /* usc_reset()
5819 *
5820 * Reset the adapter to a known state and prepare it for further use.
5821 *
5822 * Arguments: info pointer to device instance data
5823 * Return Value: None
5824 */
5825 static void usc_reset( struct mgsl_struct *info )
5826 {
5827 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5828 int i;
5829 u32 readval;
5830
5831 /* Set BIT30 of Misc Control Register */
5832 /* (Local Control Register 0x50) to force reset of USC. */
5833
5834 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5835 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5836
5837 info->misc_ctrl_value |= BIT30;
5838 *MiscCtrl = info->misc_ctrl_value;
5839
5840 /*
5841 * Force at least 170ns delay before clearing
5842 * reset bit. Each read from LCR takes at least
5843 * 30ns so 10 times for 300ns to be safe.
5844 */
5845 for(i=0;i<10;i++)
5846 readval = *MiscCtrl;
5847
5848 info->misc_ctrl_value &= ~BIT30;
5849 *MiscCtrl = info->misc_ctrl_value;
5850
5851 *LCR0BRDR = BUS_DESCRIPTOR(
5852 1, // Write Strobe Hold (0-3)
5853 2, // Write Strobe Delay (0-3)
5854 2, // Read Strobe Delay (0-3)
5855 0, // NWDD (Write data-data) (0-3)
5856 4, // NWAD (Write Addr-data) (0-31)
5857 0, // NXDA (Read/Write Data-Addr) (0-3)
5858 0, // NRDD (Read Data-Data) (0-3)
5859 5 // NRAD (Read Addr-Data) (0-31)
5860 );
5861 } else {
5862 /* do HW reset */
5863 outb( 0,info->io_base + 8 );
5864 }
5865
5866 info->mbre_bit = 0;
5867 info->loopback_bits = 0;
5868 info->usc_idle_mode = 0;
5869
5870 /*
5871 * Program the Bus Configuration Register (BCR)
5872 *
5873 * <15> 0 Don't use separate address
5874 * <14..6> 0 reserved
5875 * <5..4> 00 IAckmode = Default, don't care
5876 * <3> 1 Bus Request Totem Pole output
5877 * <2> 1 Use 16 Bit data bus
5878 * <1> 0 IRQ Totem Pole output
5879 * <0> 0 Don't Shift Right Addr
5880 *
5881 * 0000 0000 0000 1100 = 0x000c
5882 *
5883 * By writing to io_base + SDPIN the Wait/Ack pin is
5884 * programmed to work as a Wait pin.
5885 */
5886
5887 outw( 0x000c,info->io_base + SDPIN );
5888
5889
5890 outw( 0,info->io_base );
5891 outw( 0,info->io_base + CCAR );
5892
5893 /* select little endian byte ordering */
5894 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5895
5896
5897 /* Port Control Register (PCR)
5898 *
5899 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5900 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5901 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5902 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5903 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5904 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5905 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5906 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5907 *
5908 * 1111 0000 1111 0101 = 0xf0f5
5909 */
5910
5911 usc_OutReg( info, PCR, 0xf0f5 );
5912
5913
5914 /*
5915 * Input/Output Control Register
5916 *
5917 * <15..14> 00 CTS is active low input
5918 * <13..12> 00 DCD is active low input
5919 * <11..10> 00 TxREQ pin is input (DSR)
5920 * <9..8> 00 RxREQ pin is input (RI)
5921 * <7..6> 00 TxD is output (Transmit Data)
5922 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock)
5923 * <2..0> 100 RxC is Output (drive with BRG0)
5924 *
5925 * 0000 0000 0000 0100 = 0x0004
5926 */
5927
5928 usc_OutReg( info, IOCR, 0x0004 );
5929
5930 } /* end of usc_reset() */
5931
5932 /* usc_set_async_mode()
5933 *
5934 * Program adapter for asynchronous communications.
5935 *
5936 * Arguments: info pointer to device instance data
5937 * Return Value: None
5938 */
5939 static void usc_set_async_mode( struct mgsl_struct *info )
5940 {
5941 u16 RegValue;
5942
5943 /* disable interrupts while programming USC */
5944 usc_DisableMasterIrqBit( info );
5945
5946 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5947 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5948
5949 usc_loopback_frame( info );
5950
5951 /* Channel mode Register (CMR)
5952 *
5953 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5954 * <13..12> 00 00 = 16X Clock
5955 * <11..8> 0000 Transmitter mode = Asynchronous
5956 * <7..6> 00 reserved?
5957 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5958 * <3..0> 0000 Receiver mode = Asynchronous
5959 *
5960 * 0000 0000 0000 0000 = 0x0
5961 */
5962
5963 RegValue = 0;
5964 if ( info->params.stop_bits != 1 )
5965 RegValue |= BIT14;
5966 usc_OutReg( info, CMR, RegValue );
5967
5968
5969 /* Receiver mode Register (RMR)
5970 *
5971 * <15..13> 000 encoding = None
5972 * <12..08> 00000 reserved (Sync Only)
5973 * <7..6> 00 Even parity
5974 * <5> 0 parity disabled
5975 * <4..2> 000 Receive Char Length = 8 bits
5976 * <1..0> 00 Disable Receiver
5977 *
5978 * 0000 0000 0000 0000 = 0x0
5979 */
5980
5981 RegValue = 0;
5982
5983 if ( info->params.data_bits != 8 )
5984 RegValue |= BIT4+BIT3+BIT2;
5985
5986 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5987 RegValue |= BIT5;
5988 if ( info->params.parity != ASYNC_PARITY_ODD )
5989 RegValue |= BIT6;
5990 }
5991
5992 usc_OutReg( info, RMR, RegValue );
5993
5994
5995 /* Set IRQ trigger level */
5996
5997 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5998
5999
6000 /* Receive Interrupt Control Register (RICR)
6001 *
6002 * <15..8> ? RxFIFO IRQ Request Level
6003 *
6004 * Note: For async mode the receive FIFO level must be set
6005 * to 0 to avoid the situation where the FIFO contains fewer bytes
6006 * than the trigger level and no more data is expected.
6007 *
6008 * <7> 0 Exited Hunt IA (Interrupt Arm)
6009 * <6> 0 Idle Received IA
6010 * <5> 0 Break/Abort IA
6011 * <4> 0 Rx Bound IA
6012 * <3> 0 Queued status reflects oldest byte in FIFO
6013 * <2> 0 Abort/PE IA
6014 * <1> 0 Rx Overrun IA
6015 * <0> 0 Select TC0 value for readback
6016 *
6017 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
6018 */
6019
6020 usc_OutReg( info, RICR, 0x0000 );
6021
6022 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
6023 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
6024
6025
6026 /* Transmit mode Register (TMR)
6027 *
6028 * <15..13> 000 encoding = None
6029 * <12..08> 00000 reserved (Sync Only)
6030 * <7..6> 00 Transmit parity Even
6031 * <5> 0 Transmit parity Disabled
6032 * <4..2> 000 Tx Char Length = 8 bits
6033 * <1..0> 00 Disable Transmitter
6034 *
6035 * 0000 0000 0000 0000 = 0x0
6036 */
6037
6038 RegValue = 0;
6039
6040 if ( info->params.data_bits != 8 )
6041 RegValue |= BIT4+BIT3+BIT2;
6042
6043 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6044 RegValue |= BIT5;
6045 if ( info->params.parity != ASYNC_PARITY_ODD )
6046 RegValue |= BIT6;
6047 }
6048
6049 usc_OutReg( info, TMR, RegValue );
6050
6051 usc_set_txidle( info );
6052
6053
6054 /* Set IRQ trigger level */
6055
6056 usc_TCmd( info, TCmd_SelectTicrIntLevel );
6057
6058
6059 /* Transmit Interrupt Control Register (TICR)
6060 *
6061 * <15..8> ? Transmit FIFO IRQ Level
6062 * <7> 0 Present IA (Interrupt Arm)
6063 * <6> 1 Idle Sent IA
6064 * <5> 0 Abort Sent IA
6065 * <4> 0 EOF/EOM Sent IA
6066 * <3> 0 CRC Sent IA
6067 * <2> 0 1 = Wait for SW Trigger to Start Frame
6068 * <1> 0 Tx Underrun IA
6069 * <0> 0 TC0 constant on read back
6070 *
6071 * 0000 0000 0100 0000 = 0x0040
6072 */
6073
6074 usc_OutReg( info, TICR, 0x1f40 );
6075
6076 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6077 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6078
6079 usc_enable_async_clock( info, info->params.data_rate );
6080
6081
6082 /* Channel Control/status Register (CCSR)
6083 *
6084 * <15> X RCC FIFO Overflow status (RO)
6085 * <14> X RCC FIFO Not Empty status (RO)
6086 * <13> 0 1 = Clear RCC FIFO (WO)
6087 * <12> X DPLL in Sync status (RO)
6088 * <11> X DPLL 2 Missed Clocks status (RO)
6089 * <10> X DPLL 1 Missed Clock status (RO)
6090 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6091 * <7> X SDLC Loop On status (RO)
6092 * <6> X SDLC Loop Send status (RO)
6093 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6094 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6095 * <1..0> 00 reserved
6096 *
6097 * 0000 0000 0010 0000 = 0x0020
6098 */
6099
6100 usc_OutReg( info, CCSR, 0x0020 );
6101
6102 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6103 RECEIVE_DATA + RECEIVE_STATUS );
6104
6105 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6106 RECEIVE_DATA + RECEIVE_STATUS );
6107
6108 usc_EnableMasterIrqBit( info );
6109
6110 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6111 /* Enable INTEN (Port 6, Bit12) */
6112 /* This connects the IRQ request signal to the ISA bus */
6113 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6114 }
6115
6116 if (info->params.loopback) {
6117 info->loopback_bits = 0x300;
6118 outw(0x0300, info->io_base + CCAR);
6119 }
6120
6121 } /* end of usc_set_async_mode() */
6122
6123 /* usc_loopback_frame()
6124 *
6125 * Loop back a small (2 byte) dummy SDLC frame.
6126 * Interrupts and DMA are NOT used. The purpose of this is to
6127 * clear any 'stale' status info left over from running in async mode.
6128 *
6129 * The 16C32 shows the strange behaviour of marking the 1st
6130 * received SDLC frame with a CRC error even when there is no
6131 * CRC error. To get around this a small dummy frame of 2 bytes
6132 * is looped back when switching from async to sync mode.
6133 *
6134 * Arguments: info pointer to device instance data
6135 * Return Value: None
6136 */
6137 static void usc_loopback_frame( struct mgsl_struct *info )
6138 {
6139 int i;
6140 unsigned long oldmode = info->params.mode;
6141
6142 info->params.mode = MGSL_MODE_HDLC;
6143
6144 usc_DisableMasterIrqBit( info );
6145
6146 usc_set_sdlc_mode( info );
6147 usc_enable_loopback( info, 1 );
6148
6149 /* Write 16-bit Time Constant for BRG0 */
6150 usc_OutReg( info, TC0R, 0 );
6151
6152 /* Channel Control Register (CCR)
6153 *
6154 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6155 * <13> 0 Trigger Tx on SW Command Disabled
6156 * <12> 0 Flag Preamble Disabled
6157 * <11..10> 00 Preamble Length = 8-Bits
6158 * <9..8> 01 Preamble Pattern = flags
6159 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6160 * <5> 0 Trigger Rx on SW Command Disabled
6161 * <4..0> 0 reserved
6162 *
6163 * 0000 0001 0000 0000 = 0x0100
6164 */
6165
6166 usc_OutReg( info, CCR, 0x0100 );
6167
6168 /* SETUP RECEIVER */
6169 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6170 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6171
6172 /* SETUP TRANSMITTER */
6173 /* Program the Transmit Character Length Register (TCLR) */
6174 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6175 usc_OutReg( info, TCLR, 2 );
6176 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6177
6178 /* unlatch Tx status bits, and start transmit channel. */
6179 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6180 outw(0,info->io_base + DATAREG);
6181
6182 /* ENABLE TRANSMITTER */
6183 usc_TCmd( info, TCmd_SendFrame );
6184 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6185
6186 /* WAIT FOR RECEIVE COMPLETE */
6187 for (i=0 ; i<1000 ; i++)
6188 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6189 break;
6190
6191 /* clear Internal Data loopback mode */
6192 usc_enable_loopback(info, 0);
6193
6194 usc_EnableMasterIrqBit(info);
6195
6196 info->params.mode = oldmode;
6197
6198 } /* end of usc_loopback_frame() */
6199
6200 /* usc_set_sync_mode() Programs the USC for SDLC communications.
6201 *
6202 * Arguments: info pointer to adapter info structure
6203 * Return Value: None
6204 */
6205 static void usc_set_sync_mode( struct mgsl_struct *info )
6206 {
6207 usc_loopback_frame( info );
6208 usc_set_sdlc_mode( info );
6209
6210 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6211 /* Enable INTEN (Port 6, Bit12) */
6212 /* This connects the IRQ request signal to the ISA bus */
6213 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6214 }
6215
6216 usc_enable_aux_clock(info, info->params.clock_speed);
6217
6218 if (info->params.loopback)
6219 usc_enable_loopback(info,1);
6220
6221 } /* end of usc_set_sync_mode() */
6222
6223 /* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6224 *
6225 * Arguments: info pointer to device instance data
6226 * Return Value: None
6227 */
6228 static void usc_set_txidle( struct mgsl_struct *info )
6229 {
6230 u16 usc_idle_mode = IDLEMODE_FLAGS;
6231
6232 /* Map API idle mode to USC register bits */
6233
6234 switch( info->idle_mode ){
6235 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6236 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6237 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6238 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6239 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6240 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6241 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6242 }
6243
6244 info->usc_idle_mode = usc_idle_mode;
6245 //usc_OutReg(info, TCSR, usc_idle_mode);
6246 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6247 info->tcsr_value += usc_idle_mode;
6248 usc_OutReg(info, TCSR, info->tcsr_value);
6249
6250 /*
6251 * if SyncLink WAN adapter is running in external sync mode, the
6252 * transmitter has been set to Monosync in order to try to mimic
6253 * a true raw outbound bit stream. Monosync still sends an open/close
6254 * sync char at the start/end of a frame. Try to match those sync
6255 * patterns to the idle mode set here.
6256 */
6257 if ( info->params.mode == MGSL_MODE_RAW ) {
6258 unsigned char syncpat = 0;
6259 switch( info->idle_mode ) {
6260 case HDLC_TXIDLE_FLAGS:
6261 syncpat = 0x7e;
6262 break;
6263 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6264 syncpat = 0x55;
6265 break;
6266 case HDLC_TXIDLE_ZEROS:
6267 case HDLC_TXIDLE_SPACE:
6268 syncpat = 0x00;
6269 break;
6270 case HDLC_TXIDLE_ONES:
6271 case HDLC_TXIDLE_MARK:
6272 syncpat = 0xff;
6273 break;
6274 case HDLC_TXIDLE_ALT_MARK_SPACE:
6275 syncpat = 0xaa;
6276 break;
6277 }
6278
6279 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6280 }
6281
6282 } /* end of usc_set_txidle() */
6283
6284 /* usc_get_serial_signals()
6285 *
6286 * Query the adapter for the state of the V24 status (input) signals.
6287 *
6288 * Arguments: info pointer to device instance data
6289 * Return Value: None
6290 */
6291 static void usc_get_serial_signals( struct mgsl_struct *info )
6292 {
6293 u16 status;
6294
6295 /* clear all serial signals except DTR and RTS */
6296 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6297
6298 /* Read the Misc Interrupt status Register (MISR) to get */
6299 /* the V24 status signals. */
6300
6301 status = usc_InReg( info, MISR );
6302
6303 /* set serial signal bits to reflect MISR */
6304
6305 if ( status & MISCSTATUS_CTS )
6306 info->serial_signals |= SerialSignal_CTS;
6307
6308 if ( status & MISCSTATUS_DCD )
6309 info->serial_signals |= SerialSignal_DCD;
6310
6311 if ( status & MISCSTATUS_RI )
6312 info->serial_signals |= SerialSignal_RI;
6313
6314 if ( status & MISCSTATUS_DSR )
6315 info->serial_signals |= SerialSignal_DSR;
6316
6317 } /* end of usc_get_serial_signals() */
6318
6319 /* usc_set_serial_signals()
6320 *
6321 * Set the state of DTR and RTS based on contents of
6322 * serial_signals member of device extension.
6323 *
6324 * Arguments: info pointer to device instance data
6325 * Return Value: None
6326 */
6327 static void usc_set_serial_signals( struct mgsl_struct *info )
6328 {
6329 u16 Control;
6330 unsigned char V24Out = info->serial_signals;
6331
6332 /* get the current value of the Port Control Register (PCR) */
6333
6334 Control = usc_InReg( info, PCR );
6335
6336 if ( V24Out & SerialSignal_RTS )
6337 Control &= ~(BIT6);
6338 else
6339 Control |= BIT6;
6340
6341 if ( V24Out & SerialSignal_DTR )
6342 Control &= ~(BIT4);
6343 else
6344 Control |= BIT4;
6345
6346 usc_OutReg( info, PCR, Control );
6347
6348 } /* end of usc_set_serial_signals() */
6349
6350 /* usc_enable_async_clock()
6351 *
6352 * Enable the async clock at the specified frequency.
6353 *
6354 * Arguments: info pointer to device instance data
6355 * data_rate data rate of clock in bps
6356 * 0 disables the AUX clock.
6357 * Return Value: None
6358 */
6359 static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6360 {
6361 if ( data_rate ) {
6362 /*
6363 * Clock mode Control Register (CMCR)
6364 *
6365 * <15..14> 00 counter 1 Disabled
6366 * <13..12> 00 counter 0 Disabled
6367 * <11..10> 11 BRG1 Input is TxC Pin
6368 * <9..8> 11 BRG0 Input is TxC Pin
6369 * <7..6> 01 DPLL Input is BRG1 Output
6370 * <5..3> 100 TxCLK comes from BRG0
6371 * <2..0> 100 RxCLK comes from BRG0
6372 *
6373 * 0000 1111 0110 0100 = 0x0f64
6374 */
6375
6376 usc_OutReg( info, CMCR, 0x0f64 );
6377
6378
6379 /*
6380 * Write 16-bit Time Constant for BRG0
6381 * Time Constant = (ClkSpeed / data_rate) - 1
6382 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6383 */
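/* For example: 9600 bps async on an ISA adapter gives
 * TC0R = 921600/9600 - 1 = 95 (PCI: 691200/9600 - 1 = 71).
 * The 921600/691200 base rates are the 14.7456/11.0592 MHz crystals
 * divided by the 16X clock submode selected in CMR for async mode. */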
6384
6385 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6386 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6387 else
6388 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
6389
6390
6391 /*
6392 * Hardware Configuration Register (HCR)
6393 * Clear Bit 1, BRG0 mode = Continuous
6394 * Set Bit 0 to enable BRG0.
6395 */
6396
6397 usc_OutReg( info, HCR,
6398 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6399
6400
6401 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6402
6403 usc_OutReg( info, IOCR,
6404 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6405 } else {
6406 /* data rate == 0 so turn off BRG0 */
6407 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6408 }
6409
6410 } /* end of usc_enable_async_clock() */
6411
6412 /*
6413 * Buffer Structures:
6414 *
6415 * Normal memory access uses virtual addresses that can make discontiguous
6416 * physical memory pages appear to be contiguous in the virtual address
6417 * space (the processor's memory mapping handles the conversions).
6418 *
6419 * DMA transfers require physically contiguous memory. This is because
6420 * the DMA system controller and DMA bus masters deal with memory using
6421 * only physical addresses.
6422 *
6423 * This causes a problem under Windows NT when large DMA buffers are
6424 * needed. Fragmentation of the nonpaged pool prevents allocations of
6425 * physically contiguous buffers larger than the PAGE_SIZE.
6426 *
6427 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6428 * allows DMA transfers to physically discontiguous buffers. Information
6429 * about each data transfer buffer is contained in a memory structure
6430 * called a 'buffer entry'. A list of buffer entries is maintained
6431 * to track and control the use of the data transfer buffers.
6432 *
6433 * To support this strategy we will allocate sufficient PAGE_SIZE
6434 * contiguous memory buffers to allow for the total required buffer
6435 * space.
6436 *
6437 * The 16C32 accesses the list of buffer entries using Bus Master
6438 * DMA. Control information is read from the buffer entries by the
6439 * 16C32 to control data transfers. status information is written to
6440 * the buffer entries by the 16C32 to indicate the status of completed
6441 * transfers.
6442 *
6443 * The CPU writes control information to the buffer entries to control
6444 * the 16C32 and reads status information from the buffer entries to
6445 * determine information about received and transmitted frames.
6446 *
6447 * Because the CPU and 16C32 (adapter) both need simultaneous access
6448 * to the buffer entries, the buffer entry memory is allocated with
6449 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6450 * entry list to PAGE_SIZE.
6451 *
6452 * The actual data buffers on the other hand will only be accessed
6453 * by the CPU or the adapter but not by both simultaneously. This allows
6454 * Scatter/Gather packet based DMA procedures for using physically
6455 * discontiguous pages.
6456 */
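/*
 * As a rough guide (the authoritative layout is the DMABUFFERENTRY
 * definition in the driver header), the buffer entry fields touched in
 * this section are:
 *
 *	count		byte count; set by the CPU, cleared by the 16C32
 *			when it takes a receive buffer
 *	status		written by the 16C32 when a frame ends in the buffer
 *	rcc		residual character count / frame size for the buffer
 *	phys_entry	physical address of the entry, loaded into
 *			NRARL/NRARU (Rx) or NTARL/NTARU (Tx)
 *	virt_addr	CPU-visible address of the data buffer itself
 */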
6457
6458 /*
6459 * mgsl_reset_tx_dma_buffers()
6460 *
6461 * Set the count for all transmit buffers to 0 to indicate the
6462 * buffer is available for use and set the current buffer to the
6463 * first buffer. This effectively makes all buffers free and
6464 * discards any data in buffers.
6465 *
6466 * Arguments: info pointer to device instance data
6467 * Return Value: None
6468 */
6469 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6470 {
6471 unsigned int i;
6472
6473 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6474 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6475 }
6476
6477 info->current_tx_buffer = 0;
6478 info->start_tx_dma_buffer = 0;
6479 info->tx_dma_buffers_used = 0;
6480
6481 info->get_tx_holding_index = 0;
6482 info->put_tx_holding_index = 0;
6483 info->tx_holding_count = 0;
6484
6485 } /* end of mgsl_reset_tx_dma_buffers() */
6486
6487 /*
6488 * num_free_tx_dma_buffers()
6489 *
6490 * returns the number of free tx dma buffers available
6491 *
6492 * Arguments: info pointer to device instance data
6493 * Return Value: number of free tx dma buffers
6494 */
6495 static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6496 {
6497 return info->tx_buffer_count - info->tx_dma_buffers_used;
6498 }
6499
6500 /*
6501 * mgsl_reset_rx_dma_buffers()
6502 *
6503 * Set the count for all receive buffers to DMABUFFERSIZE
6504 * and set the current buffer to the first buffer. This effectively
6505 * makes all buffers free and discards any data in buffers.
6506 *
6507 * Arguments: info pointer to device instance data
6508 * Return Value: None
6509 */
6510 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6511 {
6512 unsigned int i;
6513
6514 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6515 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6516 // info->rx_buffer_list[i].count = DMABUFFERSIZE;
6517 // info->rx_buffer_list[i].status = 0;
6518 }
6519
6520 info->current_rx_buffer = 0;
6521
6522 } /* end of mgsl_reset_rx_dma_buffers() */
6523
6524 /*
6525 * mgsl_free_rx_frame_buffers()
6526 *
6527 * Free the receive buffers used by a received SDLC
6528 * frame such that the buffers can be reused.
6529 *
6530 * Arguments:
6531 *
6532 * info pointer to device instance data
6533 * StartIndex index of 1st receive buffer of frame
6534 * EndIndex index of last receive buffer of frame
6535 *
6536 * Return Value: None
6537 */
6538 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6539 {
6540 int Done = 0;
6541 DMABUFFERENTRY *pBufEntry;
6542 unsigned int Index;
6543
6544 /* Starting with 1st buffer entry of the frame clear the status */
6545 /* field and set the count field to DMA Buffer Size. */
6546
6547 Index = StartIndex;
6548
6549 while( !Done ) {
6550 pBufEntry = &(info->rx_buffer_list[Index]);
6551
6552 if ( Index == EndIndex ) {
6553 /* This is the last buffer of the frame! */
6554 Done = 1;
6555 }
6556
6557 /* reset current buffer for reuse */
6558 // pBufEntry->status = 0;
6559 // pBufEntry->count = DMABUFFERSIZE;
6560 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6561
6562 /* advance to next buffer entry in linked list */
6563 Index++;
6564 if ( Index == info->rx_buffer_count )
6565 Index = 0;
6566 }
6567
6568 /* set current buffer to next buffer after last buffer of frame */
6569 info->current_rx_buffer = Index;
6570
6571 } /* end of mgsl_free_rx_frame_buffers() */
6572
6573 /* mgsl_get_rx_frame()
6574 *
6575 * This function attempts to return a received SDLC frame from the
6576 * receive DMA buffers. Only frames received without errors are returned.
6577 *
6578 * Arguments: info pointer to device extension
6579 * Return Value: 1 if frame returned, otherwise 0
6580 */
6581 static int mgsl_get_rx_frame(struct mgsl_struct *info)
6582 {
6583 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6584 unsigned short status;
6585 DMABUFFERENTRY *pBufEntry;
6586 unsigned int framesize = 0;
6587 int ReturnCode = 0;
6588 unsigned long flags;
6589 struct tty_struct *tty = info->tty;
6590 int return_frame = 0;
6591
6592 /*
6593 * current_rx_buffer points to the 1st buffer of the next available
6594 * receive frame. To find the last buffer of the frame look for
6595 * a non-zero status field in the buffer entries. (The status
6596 * field is set by the 16C32 after completing a receive frame.)
6597 */
6598
6599 StartIndex = EndIndex = info->current_rx_buffer;
6600
6601 while( !info->rx_buffer_list[EndIndex].status ) {
6602 /*
6603 * If the count field of the buffer entry is non-zero then
6604 * this buffer has not been used. (The 16C32 clears the count
6605 * field when it starts using the buffer.) If an unused buffer
6606 * is encountered then there are no frames available.
6607 */
6608
6609 if ( info->rx_buffer_list[EndIndex].count )
6610 goto Cleanup;
6611
6612 /* advance to next buffer entry in linked list */
6613 EndIndex++;
6614 if ( EndIndex == info->rx_buffer_count )
6615 EndIndex = 0;
6616
6617 /* if entire list searched then no frame available */
6618 if ( EndIndex == StartIndex ) {
6619 /* If this occurs then something bad happened,
6620 * all buffers have been 'used' but none mark
6621 * the end of a frame. Reset buffers and receiver.
6622 */
6623
6624 if ( info->rx_enabled ){
6625 spin_lock_irqsave(&info->irq_spinlock,flags);
6626 usc_start_receiver(info);
6627 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6628 }
6629 goto Cleanup;
6630 }
6631 }
6632
6633
6634 /* check status of receive frame */
6635
6636 status = info->rx_buffer_list[EndIndex].status;
6637
6638 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6639 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6640 if ( status & RXSTATUS_SHORT_FRAME )
6641 info->icount.rxshort++;
6642 else if ( status & RXSTATUS_ABORT )
6643 info->icount.rxabort++;
6644 else if ( status & RXSTATUS_OVERRUN )
6645 info->icount.rxover++;
6646 else {
6647 info->icount.rxcrc++;
6648 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6649 return_frame = 1;
6650 }
6651 framesize = 0;
6652 #if SYNCLINK_GENERIC_HDLC
6653 {
6654 struct net_device_stats *stats = hdlc_stats(info->netdev);
6655 stats->rx_errors++;
6656 stats->rx_frame_errors++;
6657 }
6658 #endif
6659 } else
6660 return_frame = 1;
6661
6662 if ( return_frame ) {
6663 /* receive frame has no errors, get frame size.
6664 * The frame size is the starting value of the RCC (which was
6665 * set to 0xffff) minus the ending value of the RCC (decremented
6666 * once for each receive character) minus 2 or 4 for the 16-bit or 32-bit CRC.
6667 */
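/* e.g. with RCLRVALUE == 0xffff (per the note above) and a hypothetical
 * ending RCC of 0xffd0, framesize starts at 0xffff - 0xffd0 = 47 and
 * becomes 45 after the 16-bit CRC adjustment below. */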
6668
6669 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6670
6671 /* adjust frame size for CRC if any */
6672 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6673 framesize -= 2;
6674 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6675 framesize -= 4;
6676 }
6677
6678 if ( debug_level >= DEBUG_LEVEL_BH )
6679 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6680 __FILE__,__LINE__,info->device_name,status,framesize);
6681
6682 if ( debug_level >= DEBUG_LEVEL_DATA )
6683 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6684 min_t(int, framesize, DMABUFFERSIZE),0);
6685
6686 if (framesize) {
6687 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6688 ((framesize+1) > info->max_frame_size) ) ||
6689 (framesize > info->max_frame_size) )
6690 info->icount.rxlong++;
6691 else {
6692 /* copy dma buffer(s) to contiguous intermediate buffer */
6693 int copy_count = framesize;
6694 int index = StartIndex;
6695 unsigned char *ptmp = info->intermediate_rxbuffer;
6696
6697 if ( !(status & RXSTATUS_CRC_ERROR))
6698 info->icount.rxok++;
6699
6700 while(copy_count) {
6701 int partial_count;
6702 if ( copy_count > DMABUFFERSIZE )
6703 partial_count = DMABUFFERSIZE;
6704 else
6705 partial_count = copy_count;
6706
6707 pBufEntry = &(info->rx_buffer_list[index]);
6708 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6709 ptmp += partial_count;
6710 copy_count -= partial_count;
6711
6712 if ( ++index == info->rx_buffer_count )
6713 index = 0;
6714 }
6715
6716 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6717 ++framesize;
6718 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6719 RX_CRC_ERROR :
6720 RX_OK);
6721
6722 if ( debug_level >= DEBUG_LEVEL_DATA )
6723 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6724 __FILE__,__LINE__,info->device_name,
6725 *ptmp);
6726 }
6727
6728 #if SYNCLINK_GENERIC_HDLC
6729 if (info->netcount)
6730 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6731 else
6732 #endif
6733 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6734 }
6735 }
6736 /* Free the buffers used by this frame. */
6737 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6738
6739 ReturnCode = 1;
6740
6741 Cleanup:
6742
6743 if ( info->rx_enabled && info->rx_overflow ) {
6744 /* The receiver needs to be restarted because of
6745 * a receive overflow (buffer or FIFO). If the
6746 * receive buffers are now empty, then restart receiver.
6747 */
6748
6749 if ( !info->rx_buffer_list[EndIndex].status &&
6750 info->rx_buffer_list[EndIndex].count ) {
6751 spin_lock_irqsave(&info->irq_spinlock,flags);
6752 usc_start_receiver(info);
6753 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6754 }
6755 }
6756
6757 return ReturnCode;
6758
6759 } /* end of mgsl_get_rx_frame() */
6760
6761 /* mgsl_get_raw_rx_frame()
6762 *
6763 * This function attempts to return a received frame from the
6764 * receive DMA buffers when running in external loop mode. In this mode,
6765 * we will return at most one DMABUFFERSIZE frame to the application.
6766 * The USC receiver is triggering off of DCD going active to start a new
6767 * frame, and DCD going inactive to terminate the frame (similar to
6768 * processing a closing flag character).
6769 *
6770 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6771 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6772 * status field and the RCC field will indicate the length of the
6773 * entire received frame. We take this RCC field and get the modulus
6774 * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6775 * last Rx DMA buffer and return that last portion of the frame.
6776 *
6777 * Arguments: info pointer to device extension
6778 * Return Value: 1 if frame returned, otherwise 0
6779 */
6780 static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6781 {
6782 unsigned int CurrentIndex, NextIndex;
6783 unsigned short status;
6784 DMABUFFERENTRY *pBufEntry;
6785 unsigned int framesize = 0;
6786 int ReturnCode = 0;
6787 unsigned long flags;
6788 struct tty_struct *tty = info->tty;
6789
6790 /*
6791 * current_rx_buffer points to the 1st buffer of the next available
6792 * receive frame. The status field is set by the 16C32 after
6793 * completing a receive frame. If the status field of this buffer
6794 * is zero, either the USC is still filling this buffer or this
6795 * is one of a series of buffers making up a received frame.
6796 *
6797 * If the count field of this buffer is zero, the USC is either
6798 * using this buffer or has used this buffer. Look at the count
6799 * field of the next buffer. If that next buffer's count is
6800 * non-zero, the USC is still actively using the current buffer.
6801 * Otherwise, if the next buffer's count field is zero, the
6802 * current buffer is complete and the USC is using the next
6803 * buffer.
6804 */
6805 CurrentIndex = NextIndex = info->current_rx_buffer;
6806 ++NextIndex;
6807 if ( NextIndex == info->rx_buffer_count )
6808 NextIndex = 0;
6809
6810 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6811 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6812 info->rx_buffer_list[NextIndex].count == 0)) {
6813 /*
6814 * Either the status field of this dma buffer is non-zero
6815 * (indicating the last buffer of a receive frame) or the next
6816 * buffer is marked as in use -- implying this buffer is complete
6817 * and is an intermediate buffer for this received frame.
6818 */
6819
6820 status = info->rx_buffer_list[CurrentIndex].status;
6821
6822 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6823 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6824 if ( status & RXSTATUS_SHORT_FRAME )
6825 info->icount.rxshort++;
6826 else if ( status & RXSTATUS_ABORT )
6827 info->icount.rxabort++;
6828 else if ( status & RXSTATUS_OVERRUN )
6829 info->icount.rxover++;
6830 else
6831 info->icount.rxcrc++;
6832 framesize = 0;
6833 } else {
6834 /*
6835 * A receive frame is available, get frame size and status.
6836 *
6837 * The frame size is the starting value of the RCC (which was
6838 * set to 0xffff) minus the ending value of the RCC (decremented
6839 * once for each receive character) minus 2 or 4 for the 16-bit
6840 * or 32-bit CRC.
6841 *
6842 * If the status field is zero, this is an intermediate buffer.
6843 * Its size is 4K.
6844 *
6845 * If the DMA Buffer Entry's Status field is non-zero, the
6846 * receive operation completed normally (ie: DCD dropped). The
6847 * RCC field is valid and holds the received frame size.
6848 * It is possible that the RCC field will be zero on a DMA buffer
6849 * entry with a non-zero status. This can occur if the total
6850 * frame size (number of bytes between the time DCD goes active
6851 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6852 * case the 16C32 has underrun on the RCC count and appears to
6853 * stop updating this counter to let us know the actual received
6854 * frame size. If this happens (non-zero status and zero RCC),
6855 * simply return the entire RxDMA Buffer
6856 */
6857 if ( status ) {
6858 /*
6859 * In the event that the final RxDMA Buffer is
6860 * terminated with a non-zero status and the RCC
6861 * field is zero, we interpret this as the RCC
6862 * having underflowed (received frame > 65535 bytes).
6863 *
6864 * Signal the event to the user by passing back
6865 * a status of RxStatus_CrcError returning the full
6866 * buffer and let the app figure out what data is
6867 * actually valid
6868 */
6869 if ( info->rx_buffer_list[CurrentIndex].rcc )
6870 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6871 else
6872 framesize = DMABUFFERSIZE;
6873 }
6874 else
6875 framesize = DMABUFFERSIZE;
6876 }
6877
6878 if ( framesize > DMABUFFERSIZE ) {
6879 /*
6880 * if running in raw sync mode, ISR handler for
6881 * End Of Buffer events terminates all buffers at 4K.
6882 * If this frame size is said to be >4K, get the
6883 * actual number of bytes of the frame in this buffer.
6884 */
6885 framesize = framesize % DMABUFFERSIZE;
6886 }
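/* e.g. assuming the 4K DMABUFFERSIZE mentioned above, a 9000 byte frame
 * reported via RCC leaves 9000 % 4096 = 808 bytes in this final buffer
 * (the earlier 4K chunks were already returned). */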
6887
6888
6889 if ( debug_level >= DEBUG_LEVEL_BH )
6890 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6891 __FILE__,__LINE__,info->device_name,status,framesize);
6892
6893 if ( debug_level >= DEBUG_LEVEL_DATA )
6894 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6895 min_t(int, framesize, DMABUFFERSIZE),0);
6896
6897 if (framesize) {
6898 /* copy dma buffer(s) to contiguous intermediate buffer */
6899 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6900
6901 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6902 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6903 info->icount.rxok++;
6904
6905 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6906 }
6907
6908 /* Free the buffers used by this frame. */
6909 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6910
6911 ReturnCode = 1;
6912 }
6913
6914
6915 if ( info->rx_enabled && info->rx_overflow ) {
6916 /* The receiver needs to be restarted because of
6917 * a receive overflow (buffer or FIFO). If the
6918 * receive buffers are now empty, then restart receiver.
6919 */
6920
6921 if ( !info->rx_buffer_list[CurrentIndex].status &&
6922 info->rx_buffer_list[CurrentIndex].count ) {
6923 spin_lock_irqsave(&info->irq_spinlock,flags);
6924 usc_start_receiver(info);
6925 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6926 }
6927 }
6928
6929 return ReturnCode;
6930
6931 } /* end of mgsl_get_raw_rx_frame() */
6932
6933 /* mgsl_load_tx_dma_buffer()
6934 *
6935 * Load the transmit DMA buffer with the specified data.
6936 *
6937 * Arguments:
6938 *
6939 * info pointer to device extension
6940 * Buffer pointer to buffer containing frame to load
6941 * BufferSize size in bytes of frame in Buffer
6942 *
6943 * Return Value: None
6944 */
6945 static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6946 const char *Buffer, unsigned int BufferSize)
6947 {
6948 unsigned short Copycount;
6949 unsigned int i = 0;
6950 DMABUFFERENTRY *pBufEntry;
6951
6952 if ( debug_level >= DEBUG_LEVEL_DATA )
6953 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6954
6955 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6956 /* set CMR:13 to start transmit when
6957 * next GoAhead (abort) is received
6958 */
6959 info->cmr_value |= BIT13;
6960 }
6961
6962 /* begin loading the frame in the next available tx dma
6963 * buffer, remember it's starting location for setting
6964 * up tx dma operation
6965 */
6966 i = info->current_tx_buffer;
6967 info->start_tx_dma_buffer = i;
6968
6969 /* Setup the status and RCC (Frame Size) fields of the 1st */
6970 /* buffer entry in the transmit DMA buffer list. */
6971
6972 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6973 info->tx_buffer_list[i].rcc = BufferSize;
6974 info->tx_buffer_list[i].count = BufferSize;
6975
6976 /* Copy frame data from 1st source buffer to the DMA buffers. */
6977 /* The frame data may span multiple DMA buffers. */
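/* For example, a hypothetical 10000 byte frame with 4K DMA buffers */
/* would occupy three entries of 4096, 4096 and 1808 bytes. */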
6978
6979 while( BufferSize ){
6980 /* Get a pointer to next DMA buffer entry. */
6981 pBufEntry = &info->tx_buffer_list[i++];
6982
6983 if ( i == info->tx_buffer_count )
6984 i=0;
6985
6986 /* Calculate the number of bytes that can be copied from */
6987 /* the source buffer to this DMA buffer. */
6988 if ( BufferSize > DMABUFFERSIZE )
6989 Copycount = DMABUFFERSIZE;
6990 else
6991 Copycount = BufferSize;
6992
6993 /* Actually copy data from source buffer to DMA buffer. */
6994 /* Also set the data count for this individual DMA buffer. */
6995 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6996 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6997 else
6998 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6999
7000 pBufEntry->count = Copycount;
7001
7002 /* Advance source pointer and reduce remaining data count. */
7003 Buffer += Copycount;
7004 BufferSize -= Copycount;
7005
7006 ++info->tx_dma_buffers_used;
7007 }
7008
7009 /* remember next available tx dma buffer */
7010 info->current_tx_buffer = i;
7011
7012 } /* end of mgsl_load_tx_dma_buffer() */
7013
7014 /*
7015 * mgsl_register_test()
7016 *
7017 * Performs a register test of the 16C32.
7018 *
7019 * Arguments: info pointer to device instance data
7020 * Return Value: TRUE if test passed, otherwise FALSE
7021 */
7022 static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
7023 {
7024 static unsigned short BitPatterns[] =
7025 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
7026 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
7027 unsigned int i;
7028 BOOLEAN rc = TRUE;
7029 unsigned long flags;
7030
7031 spin_lock_irqsave(&info->irq_spinlock,flags);
7032 usc_reset(info);
7033
7034 /* Verify the reset state of some registers. */
7035
7036 if ( (usc_InReg( info, SICR ) != 0) ||
7037 (usc_InReg( info, IVR ) != 0) ||
7038 (usc_InDmaReg( info, DIVR ) != 0) ){
7039 rc = FALSE;
7040 }
7041
7042 if ( rc == TRUE ){
7043 /* Write bit patterns to various registers but do it out of */
7044 /* sync, then read back and verify values. */
7045
7046 for ( i = 0 ; i < Patterncount ; i++ ) {
7047 usc_OutReg( info, TC0R, BitPatterns[i] );
7048 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
7049 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
7050 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
7051 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
7052 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
7053
7054 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
7055 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
7056 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
7057 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
7058 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
7059 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
7060 rc = FALSE;
7061 break;
7062 }
7063 }
7064 }
7065
7066 usc_reset(info);
7067 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7068
7069 return rc;
7070
7071 } /* end of mgsl_register_test() */
7072
7073 /* mgsl_irq_test() Perform interrupt test of the 16C32.
7074 *
7075 * Arguments: info pointer to device instance data
7076 * Return Value: TRUE if test passed, otherwise FALSE
7077 */
7078 static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
7079 {
7080 unsigned long EndTime;
7081 unsigned long flags;
7082
7083 spin_lock_irqsave(&info->irq_spinlock,flags);
7084 usc_reset(info);
7085
7086 /*
7087 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7088 * The ISR sets irq_occurred to 1.
7089 */
7090
7091 info->irq_occurred = FALSE;
7092
7093 /* Enable INTEN gate for ISA adapter (Port 6, Bit12). */
7094 /* This connects the IRQ request signal to the ISA bus */
7095 /* on the ISA adapter. It has no effect for the PCI */
7096 /* adapter. */
7097 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7098
7099 usc_EnableMasterIrqBit(info);
7100 usc_EnableInterrupts(info, IO_PIN);
7101 usc_ClearIrqPendingBits(info, IO_PIN);
7102
7103 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7104 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7105
7106 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7107
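/* Poll up to 100 times at 10ms intervals (roughly one second) for the */
/* ISR to report the test interrupt via info->irq_occurred. */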
7108 EndTime=100;
7109 while( EndTime-- && !info->irq_occurred ) {
7110 msleep_interruptible(10);
7111 }
7112
7113 spin_lock_irqsave(&info->irq_spinlock,flags);
7114 usc_reset(info);
7115 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7116
7117 if ( !info->irq_occurred )
7118 return FALSE;
7119 else
7120 return TRUE;
7121
7122 } /* end of mgsl_irq_test() */
7123
7124 /* mgsl_dma_test()
7125 *
7126 * Perform a DMA test of the 16C32. A small frame is
7127 * transmitted via DMA from a transmit buffer to a receive buffer
7128 * using single buffer DMA mode.
7129 *
7130 * Arguments: info pointer to device instance data
7131 * Return Value: TRUE if test passed, otherwise FALSE
7132 */
7133 static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
7134 {
7135 unsigned short FifoLevel;
7136 unsigned long phys_addr;
7137 unsigned int FrameSize;
7138 unsigned int i;
7139 char *TmpPtr;
7140 BOOLEAN rc = TRUE;
7141 unsigned short status=0;
7142 unsigned long EndTime;
7143 unsigned long flags;
7144 MGSL_PARAMS tmp_params;
7145
7146 /* save current port options */
7147 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7148 /* load default port options */
7149 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7150
7151 #define TESTFRAMESIZE 40
7152
7153 spin_lock_irqsave(&info->irq_spinlock,flags);
7154
7155 /* setup 16C32 for SDLC DMA transfer mode */
7156
7157 usc_reset(info);
7158 usc_set_sdlc_mode(info);
7159 usc_enable_loopback(info,1);
7160
7161 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7162 * field of the buffer entry after fetching buffer address. This
7163 * way we can detect a DMA failure for a DMA read (which should be
7164 * non-destructive to system memory) before we try to write to
7165 * memory (where a failure could corrupt system memory).
7166 */
7167
7168 /* Receive DMA mode Register (RDMR)
7169 *
7170 * <15..14> 11 DMA mode = Linked List Buffer mode
7171 * <13> 1 RSBinA/L = store Rx status Block in List entry
7172 * <12> 0 1 = Clear count of List Entry after fetching
7173 * <11..10> 00 Address mode = Increment
7174 * <9> 1 Terminate Buffer on RxBound
7175 * <8> 0 Bus Width = 16bits
7176 * <7..0> ? status Bits (write as 0s)
7177 *
7178 * 1110 0010 0000 0000 = 0xe200
7179 */
7180
7181 usc_OutDmaReg( info, RDMR, 0xe200 );
7182
7183 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7184
7185
7186 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7187
7188 FrameSize = TESTFRAMESIZE;
7189
7190 /* setup 1st transmit buffer entry: */
7191 /* with frame size and transmit control word */
7192
7193 info->tx_buffer_list[0].count = FrameSize;
7194 info->tx_buffer_list[0].rcc = FrameSize;
7195 info->tx_buffer_list[0].status = 0x4000;
7196
7197 /* build a transmit frame in 1st transmit DMA buffer */
7198
7199 TmpPtr = info->tx_buffer_list[0].virt_addr;
7200 for (i = 0; i < FrameSize; i++ )
7201 *TmpPtr++ = i;
7202
7203 /* setup 1st receive buffer entry: */
7204 /* clear status, set max receive buffer size */
7205
7206 info->rx_buffer_list[0].status = 0;
7207 info->rx_buffer_list[0].count = FrameSize + 4;
7208
7209 /* zero out the 1st receive buffer */
7210
7211 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7212
7213 /* Set count field of next buffer entries to prevent */
7214 /* 16C32 from using buffers after the 1st one. */
7215
7216 info->tx_buffer_list[1].count = 0;
7217 info->rx_buffer_list[1].count = 0;
7218
7219
7220 /***************************/
7221 /* Program 16C32 receiver. */
7222 /***************************/
7223
7224 spin_lock_irqsave(&info->irq_spinlock,flags);
7225
7226 /* setup DMA transfers */
7227 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7228
7229 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7230 phys_addr = info->rx_buffer_list[0].phys_entry;
7231 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7232 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7233
7234 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7235 usc_InDmaReg( info, RDMR );
7236 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7237
7238 /* Enable Receiver (RMR <1..0> = 10) */
7239 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7240
7241 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7242
7243
7244 /*************************************************************/
7245 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7246 /*************************************************************/
7247
7248 /* Wait up to 100ms for the buffer entry fetch to complete. */
7249 EndTime = jiffies + msecs_to_jiffies(100);
7250
7251 for(;;) {
7252 if (time_after(jiffies, EndTime)) {
7253 rc = FALSE;
7254 break;
7255 }
7256
7257 spin_lock_irqsave(&info->irq_spinlock,flags);
7258 status = usc_InDmaReg( info, RDMR );
7259 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7260
7261 if ( !(status & BIT4) && (status & BIT5) ) {
7262 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7263 /* BUSY (BIT 5) is active (channel still active). */
7264 /* This means the buffer entry read has completed. */
7265 break;
7266 }
7267 }
7268
7269
7270 /******************************/
7271 /* Program 16C32 transmitter. */
7272 /******************************/
7273
7274 spin_lock_irqsave(&info->irq_spinlock,flags);
7275
7276 /* Program the Transmit Character Length Register (TCLR) */
7277 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7278
7279 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7280 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7281
7282 /* Program the address of the 1st DMA Buffer Entry in linked list */
7283
7284 phys_addr = info->tx_buffer_list[0].phys_entry;
7285 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7286 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7287
7288 /* unlatch Tx status bits, and start transmit channel. */
7289
7290 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7291 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7292
7293 /* wait for DMA controller to fill transmit FIFO */
7294
7295 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7296
7297 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7298
7299
7300 /**********************************/
7301 /* WAIT FOR TRANSMIT FIFO TO FILL */
7302 /**********************************/
7303
7304 /* Wait 100ms */
7305 EndTime = jiffies + msecs_to_jiffies(100);
7306
7307 for(;;) {
7308 if (time_after(jiffies, EndTime)) {
7309 rc = FALSE;
7310 break;
7311 }
7312
7313 spin_lock_irqsave(&info->irq_spinlock,flags);
7314 FifoLevel = usc_InReg(info, TICR) >> 8;
7315 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7316
7317 if ( FifoLevel < 16 )
7318 break;
7319 else
7320 if ( FrameSize < 32 ) {
7321 /* This frame is smaller than the entire transmit FIFO */
7322 /* so wait for the entire frame to be loaded. */
7323 if ( FifoLevel <= (32 - FrameSize) )
7324 break;
7325 }
7326 }
7327
7328
7329 if ( rc == TRUE )
7330 {
7331 /* Enable 16C32 transmitter. */
7332
7333 spin_lock_irqsave(&info->irq_spinlock,flags);
7334
7335 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7336 usc_TCmd( info, TCmd_SendFrame );
7337 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7338
7339 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7340
7341
7342 /******************************/
7343 /* WAIT FOR TRANSMIT COMPLETE */
7344 /******************************/
7345
7346 /* Wait 100ms */
7347 EndTime = jiffies + msecs_to_jiffies(100);
7348
7349 /* While timer not expired wait for transmit complete */
7350
7351 spin_lock_irqsave(&info->irq_spinlock,flags);
7352 status = usc_InReg( info, TCSR );
7353 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7354
7355 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7356 if (time_after(jiffies, EndTime)) {
7357 rc = FALSE;
7358 break;
7359 }
7360
7361 spin_lock_irqsave(&info->irq_spinlock,flags);
7362 status = usc_InReg( info, TCSR );
7363 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7364 }
7365 }
7366
7367
7368 if ( rc == TRUE ){
7369 /* CHECK FOR TRANSMIT ERRORS */
7370 if ( status & (BIT5 + BIT1) )
7371 rc = FALSE;
7372 }
7373
7374 if ( rc == TRUE ) {
7375 /* WAIT FOR RECEIVE COMPLETE */
7376
7377 /* Wait 100ms */
7378 EndTime = jiffies + msecs_to_jiffies(100);
7379
7380 /* Wait for 16C32 to write receive status to buffer entry. */
7381 status=info->rx_buffer_list[0].status;
7382 while ( status == 0 ) {
7383 if (time_after(jiffies, EndTime)) {
7384 rc = FALSE;
7385 break;
7386 }
7387 status=info->rx_buffer_list[0].status;
7388 }
7389 }
7390
7391
7392 if ( rc == TRUE ) {
7393 /* CHECK FOR RECEIVE ERRORS */
7394 status = info->rx_buffer_list[0].status;
7395
7396 if ( status & (BIT8 + BIT3 + BIT1) ) {
7397 /* receive error has occurred */
7398 rc = FALSE;
7399 } else {
7400 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7401 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7402 rc = FALSE;
7403 }
7404 }
7405 }
7406
7407 spin_lock_irqsave(&info->irq_spinlock,flags);
7408 usc_reset( info );
7409 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7410
7411 /* restore current port options */
7412 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7413
7414 return rc;
7415
7416 } /* end of mgsl_dma_test() */
7417
7418 /* mgsl_adapter_test()
7419 *
7420 * Perform the register, IRQ, and DMA tests for the 16C32.
7421 *
7422 * Arguments: info pointer to device instance data
7423 * Return Value: 0 if success, otherwise -ENODEV
7424 */
7425 static int mgsl_adapter_test( struct mgsl_struct *info )
7426 {
7427 if ( debug_level >= DEBUG_LEVEL_INFO )
7428 printk( "%s(%d):Testing device %s\n",
7429 __FILE__,__LINE__,info->device_name );
7430
7431 if ( !mgsl_register_test( info ) ) {
7432 info->init_error = DiagStatus_AddressFailure;
7433 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7434 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7435 return -ENODEV;
7436 }
7437
7438 if ( !mgsl_irq_test( info ) ) {
7439 info->init_error = DiagStatus_IrqFailure;
7440 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7441 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7442 return -ENODEV;
7443 }
7444
7445 if ( !mgsl_dma_test( info ) ) {
7446 info->init_error = DiagStatus_DmaFailure;
7447 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7448 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7449 return -ENODEV;
7450 }
7451
7452 if ( debug_level >= DEBUG_LEVEL_INFO )
7453 printk( "%s(%d):device %s passed diagnostics\n",
7454 __FILE__,__LINE__,info->device_name );
7455
7456 return 0;
7457
7458 } /* end of mgsl_adapter_test() */
7459
7460 /* mgsl_memory_test()
7461 *
7462 * Test the shared memory on a PCI adapter.
7463 *
7464 * Arguments: info pointer to device instance data
7465 * Return Value: TRUE if test passed, otherwise FALSE
7466 */
7467 static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
7468 {
7469 static unsigned long BitPatterns[] =
7470 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7471 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7472 unsigned long i;
7473 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7474 unsigned long * TestAddr;
7475
7476 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7477 return TRUE;
7478
7479 TestAddr = (unsigned long *)info->memory_base;
7480
7481 /* Test data lines with test pattern at one location. */
7482
7483 for ( i = 0 ; i < Patterncount ; i++ ) {
7484 *TestAddr = BitPatterns[i];
7485 if ( *TestAddr != BitPatterns[i] )
7486 return FALSE;
7487 }
7488
7489 /* Test address lines with incrementing pattern over */
7490 /* entire address range. */
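/* (Each 32-bit word is written with its own byte offset, i * 4, so a */
/* stuck or shorted address line makes two locations alias and the */
/* readback pass below sees a value other than i * 4.) */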
7491
7492 for ( i = 0 ; i < TestLimit ; i++ ) {
7493 *TestAddr = i * 4;
7494 TestAddr++;
7495 }
7496
7497 TestAddr = (unsigned long *)info->memory_base;
7498
7499 for ( i = 0 ; i < TestLimit ; i++ ) {
7500 if ( *TestAddr != i * 4 )
7501 return FALSE;
7502 TestAddr++;
7503 }
7504
7505 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7506
7507 return TRUE;
7508
7509 } /* End Of mgsl_memory_test() */
7510
7511
7512 /* mgsl_load_pci_memory()
7513 *
7514 * Load a large block of data into the PCI shared memory.
7515 * Use this instead of memcpy() or memmove() to move data
7516 * into the PCI shared memory.
7517 *
7518 * Notes:
7519 *
7520 * This function prevents the PCI9050 interface chip from hogging
7521 * the adapter local bus, which can starve the 16C32 by preventing
7522 * 16C32 bus master cycles.
7523 *
7524 * The PCI9050 documentation says that the 9050 will always release
7525 * control of the local bus after completing the current read
7526 * or write operation.
7527 *
7528 * It appears that as long as the PCI9050 write FIFO is full, the
7529 * PCI9050 treats all of the writes as a single burst transaction
7530 * and will not release the bus. This causes DMA latency problems
7531 * at high speeds when copying large data blocks to the shared
7532 * memory.
7533 *
7534 * This function, in effect, breaks a large shared memory write
7535 * into multiple transactions by interleaving a shared memory read,
7536 * which flushes the write FIFO and 'completes' the write
7537 * transaction. This allows any pending DMA request to gain control
7538 * of the local bus in a timely fashion.
7539 *
7540 * Arguments:
7541 *
7542 * TargetPtr pointer to target address in PCI shared memory
7543 * SourcePtr pointer to source buffer for data
7544 * count count in bytes of data to copy
7545 *
7546 * Return Value: None
7547 */
7548 static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7549 unsigned short count )
7550 {
7551 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7552 #define PCI_LOAD_INTERVAL 64
7553
7554 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7555 unsigned short Index;
7556 unsigned long Dummy;
7557
7558 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7559 {
7560 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
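/* A read of the target flushes the PCI9050 write FIFO and ends the */
/* burst, letting a pending 16C32 DMA request win arbitration for the */
/* local bus (see notes above). */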
7561 Dummy = *((volatile unsigned long *)TargetPtr);
7562 TargetPtr += PCI_LOAD_INTERVAL;
7563 SourcePtr += PCI_LOAD_INTERVAL;
7564 }
7565
7566 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7567
7568 } /* End Of mgsl_load_pci_memory() */
7569
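/* mgsl_trace_block()
 *
 * Dump a data block to the console, 16 bytes per line, as hex values
 * followed by their printable ASCII equivalents (non-printables shown
 * as '.').
 *
 * Arguments: info pointer to device instance data
 * data pointer to data to dump
 * count count of bytes to dump
 * xmit non-zero for transmit data, zero for receive data
 * Return Value: None
 */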
7570 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7571 {
7572 int i;
7573 int linecount;
7574 if (xmit)
7575 printk("%s tx data:\n",info->device_name);
7576 else
7577 printk("%s rx data:\n",info->device_name);
7578
7579 while(count) {
7580 if (count > 16)
7581 linecount = 16;
7582 else
7583 linecount = count;
7584
7585 for(i=0;i<linecount;i++)
7586 printk("%02X ",(unsigned char)data[i]);
7587 for(;i<17;i++)
7588 printk(" ");
7589 for(i=0;i<linecount;i++) {
7590 if (data[i]>=040 && data[i]<=0176)
7591 printk("%c",data[i]);
7592 else
7593 printk(".");
7594 }
7595 printk("\n");
7596
7597 data += linecount;
7598 count -= linecount;
7599 }
7600 } /* end of mgsl_trace_block() */
7601
7602 /* mgsl_tx_timeout()
7603 *
7604 * called when HDLC frame times out
7605 * update stats and do tx completion processing
7606 *
7607 * Arguments: context pointer to device instance data
7608 * Return Value: None
7609 */
7610 static void mgsl_tx_timeout(unsigned long context)
7611 {
7612 struct mgsl_struct *info = (struct mgsl_struct*)context;
7613 unsigned long flags;
7614
7615 if ( debug_level >= DEBUG_LEVEL_INFO )
7616 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7617 __FILE__,__LINE__,info->device_name);
7618 if(info->tx_active &&
7619 (info->params.mode == MGSL_MODE_HDLC ||
7620 info->params.mode == MGSL_MODE_RAW) ) {
7621 info->icount.txtimeout++;
7622 }
7623 spin_lock_irqsave(&info->irq_spinlock,flags);
7624 info->tx_active = 0;
7625 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7626
7627 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7628 usc_loopmode_cancel_transmit( info );
7629
7630 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7631
7632 #if SYNCLINK_GENERIC_HDLC
7633 if (info->netcount)
7634 hdlcdev_tx_done(info);
7635 else
7636 #endif
7637 mgsl_bh_transmit(info);
7638
7639 } /* end of mgsl_tx_timeout() */
7640
7641 /* signal that there are no more frames to send, so that
7642 * line is 'released' by echoing RxD to TxD when current
7643 * transmission is complete (or immediately if no tx in progress).
7644 */
7645 static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7646 {
7647 unsigned long flags;
7648
7649 spin_lock_irqsave(&info->irq_spinlock,flags);
7650 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7651 if (info->tx_active)
7652 info->loopmode_send_done_requested = TRUE;
7653 else
7654 usc_loopmode_send_done(info);
7655 }
7656 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7657
7658 return 0;
7659 }
7660
7661 /* release the line by echoing RxD to TxD
7662 * upon completion of a transmit frame
7663 */
7664 static void usc_loopmode_send_done( struct mgsl_struct * info )
7665 {
7666 info->loopmode_send_done_requested = FALSE;
7667 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7668 info->cmr_value &= ~BIT13;
7669 usc_OutReg(info, CMR, info->cmr_value);
7670 }
7671
7672 /* abort a transmit in progress while in HDLC LoopMode
7673 */
7674 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7675 {
7676 /* reset tx dma channel and purge TxFifo */
7677 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7678 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7679 usc_loopmode_send_done( info );
7680 }
7681
7682 /* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7683 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7684 * we must clear CMR:13 to begin repeating TxData to RxData
7685 */
7686 static void usc_loopmode_insert_request( struct mgsl_struct * info )
7687 {
7688 info->loopmode_insert_requested = TRUE;
7689
7690 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7691 * begin repeating TxData on RxData (complete insertion)
7692 */
7693 usc_OutReg( info, RICR,
7694 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7695
7696 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7697 info->cmr_value |= BIT13;
7698 usc_OutReg(info, CMR, info->cmr_value);
7699 }
7700
7701 /* return 1 if station is inserted into the loop, otherwise 0
7702 */
7703 static int usc_loopmode_active( struct mgsl_struct * info)
7704 {
7705 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7706 }
7707
7708 #if SYNCLINK_GENERIC_HDLC
7709
7710 /**
7711 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7712 * set encoding and frame check sequence (FCS) options
7713 *
7714 * dev pointer to network device structure
7715 * encoding serial encoding setting
7716 * parity FCS setting
7717 *
7718 * returns 0 if success, otherwise error code
7719 */
7720 static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7721 unsigned short parity)
7722 {
7723 struct mgsl_struct *info = dev_to_port(dev);
7724 unsigned char new_encoding;
7725 unsigned short new_crctype;
7726
7727 /* return error if TTY interface open */
7728 if (info->count)
7729 return -EBUSY;
7730
7731 switch (encoding)
7732 {
7733 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7734 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7735 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7736 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7737 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7738 default: return -EINVAL;
7739 }
7740
7741 switch (parity)
7742 {
7743 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7744 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7745 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7746 default: return -EINVAL;
7747 }
7748
7749 info->params.encoding = new_encoding;
7750 info->params.crc_type = new_crctype;
7751
7752 /* if network interface up, reprogram hardware */
7753 if (info->netcount)
7754 mgsl_program_hw(info);
7755
7756 return 0;
7757 }
7758
7759 /**
7760 * called by generic HDLC layer to send frame
7761 *
7762 * skb socket buffer containing HDLC frame
7763 * dev pointer to network device structure
7764 *
7765 * returns 0 if success, otherwise error code
7766 */
7767 static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
7768 {
7769 struct mgsl_struct *info = dev_to_port(dev);
7770 struct net_device_stats *stats = hdlc_stats(dev);
7771 unsigned long flags;
7772
7773 if (debug_level >= DEBUG_LEVEL_INFO)
7774 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7775
7776 /* stop sending until this frame completes */
7777 netif_stop_queue(dev);
7778
7779 /* copy data to device buffers */
7780 info->xmit_cnt = skb->len;
7781 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7782
7783 /* update network statistics */
7784 stats->tx_packets++;
7785 stats->tx_bytes += skb->len;
7786
7787 /* done with socket buffer, so free it */
7788 dev_kfree_skb(skb);
7789
7790 /* save start time for transmit timeout detection */
7791 dev->trans_start = jiffies;
7792
7793 /* start hardware transmitter if necessary */
7794 spin_lock_irqsave(&info->irq_spinlock,flags);
7795 if (!info->tx_active)
7796 usc_start_transmitter(info);
7797 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7798
7799 return 0;
7800 }
7801
7802 /**
7803 * called by network layer when interface enabled
7804 * claim resources and initialize hardware
7805 *
7806 * dev pointer to network device structure
7807 *
7808 * returns 0 if success, otherwise error code
7809 */
7810 static int hdlcdev_open(struct net_device *dev)
7811 {
7812 struct mgsl_struct *info = dev_to_port(dev);
7813 int rc;
7814 unsigned long flags;
7815
7816 if (debug_level >= DEBUG_LEVEL_INFO)
7817 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7818
7819 /* generic HDLC layer open processing */
7820 if ((rc = hdlc_open(dev)))
7821 return rc;
7822
7823 /* arbitrate between network and tty opens */
7824 spin_lock_irqsave(&info->netlock, flags);
7825 if (info->count != 0 || info->netcount != 0) {
7826 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7827 spin_unlock_irqrestore(&info->netlock, flags);
7828 return -EBUSY;
7829 }
7830 info->netcount=1;
7831 spin_unlock_irqrestore(&info->netlock, flags);
7832
7833 /* claim resources and init adapter */
7834 if ((rc = startup(info)) != 0) {
7835 spin_lock_irqsave(&info->netlock, flags);
7836 info->netcount=0;
7837 spin_unlock_irqrestore(&info->netlock, flags);
7838 return rc;
7839 }
7840
7841 /* assert DTR and RTS, apply hardware settings */
7842 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7843 mgsl_program_hw(info);
7844
7845 /* enable network layer transmit */
7846 dev->trans_start = jiffies;
7847 netif_start_queue(dev);
7848
7849 /* inform generic HDLC layer of current DCD status */
7850 spin_lock_irqsave(&info->irq_spinlock, flags);
7851 usc_get_serial_signals(info);
7852 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7853 if (info->serial_signals & SerialSignal_DCD)
7854 netif_carrier_on(dev);
7855 else
7856 netif_carrier_off(dev);
7857 return 0;
7858 }
7859
7860 /**
7861 * called by network layer when interface is disabled
7862 * shutdown hardware and release resources
7863 *
7864 * dev pointer to network device structure
7865 *
7866 * returns 0 if success, otherwise error code
7867 */
7868 static int hdlcdev_close(struct net_device *dev)
7869 {
7870 struct mgsl_struct *info = dev_to_port(dev);
7871 unsigned long flags;
7872
7873 if (debug_level >= DEBUG_LEVEL_INFO)
7874 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7875
7876 netif_stop_queue(dev);
7877
7878 /* shutdown adapter and release resources */
7879 shutdown(info);
7880
7881 hdlc_close(dev);
7882
7883 spin_lock_irqsave(&info->netlock, flags);
7884 info->netcount=0;
7885 spin_unlock_irqrestore(&info->netlock, flags);
7886
7887 return 0;
7888 }
7889
7890 /**
7891 * called by network layer to process IOCTL call to network device
7892 *
7893 * dev pointer to network device structure
7894 * ifr pointer to network interface request structure
7895 * cmd IOCTL command code
7896 *
7897 * returns 0 if success, otherwise error code
7898 */
7899 static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7900 {
7901 const size_t size = sizeof(sync_serial_settings);
7902 sync_serial_settings new_line;
7903 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7904 struct mgsl_struct *info = dev_to_port(dev);
7905 unsigned int flags;
7906
7907 if (debug_level >= DEBUG_LEVEL_INFO)
7908 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7909
7910 /* return error if TTY interface open */
7911 if (info->count)
7912 return -EBUSY;
7913
7914 if (cmd != SIOCWANDEV)
7915 return hdlc_ioctl(dev, ifr, cmd);
7916
7917 switch(ifr->ifr_settings.type) {
7918 case IF_GET_IFACE: /* return current sync_serial_settings */
7919
7920 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7921 if (ifr->ifr_settings.size < size) {
7922 ifr->ifr_settings.size = size; /* data size wanted */
7923 return -ENOBUFS;
7924 }
7925
7926 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7927 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7928 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7929 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7930
7931 switch (flags){
7932 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7933 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7934 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7935 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7936 default: new_line.clock_type = CLOCK_DEFAULT;
7937 }
7938
7939 new_line.clock_rate = info->params.clock_speed;
7940 new_line.loopback = info->params.loopback ? 1:0;
7941
7942 if (copy_to_user(line, &new_line, size))
7943 return -EFAULT;
7944 return 0;
7945
7946 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7947
7948 if(!capable(CAP_NET_ADMIN))
7949 return -EPERM;
7950 if (copy_from_user(&new_line, line, size))
7951 return -EFAULT;
7952
7953 switch (new_line.clock_type)
7954 {
7955 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7956 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7957 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7958 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7959 case CLOCK_DEFAULT: flags = info->params.flags &
7960 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7961 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7962 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7963 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7964 default: return -EINVAL;
7965 }
7966
7967 if (new_line.loopback != 0 && new_line.loopback != 1)
7968 return -EINVAL;
7969
7970 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7971 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7972 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7973 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7974 info->params.flags |= flags;
7975
7976 info->params.loopback = new_line.loopback;
7977
7978 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7979 info->params.clock_speed = new_line.clock_rate;
7980 else
7981 info->params.clock_speed = 0;
7982
7983 /* if network interface up, reprogram hardware */
7984 if (info->netcount)
7985 mgsl_program_hw(info);
7986 return 0;
7987
7988 default:
7989 return hdlc_ioctl(dev, ifr, cmd);
7990 }
7991 }
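/* Example (assumed user-space usage, not part of this driver): a
 * SIOCWANDEV request with ifr_settings.type == IF_IFACE_SYNC_SERIAL and a
 * sync_serial_settings of { .clock_type = CLOCK_INT, .clock_rate = 9600,
 * .loopback = 0 } selects the on-board BRG for both directions
 * (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG) with a 9600bps clock.
 */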
7992
7993 /**
7994 * called by network layer when transmit timeout is detected
7995 *
7996 * dev pointer to network device structure
7997 */
7998 static void hdlcdev_tx_timeout(struct net_device *dev)
7999 {
8000 struct mgsl_struct *info = dev_to_port(dev);
8001 struct net_device_stats *stats = hdlc_stats(dev);
8002 unsigned long flags;
8003
8004 if (debug_level >= DEBUG_LEVEL_INFO)
8005 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
8006
8007 stats->tx_errors++;
8008 stats->tx_aborted_errors++;
8009
8010 spin_lock_irqsave(&info->irq_spinlock,flags);
8011 usc_stop_transmitter(info);
8012 spin_unlock_irqrestore(&info->irq_spinlock,flags);
8013
8014 netif_wake_queue(dev);
8015 }
8016
8017 /**
8018 * called by device driver when transmit completes
8019 * reenable network layer transmit if stopped
8020 *
8021 * info pointer to device instance information
8022 */
8023 static void hdlcdev_tx_done(struct mgsl_struct *info)
8024 {
8025 if (netif_queue_stopped(info->netdev))
8026 netif_wake_queue(info->netdev);
8027 }
8028
8029 /**
8030 * called by device driver when frame received
8031 * pass frame to network layer
8032 *
8033 * info pointer to device instance information
8034 * buf pointer to buffer containing frame data
8035 * size count of data bytes in buf
8036 */
8037 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
8038 {
8039 struct sk_buff *skb = dev_alloc_skb(size);
8040 struct net_device *dev = info->netdev;
8041 struct net_device_stats *stats = hdlc_stats(dev);
8042
8043 if (debug_level >= DEBUG_LEVEL_INFO)
8044 printk("hdlcdev_rx(%s)\n",dev->name);
8045
8046 if (skb == NULL) {
8047 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
8048 stats->rx_dropped++;
8049 return;
8050 }
8051
8052 memcpy(skb_put(skb, size),buf,size);
8053
8054 skb->protocol = hdlc_type_trans(skb, info->netdev);
8055
8056 stats->rx_packets++;
8057 stats->rx_bytes += size;
8058
8059 netif_rx(skb);
8060
8061 info->netdev->last_rx = jiffies;
8062 }
8063
8064 /**
8065 * called by device driver when adding device instance
8066 * do generic HDLC initialization
8067 *
8068 * info pointer to device instance information
8069 *
8070 * returns 0 if success, otherwise error code
8071 */
8072 static int hdlcdev_init(struct mgsl_struct *info)
8073 {
8074 int rc;
8075 struct net_device *dev;
8076 hdlc_device *hdlc;
8077
8078 /* allocate and initialize network and HDLC layer objects */
8079
8080 if (!(dev = alloc_hdlcdev(info))) {
8081 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8082 return -ENOMEM;
8083 }
8084
8085 /* for network layer reporting purposes only */
8086 dev->base_addr = info->io_base;
8087 dev->irq = info->irq_level;
8088 dev->dma = info->dma_level;
8089
8090 /* network layer callbacks and settings */
8091 dev->do_ioctl = hdlcdev_ioctl;
8092 dev->open = hdlcdev_open;
8093 dev->stop = hdlcdev_close;
8094 dev->tx_timeout = hdlcdev_tx_timeout;
8095 dev->watchdog_timeo = 10*HZ;
8096 dev->tx_queue_len = 50;
8097
8098 /* generic HDLC layer callbacks and settings */
8099 hdlc = dev_to_hdlc(dev);
8100 hdlc->attach = hdlcdev_attach;
8101 hdlc->xmit = hdlcdev_xmit;
8102
8103 /* register objects with HDLC layer */
8104 if ((rc = register_hdlc_device(dev))) {
8105 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8106 free_netdev(dev);
8107 return rc;
8108 }
8109
8110 info->netdev = dev;
8111 return 0;
8112 }
8113
8114 /**
8115 * called by device driver when removing device instance
8116 * do generic HDLC cleanup
8117 *
8118 * info pointer to device instance information
8119 */
8120 static void hdlcdev_exit(struct mgsl_struct *info)
8121 {
8122 unregister_hdlc_device(info->netdev);
8123 free_netdev(info->netdev);
8124 info->netdev = NULL;
8125 }
8126
8127 #endif /* SYNCLINK_GENERIC_HDLC */
8128
8129
8130 static int __devinit synclink_init_one (struct pci_dev *dev,
8131 const struct pci_device_id *ent)
8132 {
8133 struct mgsl_struct *info;
8134
8135 if (pci_enable_device(dev)) {
8136 printk("error enabling pci device %p\n", dev);
8137 return -EIO;
8138 }
8139
8140 if (!(info = mgsl_allocate_device())) {
8141 printk("can't allocate device instance data.\n");
8142 return -EIO;
8143 }
8144
8145 /* Copy user configuration info to device instance data */
8146
8147 info->io_base = pci_resource_start(dev, 2);
8148 info->irq_level = dev->irq;
8149 info->phys_memory_base = pci_resource_start(dev, 3);
8150
8151 /* Because ioremap only works on page boundaries we must map
8152 * a larger area than is actually implemented for the LCR
8153 * memory range. We map a full page starting at the page boundary.
8154 */
8155 info->phys_lcr_base = pci_resource_start(dev, 0);
8156 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8157 info->phys_lcr_base &= ~(PAGE_SIZE-1);
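/* Example (assuming 4KB pages): a BAR 0 value of 0xfe001080 gives */
/* lcr_offset = 0x080 and phys_lcr_base = 0xfe001000, so the full page */
/* containing the LCR registers can be mapped on a page boundary. */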
8158
8159 info->bus_type = MGSL_BUS_TYPE_PCI;
8160 info->io_addr_size = 8;
8161 info->irq_flags = IRQF_SHARED;
8162
8163 if (dev->device == 0x0210) {
8164 /* Version 1 PCI9030 based universal PCI adapter */
8165 info->misc_ctrl_value = 0x007c4080;
8166 info->hw_version = 1;
8167 } else {
8168 /* Version 0 PCI9050 based 5V PCI adapter
8169 * A PCI9050 bug prevents reading LCR registers if
8170 * LCR base address bit 7 is set. Maintain shadow
8171 * value so we can write to LCR misc control reg.
8172 */
8173 info->misc_ctrl_value = 0x087e4546;
8174 info->hw_version = 0;
8175 }
8176
8177 mgsl_add_device(info);
8178
8179 return 0;
8180 }
8181
8182 static void __devexit synclink_remove_one (struct pci_dev *dev)
8183 {
8184 }
8185