]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/tty/synclink.c
2b2988c779c74d6c1b76eca2312c712005378e12
[mirror_ubuntu-bionic-kernel.git] / drivers / tty / synclink.c
1 /*
2 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
3 *
4 * Device driver for Microgate SyncLink ISA and PCI
5 * high speed multiprotocol serial adapters.
6 *
7 * written by Paul Fulghum for Microgate Corporation
8 * paulkf@microgate.com
9 *
10 * Microgate and SyncLink are trademarks of Microgate Corporation
11 *
12 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
13 *
14 * Original release 01/11/99
15 *
16 * This code is released under the GNU General Public License (GPL)
17 *
18 * This driver is primarily intended for use in synchronous
19 * HDLC mode. Asynchronous mode is also provided.
20 *
21 * When operating in synchronous mode, each call to mgsl_write()
22 * contains exactly one complete HDLC frame. Calling mgsl_put_char
23 * will start assembling an HDLC frame that will not be sent until
24 * mgsl_flush_chars or mgsl_write is called.
25 *
26 * Synchronous receive data is reported as complete frames. To accomplish
27 * this, the TTY flip buffer is bypassed (too small to hold largest
28 * frame and may fragment frames) and the line discipline
29 * receive entry point is called directly.
30 *
31 * This driver has been tested with a slightly modified ppp.c driver
32 * for synchronous PPP.
33 *
34 * 2000/02/16
35 * Added interface for syncppp.c driver (an alternate synchronous PPP
36 * implementation that also supports Cisco HDLC). Each device instance
37 * registers as a tty device AND a network device (if dosyncppp option
38 * is set for the device). The functionality is determined by which
39 * device interface is opened.
40 *
41 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
42 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
43 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
44 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
45 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
47 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
49 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
50 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
51 * OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54 #if defined(__i386__)
55 # define BREAKPOINT() asm(" int $3");
56 #else
57 # define BREAKPOINT() { }
58 #endif
59
60 #define MAX_ISA_DEVICES 10
61 #define MAX_PCI_DEVICES 10
62 #define MAX_TOTAL_DEVICES 20
63
64 #include <linux/module.h>
65 #include <linux/errno.h>
66 #include <linux/signal.h>
67 #include <linux/sched.h>
68 #include <linux/timer.h>
69 #include <linux/interrupt.h>
70 #include <linux/pci.h>
71 #include <linux/tty.h>
72 #include <linux/tty_flip.h>
73 #include <linux/serial.h>
74 #include <linux/major.h>
75 #include <linux/string.h>
76 #include <linux/fcntl.h>
77 #include <linux/ptrace.h>
78 #include <linux/ioport.h>
79 #include <linux/mm.h>
80 #include <linux/seq_file.h>
81 #include <linux/slab.h>
82 #include <linux/delay.h>
83 #include <linux/netdevice.h>
84 #include <linux/vmalloc.h>
85 #include <linux/init.h>
86 #include <linux/ioctl.h>
87 #include <linux/synclink.h>
88
89 #include <asm/system.h>
90 #include <asm/io.h>
91 #include <asm/irq.h>
92 #include <asm/dma.h>
93 #include <linux/bitops.h>
94 #include <asm/types.h>
95 #include <linux/termios.h>
96 #include <linux/workqueue.h>
97 #include <linux/hdlc.h>
98 #include <linux/dma-mapping.h>
99
100 #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
101 #define SYNCLINK_GENERIC_HDLC 1
102 #else
103 #define SYNCLINK_GENERIC_HDLC 0
104 #endif
105
106 #define GET_USER(error,value,addr) error = get_user(value,addr)
107 #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
108 #define PUT_USER(error,value,addr) error = put_user(value,addr)
109 #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
110
111 #include <asm/uaccess.h>
112
113 #define RCLRVALUE 0xffff
114
115 static MGSL_PARAMS default_params = {
116 MGSL_MODE_HDLC, /* unsigned long mode */
117 0, /* unsigned char loopback; */
118 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
119 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
120 0, /* unsigned long clock_speed; */
121 0xff, /* unsigned char addr_filter; */
122 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
123 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
124 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
125 9600, /* unsigned long data_rate; */
126 8, /* unsigned char data_bits; */
127 1, /* unsigned char stop_bits; */
128 ASYNC_PARITY_NONE /* unsigned char parity; */
129 };
130
131 #define SHARED_MEM_ADDRESS_SIZE 0x40000
132 #define BUFFERLISTSIZE 4096
133 #define DMABUFFERSIZE 4096
134 #define MAXRXFRAMES 7
135
136 typedef struct _DMABUFFERENTRY
137 {
138 u32 phys_addr; /* 32-bit flat physical address of data buffer */
139 volatile u16 count; /* buffer size/data count */
140 volatile u16 status; /* Control/status field */
141 volatile u16 rcc; /* character count field */
142 u16 reserved; /* padding required by 16C32 */
143 u32 link; /* 32-bit flat link to next buffer entry */
144 char *virt_addr; /* virtual address of data buffer */
145 u32 phys_entry; /* physical address of this buffer entry */
146 dma_addr_t dma_addr;
147 } DMABUFFERENTRY, *DMAPBUFFERENTRY;
148
149 /* The queue of BH actions to be performed */
150
151 #define BH_RECEIVE 1
152 #define BH_TRANSMIT 2
153 #define BH_STATUS 4
154
155 #define IO_PIN_SHUTDOWN_LIMIT 100
156
157 struct _input_signal_events {
158 int ri_up;
159 int ri_down;
160 int dsr_up;
161 int dsr_down;
162 int dcd_up;
163 int dcd_down;
164 int cts_up;
165 int cts_down;
166 };
167
168 /* transmit holding buffer definitions*/
169 #define MAX_TX_HOLDING_BUFFERS 5
170 struct tx_holding_buffer {
171 int buffer_size;
172 unsigned char * buffer;
173 };
174
175
176 /*
177 * Device instance data structure
178 */
179
180 struct mgsl_struct {
181 int magic;
182 struct tty_port port;
183 int line;
184 int hw_version;
185
186 struct mgsl_icount icount;
187
188 int timeout;
189 int x_char; /* xon/xoff character */
190 u16 read_status_mask;
191 u16 ignore_status_mask;
192 unsigned char *xmit_buf;
193 int xmit_head;
194 int xmit_tail;
195 int xmit_cnt;
196
197 wait_queue_head_t status_event_wait_q;
198 wait_queue_head_t event_wait_q;
199 struct timer_list tx_timer; /* HDLC transmit timeout timer */
200 struct mgsl_struct *next_device; /* device list link */
201
202 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
203 struct work_struct task; /* task structure for scheduling bh */
204
205 u32 EventMask; /* event trigger mask */
206 u32 RecordedEvents; /* pending events */
207
208 u32 max_frame_size; /* as set by device config */
209
210 u32 pending_bh;
211
212 bool bh_running; /* Protection from multiple */
213 int isr_overflow;
214 bool bh_requested;
215
216 int dcd_chkcount; /* check counts to prevent */
217 int cts_chkcount; /* too many IRQs if a signal */
218 int dsr_chkcount; /* is floating */
219 int ri_chkcount;
220
221 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
222 u32 buffer_list_phys;
223 dma_addr_t buffer_list_dma_addr;
224
225 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
226 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
227 unsigned int current_rx_buffer;
228
229 int num_tx_dma_buffers; /* number of tx dma frames required */
230 int tx_dma_buffers_used;
231 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
232 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
233 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
234 int current_tx_buffer; /* next tx dma buffer to be loaded */
235
236 unsigned char *intermediate_rxbuffer;
237
238 int num_tx_holding_buffers; /* number of tx holding buffer allocated */
239 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
240 int put_tx_holding_index; /* next tx holding buffer to store user request */
241 int tx_holding_count; /* number of tx holding buffers waiting */
242 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
243
244 bool rx_enabled;
245 bool rx_overflow;
246 bool rx_rcc_underrun;
247
248 bool tx_enabled;
249 bool tx_active;
250 u32 idle_mode;
251
252 u16 cmr_value;
253 u16 tcsr_value;
254
255 char device_name[25]; /* device instance name */
256
257 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
258 unsigned char bus; /* expansion bus number (zero based) */
259 unsigned char function; /* PCI device number */
260
261 unsigned int io_base; /* base I/O address of adapter */
262 unsigned int io_addr_size; /* size of the I/O address range */
263 bool io_addr_requested; /* true if I/O address requested */
264
265 unsigned int irq_level; /* interrupt level */
266 unsigned long irq_flags;
267 bool irq_requested; /* true if IRQ requested */
268
269 unsigned int dma_level; /* DMA channel */
270 bool dma_requested; /* true if dma channel requested */
271
272 u16 mbre_bit;
273 u16 loopback_bits;
274 u16 usc_idle_mode;
275
276 MGSL_PARAMS params; /* communications parameters */
277
278 unsigned char serial_signals; /* current serial signal states */
279
280 bool irq_occurred; /* for diagnostics use */
281 unsigned int init_error; /* Initialization startup error (DIAGS) */
282 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
283
284 u32 last_mem_alloc;
285 unsigned char* memory_base; /* shared memory address (PCI only) */
286 u32 phys_memory_base;
287 bool shared_mem_requested;
288
289 unsigned char* lcr_base; /* local config registers (PCI only) */
290 u32 phys_lcr_base;
291 u32 lcr_offset;
292 bool lcr_mem_requested;
293
294 u32 misc_ctrl_value;
295 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
296 char char_buf[MAX_ASYNC_BUFFER_SIZE];
297 bool drop_rts_on_tx_done;
298
299 bool loopmode_insert_requested;
300 bool loopmode_send_done_requested;
301
302 struct _input_signal_events input_signal_events;
303
304 /* generic HDLC device parts */
305 int netcount;
306 spinlock_t netlock;
307
308 #if SYNCLINK_GENERIC_HDLC
309 struct net_device *netdev;
310 #endif
311 };
312
313 #define MGSL_MAGIC 0x5401
314
315 /*
316 * The size of the serial xmit buffer is 1 page, or 4096 bytes
317 */
318 #ifndef SERIAL_XMIT_SIZE
319 #define SERIAL_XMIT_SIZE 4096
320 #endif
321
322 /*
323 * These macros define the offsets used in calculating the
324 * I/O address of the specified USC registers.
325 */
326
327
328 #define DCPIN 2 /* Bit 1 of I/O address */
329 #define SDPIN 4 /* Bit 2 of I/O address */
330
331 #define DCAR 0 /* DMA command/address register */
332 #define CCAR SDPIN /* channel command/address register */
333 #define DATAREG DCPIN + SDPIN /* serial data register */
334 #define MSBONLY 0x41
335 #define LSBONLY 0x40
336
337 /*
338 * These macros define the register address (ordinal number)
339 * used for writing address/value pairs to the USC.
340 */
341
342 #define CMR 0x02 /* Channel mode Register */
343 #define CCSR 0x04 /* Channel Command/status Register */
344 #define CCR 0x06 /* Channel Control Register */
345 #define PSR 0x08 /* Port status Register */
346 #define PCR 0x0a /* Port Control Register */
347 #define TMDR 0x0c /* Test mode Data Register */
348 #define TMCR 0x0e /* Test mode Control Register */
349 #define CMCR 0x10 /* Clock mode Control Register */
350 #define HCR 0x12 /* Hardware Configuration Register */
351 #define IVR 0x14 /* Interrupt Vector Register */
352 #define IOCR 0x16 /* Input/Output Control Register */
353 #define ICR 0x18 /* Interrupt Control Register */
354 #define DCCR 0x1a /* Daisy Chain Control Register */
355 #define MISR 0x1c /* Misc Interrupt status Register */
356 #define SICR 0x1e /* status Interrupt Control Register */
357 #define RDR 0x20 /* Receive Data Register */
358 #define RMR 0x22 /* Receive mode Register */
359 #define RCSR 0x24 /* Receive Command/status Register */
360 #define RICR 0x26 /* Receive Interrupt Control Register */
361 #define RSR 0x28 /* Receive Sync Register */
362 #define RCLR 0x2a /* Receive count Limit Register */
363 #define RCCR 0x2c /* Receive Character count Register */
364 #define TC0R 0x2e /* Time Constant 0 Register */
365 #define TDR 0x30 /* Transmit Data Register */
366 #define TMR 0x32 /* Transmit mode Register */
367 #define TCSR 0x34 /* Transmit Command/status Register */
368 #define TICR 0x36 /* Transmit Interrupt Control Register */
369 #define TSR 0x38 /* Transmit Sync Register */
370 #define TCLR 0x3a /* Transmit count Limit Register */
371 #define TCCR 0x3c /* Transmit Character count Register */
372 #define TC1R 0x3e /* Time Constant 1 Register */
373
374
375 /*
376 * MACRO DEFINITIONS FOR DMA REGISTERS
377 */
378
379 #define DCR 0x06 /* DMA Control Register (shared) */
380 #define DACR 0x08 /* DMA Array count Register (shared) */
381 #define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
382 #define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
383 #define DICR 0x18 /* DMA Interrupt Control Register (shared) */
384 #define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
385 #define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
386
387 #define TDMR 0x02 /* Transmit DMA mode Register */
388 #define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
389 #define TBCR 0x2a /* Transmit Byte count Register */
390 #define TARL 0x2c /* Transmit Address Register (low) */
391 #define TARU 0x2e /* Transmit Address Register (high) */
392 #define NTBCR 0x3a /* Next Transmit Byte count Register */
393 #define NTARL 0x3c /* Next Transmit Address Register (low) */
394 #define NTARU 0x3e /* Next Transmit Address Register (high) */
395
396 #define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
397 #define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
398 #define RBCR 0xaa /* Receive Byte count Register */
399 #define RARL 0xac /* Receive Address Register (low) */
400 #define RARU 0xae /* Receive Address Register (high) */
401 #define NRBCR 0xba /* Next Receive Byte count Register */
402 #define NRARL 0xbc /* Next Receive Address Register (low) */
403 #define NRARU 0xbe /* Next Receive Address Register (high) */
404
405
406 /*
407 * MACRO DEFINITIONS FOR MODEM STATUS BITS
408 */
409
410 #define MODEMSTATUS_DTR 0x80
411 #define MODEMSTATUS_DSR 0x40
412 #define MODEMSTATUS_RTS 0x20
413 #define MODEMSTATUS_CTS 0x10
414 #define MODEMSTATUS_RI 0x04
415 #define MODEMSTATUS_DCD 0x01
416
417
418 /*
419 * Channel Command/Address Register (CCAR) Command Codes
420 */
421
422 #define RTCmd_Null 0x0000
423 #define RTCmd_ResetHighestIus 0x1000
424 #define RTCmd_TriggerChannelLoadDma 0x2000
425 #define RTCmd_TriggerRxDma 0x2800
426 #define RTCmd_TriggerTxDma 0x3000
427 #define RTCmd_TriggerRxAndTxDma 0x3800
428 #define RTCmd_PurgeRxFifo 0x4800
429 #define RTCmd_PurgeTxFifo 0x5000
430 #define RTCmd_PurgeRxAndTxFifo 0x5800
431 #define RTCmd_LoadRcc 0x6800
432 #define RTCmd_LoadTcc 0x7000
433 #define RTCmd_LoadRccAndTcc 0x7800
434 #define RTCmd_LoadTC0 0x8800
435 #define RTCmd_LoadTC1 0x9000
436 #define RTCmd_LoadTC0AndTC1 0x9800
437 #define RTCmd_SerialDataLSBFirst 0xa000
438 #define RTCmd_SerialDataMSBFirst 0xa800
439 #define RTCmd_SelectBigEndian 0xb000
440 #define RTCmd_SelectLittleEndian 0xb800
441
442
443 /*
444 * DMA Command/Address Register (DCAR) Command Codes
445 */
446
447 #define DmaCmd_Null 0x0000
448 #define DmaCmd_ResetTxChannel 0x1000
449 #define DmaCmd_ResetRxChannel 0x1200
450 #define DmaCmd_StartTxChannel 0x2000
451 #define DmaCmd_StartRxChannel 0x2200
452 #define DmaCmd_ContinueTxChannel 0x3000
453 #define DmaCmd_ContinueRxChannel 0x3200
454 #define DmaCmd_PauseTxChannel 0x4000
455 #define DmaCmd_PauseRxChannel 0x4200
456 #define DmaCmd_AbortTxChannel 0x5000
457 #define DmaCmd_AbortRxChannel 0x5200
458 #define DmaCmd_InitTxChannel 0x7000
459 #define DmaCmd_InitRxChannel 0x7200
460 #define DmaCmd_ResetHighestDmaIus 0x8000
461 #define DmaCmd_ResetAllChannels 0x9000
462 #define DmaCmd_StartAllChannels 0xa000
463 #define DmaCmd_ContinueAllChannels 0xb000
464 #define DmaCmd_PauseAllChannels 0xc000
465 #define DmaCmd_AbortAllChannels 0xd000
466 #define DmaCmd_InitAllChannels 0xf000
467
468 #define TCmd_Null 0x0000
469 #define TCmd_ClearTxCRC 0x2000
470 #define TCmd_SelectTicrTtsaData 0x4000
471 #define TCmd_SelectTicrTxFifostatus 0x5000
472 #define TCmd_SelectTicrIntLevel 0x6000
473 #define TCmd_SelectTicrdma_level 0x7000
474 #define TCmd_SendFrame 0x8000
475 #define TCmd_SendAbort 0x9000
476 #define TCmd_EnableDleInsertion 0xc000
477 #define TCmd_DisableDleInsertion 0xd000
478 #define TCmd_ClearEofEom 0xe000
479 #define TCmd_SetEofEom 0xf000
480
481 #define RCmd_Null 0x0000
482 #define RCmd_ClearRxCRC 0x2000
483 #define RCmd_EnterHuntmode 0x3000
484 #define RCmd_SelectRicrRtsaData 0x4000
485 #define RCmd_SelectRicrRxFifostatus 0x5000
486 #define RCmd_SelectRicrIntLevel 0x6000
487 #define RCmd_SelectRicrdma_level 0x7000
488
489 /*
490 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
491 */
492
493 #define RECEIVE_STATUS BIT5
494 #define RECEIVE_DATA BIT4
495 #define TRANSMIT_STATUS BIT3
496 #define TRANSMIT_DATA BIT2
497 #define IO_PIN BIT1
498 #define MISC BIT0
499
500
501 /*
502 * Receive status Bits in Receive Command/status Register RCSR
503 */
504
505 #define RXSTATUS_SHORT_FRAME BIT8
506 #define RXSTATUS_CODE_VIOLATION BIT8
507 #define RXSTATUS_EXITED_HUNT BIT7
508 #define RXSTATUS_IDLE_RECEIVED BIT6
509 #define RXSTATUS_BREAK_RECEIVED BIT5
510 #define RXSTATUS_ABORT_RECEIVED BIT5
511 #define RXSTATUS_RXBOUND BIT4
512 #define RXSTATUS_CRC_ERROR BIT3
513 #define RXSTATUS_FRAMING_ERROR BIT3
514 #define RXSTATUS_ABORT BIT2
515 #define RXSTATUS_PARITY_ERROR BIT2
516 #define RXSTATUS_OVERRUN BIT1
517 #define RXSTATUS_DATA_AVAILABLE BIT0
518 #define RXSTATUS_ALL 0x01f6
519 #define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
520
521 /*
522 * Values for setting transmit idle mode in
523 * Transmit Control/status Register (TCSR)
524 */
525 #define IDLEMODE_FLAGS 0x0000
526 #define IDLEMODE_ALT_ONE_ZERO 0x0100
527 #define IDLEMODE_ZERO 0x0200
528 #define IDLEMODE_ONE 0x0300
529 #define IDLEMODE_ALT_MARK_SPACE 0x0500
530 #define IDLEMODE_SPACE 0x0600
531 #define IDLEMODE_MARK 0x0700
532 #define IDLEMODE_MASK 0x0700
533
534 /*
535 * IUSC revision identifiers
536 */
537 #define IUSC_SL1660 0x4d44
538 #define IUSC_PRE_SL1660 0x4553
539
540 /*
541 * Transmit status Bits in Transmit Command/status Register (TCSR)
542 */
543
544 #define TCSR_PRESERVE 0x0F00
545
546 #define TCSR_UNDERWAIT BIT11
547 #define TXSTATUS_PREAMBLE_SENT BIT7
548 #define TXSTATUS_IDLE_SENT BIT6
549 #define TXSTATUS_ABORT_SENT BIT5
550 #define TXSTATUS_EOF_SENT BIT4
551 #define TXSTATUS_EOM_SENT BIT4
552 #define TXSTATUS_CRC_SENT BIT3
553 #define TXSTATUS_ALL_SENT BIT2
554 #define TXSTATUS_UNDERRUN BIT1
555 #define TXSTATUS_FIFO_EMPTY BIT0
556 #define TXSTATUS_ALL 0x00fa
557 #define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
558
559
560 #define MISCSTATUS_RXC_LATCHED BIT15
561 #define MISCSTATUS_RXC BIT14
562 #define MISCSTATUS_TXC_LATCHED BIT13
563 #define MISCSTATUS_TXC BIT12
564 #define MISCSTATUS_RI_LATCHED BIT11
565 #define MISCSTATUS_RI BIT10
566 #define MISCSTATUS_DSR_LATCHED BIT9
567 #define MISCSTATUS_DSR BIT8
568 #define MISCSTATUS_DCD_LATCHED BIT7
569 #define MISCSTATUS_DCD BIT6
570 #define MISCSTATUS_CTS_LATCHED BIT5
571 #define MISCSTATUS_CTS BIT4
572 #define MISCSTATUS_RCC_UNDERRUN BIT3
573 #define MISCSTATUS_DPLL_NO_SYNC BIT2
574 #define MISCSTATUS_BRG1_ZERO BIT1
575 #define MISCSTATUS_BRG0_ZERO BIT0
576
577 #define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
578 #define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
579
580 #define SICR_RXC_ACTIVE BIT15
581 #define SICR_RXC_INACTIVE BIT14
582 #define SICR_RXC (BIT15+BIT14)
583 #define SICR_TXC_ACTIVE BIT13
584 #define SICR_TXC_INACTIVE BIT12
585 #define SICR_TXC (BIT13+BIT12)
586 #define SICR_RI_ACTIVE BIT11
587 #define SICR_RI_INACTIVE BIT10
588 #define SICR_RI (BIT11+BIT10)
589 #define SICR_DSR_ACTIVE BIT9
590 #define SICR_DSR_INACTIVE BIT8
591 #define SICR_DSR (BIT9+BIT8)
592 #define SICR_DCD_ACTIVE BIT7
593 #define SICR_DCD_INACTIVE BIT6
594 #define SICR_DCD (BIT7+BIT6)
595 #define SICR_CTS_ACTIVE BIT5
596 #define SICR_CTS_INACTIVE BIT4
597 #define SICR_CTS (BIT5+BIT4)
598 #define SICR_RCC_UNDERFLOW BIT3
599 #define SICR_DPLL_NO_SYNC BIT2
600 #define SICR_BRG1_ZERO BIT1
601 #define SICR_BRG0_ZERO BIT0
602
603 void usc_DisableMasterIrqBit( struct mgsl_struct *info );
604 void usc_EnableMasterIrqBit( struct mgsl_struct *info );
605 void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
606 void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
607 void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
608
609 #define usc_EnableInterrupts( a, b ) \
610 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
611
612 #define usc_DisableInterrupts( a, b ) \
613 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
614
615 #define usc_EnableMasterIrqBit(a) \
616 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
617
618 #define usc_DisableMasterIrqBit(a) \
619 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
620
621 #define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
622
623 /*
624 * Transmit status Bits in Transmit Control status Register (TCSR)
625 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
626 */
627
628 #define TXSTATUS_PREAMBLE_SENT BIT7
629 #define TXSTATUS_IDLE_SENT BIT6
630 #define TXSTATUS_ABORT_SENT BIT5
631 #define TXSTATUS_EOF BIT4
632 #define TXSTATUS_CRC_SENT BIT3
633 #define TXSTATUS_ALL_SENT BIT2
634 #define TXSTATUS_UNDERRUN BIT1
635 #define TXSTATUS_FIFO_EMPTY BIT0
636
637 #define DICR_MASTER BIT15
638 #define DICR_TRANSMIT BIT0
639 #define DICR_RECEIVE BIT1
640
641 #define usc_EnableDmaInterrupts(a,b) \
642 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
643
644 #define usc_DisableDmaInterrupts(a,b) \
645 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
646
647 #define usc_EnableStatusIrqs(a,b) \
648 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
649
650 #define usc_DisablestatusIrqs(a,b) \
651 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
652
653 /* Transmit status Bits in Transmit Control status Register (TCSR) */
654 /* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
655
656
657 #define DISABLE_UNCONDITIONAL 0
658 #define DISABLE_END_OF_FRAME 1
659 #define ENABLE_UNCONDITIONAL 2
660 #define ENABLE_AUTO_CTS 3
661 #define ENABLE_AUTO_DCD 3
662 #define usc_EnableTransmitter(a,b) \
663 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
664 #define usc_EnableReceiver(a,b) \
665 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
666
667 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
668 static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
669 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
670
671 static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
672 static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
673 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
674 void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
675 void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
676
677 #define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
678 #define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
679
680 #define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
681
682 static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
683 static void usc_start_receiver( struct mgsl_struct *info );
684 static void usc_stop_receiver( struct mgsl_struct *info );
685
686 static void usc_start_transmitter( struct mgsl_struct *info );
687 static void usc_stop_transmitter( struct mgsl_struct *info );
688 static void usc_set_txidle( struct mgsl_struct *info );
689 static void usc_load_txfifo( struct mgsl_struct *info );
690
691 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
692 static void usc_enable_loopback( struct mgsl_struct *info, int enable );
693
694 static void usc_get_serial_signals( struct mgsl_struct *info );
695 static void usc_set_serial_signals( struct mgsl_struct *info );
696
697 static void usc_reset( struct mgsl_struct *info );
698
699 static void usc_set_sync_mode( struct mgsl_struct *info );
700 static void usc_set_sdlc_mode( struct mgsl_struct *info );
701 static void usc_set_async_mode( struct mgsl_struct *info );
702 static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
703
704 static void usc_loopback_frame( struct mgsl_struct *info );
705
706 static void mgsl_tx_timeout(unsigned long context);
707
708
709 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
710 static void usc_loopmode_insert_request( struct mgsl_struct * info );
711 static int usc_loopmode_active( struct mgsl_struct * info);
712 static void usc_loopmode_send_done( struct mgsl_struct * info );
713
714 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
715
716 #if SYNCLINK_GENERIC_HDLC
717 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
718 static void hdlcdev_tx_done(struct mgsl_struct *info);
719 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
720 static int hdlcdev_init(struct mgsl_struct *info);
721 static void hdlcdev_exit(struct mgsl_struct *info);
722 #endif
723
724 /*
725 * Defines a BUS descriptor value for the PCI adapter
726 * local bus address ranges.
727 */
728
729 #define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
730 (0x00400020 + \
731 ((WrHold) << 30) + \
732 ((WrDly) << 28) + \
733 ((RdDly) << 26) + \
734 ((Nwdd) << 20) + \
735 ((Nwad) << 15) + \
736 ((Nxda) << 13) + \
737 ((Nrdd) << 11) + \
738 ((Nrad) << 6) )
739
740 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
741
742 /*
743 * Adapter diagnostic routines
744 */
745 static bool mgsl_register_test( struct mgsl_struct *info );
746 static bool mgsl_irq_test( struct mgsl_struct *info );
747 static bool mgsl_dma_test( struct mgsl_struct *info );
748 static bool mgsl_memory_test( struct mgsl_struct *info );
749 static int mgsl_adapter_test( struct mgsl_struct *info );
750
751 /*
752 * device and resource management routines
753 */
754 static int mgsl_claim_resources(struct mgsl_struct *info);
755 static void mgsl_release_resources(struct mgsl_struct *info);
756 static void mgsl_add_device(struct mgsl_struct *info);
757 static struct mgsl_struct* mgsl_allocate_device(void);
758
759 /*
760 * DMA buffer manupulation functions.
761 */
762 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
763 static bool mgsl_get_rx_frame( struct mgsl_struct *info );
764 static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
765 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
766 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
767 static int num_free_tx_dma_buffers(struct mgsl_struct *info);
768 static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
769 static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
770
771 /*
772 * DMA and Shared Memory buffer allocation and formatting
773 */
774 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
775 static void mgsl_free_dma_buffers(struct mgsl_struct *info);
776 static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
777 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
778 static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
779 static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
780 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
781 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
782 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
783 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
784 static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
785 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
786
787 /*
788 * Bottom half interrupt handlers
789 */
790 static void mgsl_bh_handler(struct work_struct *work);
791 static void mgsl_bh_receive(struct mgsl_struct *info);
792 static void mgsl_bh_transmit(struct mgsl_struct *info);
793 static void mgsl_bh_status(struct mgsl_struct *info);
794
795 /*
796 * Interrupt handler routines and dispatch table.
797 */
798 static void mgsl_isr_null( struct mgsl_struct *info );
799 static void mgsl_isr_transmit_data( struct mgsl_struct *info );
800 static void mgsl_isr_receive_data( struct mgsl_struct *info );
801 static void mgsl_isr_receive_status( struct mgsl_struct *info );
802 static void mgsl_isr_transmit_status( struct mgsl_struct *info );
803 static void mgsl_isr_io_pin( struct mgsl_struct *info );
804 static void mgsl_isr_misc( struct mgsl_struct *info );
805 static void mgsl_isr_receive_dma( struct mgsl_struct *info );
806 static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
807
808 typedef void (*isr_dispatch_func)(struct mgsl_struct *);
809
810 static isr_dispatch_func UscIsrTable[7] =
811 {
812 mgsl_isr_null,
813 mgsl_isr_misc,
814 mgsl_isr_io_pin,
815 mgsl_isr_transmit_data,
816 mgsl_isr_transmit_status,
817 mgsl_isr_receive_data,
818 mgsl_isr_receive_status
819 };
820
821 /*
822 * ioctl call handlers
823 */
824 static int tiocmget(struct tty_struct *tty);
825 static int tiocmset(struct tty_struct *tty,
826 unsigned int set, unsigned int clear);
827 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
828 __user *user_icount);
829 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
830 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
831 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
832 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
833 static int mgsl_txenable(struct mgsl_struct * info, int enable);
834 static int mgsl_txabort(struct mgsl_struct * info);
835 static int mgsl_rxenable(struct mgsl_struct * info, int enable);
836 static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
837 static int mgsl_loopmode_send_done( struct mgsl_struct * info );
838
839 /* set non-zero on successful registration with PCI subsystem */
840 static bool pci_registered;
841
842 /*
843 * Global linked list of SyncLink devices
844 */
845 static struct mgsl_struct *mgsl_device_list;
846 static int mgsl_device_count;
847
848 /*
849 * Set this param to non-zero to load eax with the
850 * .text section address and breakpoint on module load.
851 * This is useful for use with gdb and add-symbol-file command.
852 */
853 static bool break_on_load;
854
855 /*
856 * Driver major number, defaults to zero to get auto
857 * assigned major number. May be forced as module parameter.
858 */
859 static int ttymajor;
860
861 /*
862 * Array of user specified options for ISA adapters.
863 */
864 static int io[MAX_ISA_DEVICES];
865 static int irq[MAX_ISA_DEVICES];
866 static int dma[MAX_ISA_DEVICES];
867 static int debug_level;
868 static int maxframe[MAX_TOTAL_DEVICES];
869 static int txdmabufs[MAX_TOTAL_DEVICES];
870 static int txholdbufs[MAX_TOTAL_DEVICES];
871
872 module_param(break_on_load, bool, 0);
873 module_param(ttymajor, int, 0);
874 module_param_array(io, int, NULL, 0);
875 module_param_array(irq, int, NULL, 0);
876 module_param_array(dma, int, NULL, 0);
877 module_param(debug_level, int, 0);
878 module_param_array(maxframe, int, NULL, 0);
879 module_param_array(txdmabufs, int, NULL, 0);
880 module_param_array(txholdbufs, int, NULL, 0);
881
882 static char *driver_name = "SyncLink serial driver";
883 static char *driver_version = "$Revision: 4.38 $";
884
885 static int synclink_init_one (struct pci_dev *dev,
886 const struct pci_device_id *ent);
887 static void synclink_remove_one (struct pci_dev *dev);
888
889 static struct pci_device_id synclink_pci_tbl[] = {
890 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
891 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
892 { 0, }, /* terminate list */
893 };
894 MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
895
896 MODULE_LICENSE("GPL");
897
898 static struct pci_driver synclink_pci_driver = {
899 .name = "synclink",
900 .id_table = synclink_pci_tbl,
901 .probe = synclink_init_one,
902 .remove = __devexit_p(synclink_remove_one),
903 };
904
905 static struct tty_driver *serial_driver;
906
907 /* number of characters left in xmit buffer before we ask for more */
908 #define WAKEUP_CHARS 256
909
910
911 static void mgsl_change_params(struct mgsl_struct *info);
912 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
913
914 /*
915 * 1st function defined in .text section. Calling this function in
916 * init_module() followed by a breakpoint allows a remote debugger
917 * (gdb) to get the .text address for the add-symbol-file command.
918 * This allows remote debugging of dynamically loadable modules.
919 */
920 static void* mgsl_get_text_ptr(void)
921 {
922 return mgsl_get_text_ptr;
923 }
924
925 static inline int mgsl_paranoia_check(struct mgsl_struct *info,
926 char *name, const char *routine)
927 {
928 #ifdef MGSL_PARANOIA_CHECK
929 static const char *badmagic =
930 "Warning: bad magic number for mgsl struct (%s) in %s\n";
931 static const char *badinfo =
932 "Warning: null mgsl_struct for (%s) in %s\n";
933
934 if (!info) {
935 printk(badinfo, name, routine);
936 return 1;
937 }
938 if (info->magic != MGSL_MAGIC) {
939 printk(badmagic, name, routine);
940 return 1;
941 }
942 #else
943 if (!info)
944 return 1;
945 #endif
946 return 0;
947 }
948
949 /**
950 * line discipline callback wrappers
951 *
952 * The wrappers maintain line discipline references
953 * while calling into the line discipline.
954 *
955 * ldisc_receive_buf - pass receive data to line discipline
956 */
957
958 static void ldisc_receive_buf(struct tty_struct *tty,
959 const __u8 *data, char *flags, int count)
960 {
961 struct tty_ldisc *ld;
962 if (!tty)
963 return;
964 ld = tty_ldisc_ref(tty);
965 if (ld) {
966 if (ld->ops->receive_buf)
967 ld->ops->receive_buf(tty, data, flags, count);
968 tty_ldisc_deref(ld);
969 }
970 }
971
972 /* mgsl_stop() throttle (stop) transmitter
973 *
974 * Arguments: tty pointer to tty info structure
975 * Return Value: None
976 */
977 static void mgsl_stop(struct tty_struct *tty)
978 {
979 struct mgsl_struct *info = tty->driver_data;
980 unsigned long flags;
981
982 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
983 return;
984
985 if ( debug_level >= DEBUG_LEVEL_INFO )
986 printk("mgsl_stop(%s)\n",info->device_name);
987
988 spin_lock_irqsave(&info->irq_spinlock,flags);
989 if (info->tx_enabled)
990 usc_stop_transmitter(info);
991 spin_unlock_irqrestore(&info->irq_spinlock,flags);
992
993 } /* end of mgsl_stop() */
994
995 /* mgsl_start() release (start) transmitter
996 *
997 * Arguments: tty pointer to tty info structure
998 * Return Value: None
999 */
1000 static void mgsl_start(struct tty_struct *tty)
1001 {
1002 struct mgsl_struct *info = tty->driver_data;
1003 unsigned long flags;
1004
1005 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1006 return;
1007
1008 if ( debug_level >= DEBUG_LEVEL_INFO )
1009 printk("mgsl_start(%s)\n",info->device_name);
1010
1011 spin_lock_irqsave(&info->irq_spinlock,flags);
1012 if (!info->tx_enabled)
1013 usc_start_transmitter(info);
1014 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1015
1016 } /* end of mgsl_start() */
1017
1018 /*
1019 * Bottom half work queue access functions
1020 */
1021
1022 /* mgsl_bh_action() Return next bottom half action to perform.
1023 * Return Value: BH action code or 0 if nothing to do.
1024 */
1025 static int mgsl_bh_action(struct mgsl_struct *info)
1026 {
1027 unsigned long flags;
1028 int rc = 0;
1029
1030 spin_lock_irqsave(&info->irq_spinlock,flags);
1031
1032 if (info->pending_bh & BH_RECEIVE) {
1033 info->pending_bh &= ~BH_RECEIVE;
1034 rc = BH_RECEIVE;
1035 } else if (info->pending_bh & BH_TRANSMIT) {
1036 info->pending_bh &= ~BH_TRANSMIT;
1037 rc = BH_TRANSMIT;
1038 } else if (info->pending_bh & BH_STATUS) {
1039 info->pending_bh &= ~BH_STATUS;
1040 rc = BH_STATUS;
1041 }
1042
1043 if (!rc) {
1044 /* Mark BH routine as complete */
1045 info->bh_running = false;
1046 info->bh_requested = false;
1047 }
1048
1049 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1050
1051 return rc;
1052 }
1053
1054 /*
1055 * Perform bottom half processing of work items queued by ISR.
1056 */
1057 static void mgsl_bh_handler(struct work_struct *work)
1058 {
1059 struct mgsl_struct *info =
1060 container_of(work, struct mgsl_struct, task);
1061 int action;
1062
1063 if (!info)
1064 return;
1065
1066 if ( debug_level >= DEBUG_LEVEL_BH )
1067 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1068 __FILE__,__LINE__,info->device_name);
1069
1070 info->bh_running = true;
1071
1072 while((action = mgsl_bh_action(info)) != 0) {
1073
1074 /* Process work item */
1075 if ( debug_level >= DEBUG_LEVEL_BH )
1076 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1077 __FILE__,__LINE__,action);
1078
1079 switch (action) {
1080
1081 case BH_RECEIVE:
1082 mgsl_bh_receive(info);
1083 break;
1084 case BH_TRANSMIT:
1085 mgsl_bh_transmit(info);
1086 break;
1087 case BH_STATUS:
1088 mgsl_bh_status(info);
1089 break;
1090 default:
1091 /* unknown work item ID */
1092 printk("Unknown work item ID=%08X!\n", action);
1093 break;
1094 }
1095 }
1096
1097 if ( debug_level >= DEBUG_LEVEL_BH )
1098 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1099 __FILE__,__LINE__,info->device_name);
1100 }
1101
1102 static void mgsl_bh_receive(struct mgsl_struct *info)
1103 {
1104 bool (*get_rx_frame)(struct mgsl_struct *info) =
1105 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1106
1107 if ( debug_level >= DEBUG_LEVEL_BH )
1108 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1109 __FILE__,__LINE__,info->device_name);
1110
1111 do
1112 {
1113 if (info->rx_rcc_underrun) {
1114 unsigned long flags;
1115 spin_lock_irqsave(&info->irq_spinlock,flags);
1116 usc_start_receiver(info);
1117 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1118 return;
1119 }
1120 } while(get_rx_frame(info));
1121 }
1122
1123 static void mgsl_bh_transmit(struct mgsl_struct *info)
1124 {
1125 struct tty_struct *tty = info->port.tty;
1126 unsigned long flags;
1127
1128 if ( debug_level >= DEBUG_LEVEL_BH )
1129 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1130 __FILE__,__LINE__,info->device_name);
1131
1132 if (tty)
1133 tty_wakeup(tty);
1134
1135 /* if transmitter idle and loopmode_send_done_requested
1136 * then start echoing RxD to TxD
1137 */
1138 spin_lock_irqsave(&info->irq_spinlock,flags);
1139 if ( !info->tx_active && info->loopmode_send_done_requested )
1140 usc_loopmode_send_done( info );
1141 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1142 }
1143
1144 static void mgsl_bh_status(struct mgsl_struct *info)
1145 {
1146 if ( debug_level >= DEBUG_LEVEL_BH )
1147 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1148 __FILE__,__LINE__,info->device_name);
1149
1150 info->ri_chkcount = 0;
1151 info->dsr_chkcount = 0;
1152 info->dcd_chkcount = 0;
1153 info->cts_chkcount = 0;
1154 }
1155
1156 /* mgsl_isr_receive_status()
1157 *
1158 * Service a receive status interrupt. The type of status
1159 * interrupt is indicated by the state of the RCSR.
1160 * This is only used for HDLC mode.
1161 *
1162 * Arguments: info pointer to device instance data
1163 * Return Value: None
1164 */
1165 static void mgsl_isr_receive_status( struct mgsl_struct *info )
1166 {
1167 u16 status = usc_InReg( info, RCSR );
1168
1169 if ( debug_level >= DEBUG_LEVEL_ISR )
1170 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1171 __FILE__,__LINE__,status);
1172
1173 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1174 info->loopmode_insert_requested &&
1175 usc_loopmode_active(info) )
1176 {
1177 ++info->icount.rxabort;
1178 info->loopmode_insert_requested = false;
1179
1180 /* clear CMR:13 to start echoing RxD to TxD */
1181 info->cmr_value &= ~BIT13;
1182 usc_OutReg(info, CMR, info->cmr_value);
1183
1184 /* disable received abort irq (no longer required) */
1185 usc_OutReg(info, RICR,
1186 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1187 }
1188
1189 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1190 if (status & RXSTATUS_EXITED_HUNT)
1191 info->icount.exithunt++;
1192 if (status & RXSTATUS_IDLE_RECEIVED)
1193 info->icount.rxidle++;
1194 wake_up_interruptible(&info->event_wait_q);
1195 }
1196
1197 if (status & RXSTATUS_OVERRUN){
1198 info->icount.rxover++;
1199 usc_process_rxoverrun_sync( info );
1200 }
1201
1202 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1203 usc_UnlatchRxstatusBits( info, status );
1204
1205 } /* end of mgsl_isr_receive_status() */
1206
1207 /* mgsl_isr_transmit_status()
1208 *
1209 * Service a transmit status interrupt
1210 * HDLC mode :end of transmit frame
1211 * Async mode:all data is sent
1212 * transmit status is indicated by bits in the TCSR.
1213 *
1214 * Arguments: info pointer to device instance data
1215 * Return Value: None
1216 */
1217 static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1218 {
1219 u16 status = usc_InReg( info, TCSR );
1220
1221 if ( debug_level >= DEBUG_LEVEL_ISR )
1222 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1223 __FILE__,__LINE__,status);
1224
1225 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1226 usc_UnlatchTxstatusBits( info, status );
1227
1228 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1229 {
1230 /* finished sending HDLC abort. This may leave */
1231 /* the TxFifo with data from the aborted frame */
1232 /* so purge the TxFifo. Also shutdown the DMA */
1233 /* channel in case there is data remaining in */
1234 /* the DMA buffer */
1235 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1236 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1237 }
1238
1239 if ( status & TXSTATUS_EOF_SENT )
1240 info->icount.txok++;
1241 else if ( status & TXSTATUS_UNDERRUN )
1242 info->icount.txunder++;
1243 else if ( status & TXSTATUS_ABORT_SENT )
1244 info->icount.txabort++;
1245 else
1246 info->icount.txunder++;
1247
1248 info->tx_active = false;
1249 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1250 del_timer(&info->tx_timer);
1251
1252 if ( info->drop_rts_on_tx_done ) {
1253 usc_get_serial_signals( info );
1254 if ( info->serial_signals & SerialSignal_RTS ) {
1255 info->serial_signals &= ~SerialSignal_RTS;
1256 usc_set_serial_signals( info );
1257 }
1258 info->drop_rts_on_tx_done = false;
1259 }
1260
1261 #if SYNCLINK_GENERIC_HDLC
1262 if (info->netcount)
1263 hdlcdev_tx_done(info);
1264 else
1265 #endif
1266 {
1267 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1268 usc_stop_transmitter(info);
1269 return;
1270 }
1271 info->pending_bh |= BH_TRANSMIT;
1272 }
1273
1274 } /* end of mgsl_isr_transmit_status() */
1275
1276 /* mgsl_isr_io_pin()
1277 *
1278 * Service an Input/Output pin interrupt. The type of
1279 * interrupt is indicated by bits in the MISR
1280 *
1281 * Arguments: info pointer to device instance data
1282 * Return Value: None
1283 */
1284 static void mgsl_isr_io_pin( struct mgsl_struct *info )
1285 {
1286 struct mgsl_icount *icount;
1287 u16 status = usc_InReg( info, MISR );
1288
1289 if ( debug_level >= DEBUG_LEVEL_ISR )
1290 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1291 __FILE__,__LINE__,status);
1292
1293 usc_ClearIrqPendingBits( info, IO_PIN );
1294 usc_UnlatchIostatusBits( info, status );
1295
1296 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1297 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1298 icount = &info->icount;
1299 /* update input line counters */
1300 if (status & MISCSTATUS_RI_LATCHED) {
1301 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1302 usc_DisablestatusIrqs(info,SICR_RI);
1303 icount->rng++;
1304 if ( status & MISCSTATUS_RI )
1305 info->input_signal_events.ri_up++;
1306 else
1307 info->input_signal_events.ri_down++;
1308 }
1309 if (status & MISCSTATUS_DSR_LATCHED) {
1310 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1311 usc_DisablestatusIrqs(info,SICR_DSR);
1312 icount->dsr++;
1313 if ( status & MISCSTATUS_DSR )
1314 info->input_signal_events.dsr_up++;
1315 else
1316 info->input_signal_events.dsr_down++;
1317 }
1318 if (status & MISCSTATUS_DCD_LATCHED) {
1319 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1320 usc_DisablestatusIrqs(info,SICR_DCD);
1321 icount->dcd++;
1322 if (status & MISCSTATUS_DCD) {
1323 info->input_signal_events.dcd_up++;
1324 } else
1325 info->input_signal_events.dcd_down++;
1326 #if SYNCLINK_GENERIC_HDLC
1327 if (info->netcount) {
1328 if (status & MISCSTATUS_DCD)
1329 netif_carrier_on(info->netdev);
1330 else
1331 netif_carrier_off(info->netdev);
1332 }
1333 #endif
1334 }
1335 if (status & MISCSTATUS_CTS_LATCHED)
1336 {
1337 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1338 usc_DisablestatusIrqs(info,SICR_CTS);
1339 icount->cts++;
1340 if ( status & MISCSTATUS_CTS )
1341 info->input_signal_events.cts_up++;
1342 else
1343 info->input_signal_events.cts_down++;
1344 }
1345 wake_up_interruptible(&info->status_event_wait_q);
1346 wake_up_interruptible(&info->event_wait_q);
1347
1348 if ( (info->port.flags & ASYNC_CHECK_CD) &&
1349 (status & MISCSTATUS_DCD_LATCHED) ) {
1350 if ( debug_level >= DEBUG_LEVEL_ISR )
1351 printk("%s CD now %s...", info->device_name,
1352 (status & MISCSTATUS_DCD) ? "on" : "off");
1353 if (status & MISCSTATUS_DCD)
1354 wake_up_interruptible(&info->port.open_wait);
1355 else {
1356 if ( debug_level >= DEBUG_LEVEL_ISR )
1357 printk("doing serial hangup...");
1358 if (info->port.tty)
1359 tty_hangup(info->port.tty);
1360 }
1361 }
1362
1363 if ( (info->port.flags & ASYNC_CTS_FLOW) &&
1364 (status & MISCSTATUS_CTS_LATCHED) ) {
1365 if (info->port.tty->hw_stopped) {
1366 if (status & MISCSTATUS_CTS) {
1367 if ( debug_level >= DEBUG_LEVEL_ISR )
1368 printk("CTS tx start...");
1369 if (info->port.tty)
1370 info->port.tty->hw_stopped = 0;
1371 usc_start_transmitter(info);
1372 info->pending_bh |= BH_TRANSMIT;
1373 return;
1374 }
1375 } else {
1376 if (!(status & MISCSTATUS_CTS)) {
1377 if ( debug_level >= DEBUG_LEVEL_ISR )
1378 printk("CTS tx stop...");
1379 if (info->port.tty)
1380 info->port.tty->hw_stopped = 1;
1381 usc_stop_transmitter(info);
1382 }
1383 }
1384 }
1385 }
1386
1387 info->pending_bh |= BH_STATUS;
1388
1389 /* for diagnostics set IRQ flag */
1390 if ( status & MISCSTATUS_TXC_LATCHED ){
1391 usc_OutReg( info, SICR,
1392 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1393 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1394 info->irq_occurred = true;
1395 }
1396
1397 } /* end of mgsl_isr_io_pin() */
1398
1399 /* mgsl_isr_transmit_data()
1400 *
1401 * Service a transmit data interrupt (async mode only).
1402 *
1403 * Arguments: info pointer to device instance data
1404 * Return Value: None
1405 */
1406 static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1407 {
1408 if ( debug_level >= DEBUG_LEVEL_ISR )
1409 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1410 __FILE__,__LINE__,info->xmit_cnt);
1411
1412 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1413
1414 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1415 usc_stop_transmitter(info);
1416 return;
1417 }
1418
1419 if ( info->xmit_cnt )
1420 usc_load_txfifo( info );
1421 else
1422 info->tx_active = false;
1423
1424 if (info->xmit_cnt < WAKEUP_CHARS)
1425 info->pending_bh |= BH_TRANSMIT;
1426
1427 } /* end of mgsl_isr_transmit_data() */
1428
1429 /* mgsl_isr_receive_data()
1430 *
1431 * Service a receive data interrupt. This occurs
1432 * when operating in asynchronous interrupt transfer mode.
1433 * The receive data FIFO is flushed to the receive data buffers.
1434 *
1435 * Arguments: info pointer to device instance data
1436 * Return Value: None
1437 */
1438 static void mgsl_isr_receive_data( struct mgsl_struct *info )
1439 {
1440 int Fifocount;
1441 u16 status;
1442 int work = 0;
1443 unsigned char DataByte;
1444 struct tty_struct *tty = info->port.tty;
1445 struct mgsl_icount *icount = &info->icount;
1446
1447 if ( debug_level >= DEBUG_LEVEL_ISR )
1448 printk("%s(%d):mgsl_isr_receive_data\n",
1449 __FILE__,__LINE__);
1450
1451 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1452
1453 /* select FIFO status for RICR readback */
1454 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1455
1456 /* clear the Wordstatus bit so that status readback */
1457 /* only reflects the status of this byte */
1458 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1459
1460 /* flush the receive FIFO */
1461
1462 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1463 int flag;
1464
1465 /* read one byte from RxFIFO */
1466 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1467 info->io_base + CCAR );
1468 DataByte = inb( info->io_base + CCAR );
1469
1470 /* get the status of the received byte */
1471 status = usc_InReg(info, RCSR);
1472 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1473 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1474 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1475
1476 icount->rx++;
1477
1478 flag = 0;
1479 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1480 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1481 printk("rxerr=%04X\n",status);
1482 /* update error statistics */
1483 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1484 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1485 icount->brk++;
1486 } else if (status & RXSTATUS_PARITY_ERROR)
1487 icount->parity++;
1488 else if (status & RXSTATUS_FRAMING_ERROR)
1489 icount->frame++;
1490 else if (status & RXSTATUS_OVERRUN) {
1491 /* must issue purge fifo cmd before */
1492 /* 16C32 accepts more receive chars */
1493 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1494 icount->overrun++;
1495 }
1496
1497 /* discard char if tty control flags say so */
1498 if (status & info->ignore_status_mask)
1499 continue;
1500
1501 status &= info->read_status_mask;
1502
1503 if (status & RXSTATUS_BREAK_RECEIVED) {
1504 flag = TTY_BREAK;
1505 if (info->port.flags & ASYNC_SAK)
1506 do_SAK(tty);
1507 } else if (status & RXSTATUS_PARITY_ERROR)
1508 flag = TTY_PARITY;
1509 else if (status & RXSTATUS_FRAMING_ERROR)
1510 flag = TTY_FRAME;
1511 } /* end of if (error) */
1512 tty_insert_flip_char(tty, DataByte, flag);
1513 if (status & RXSTATUS_OVERRUN) {
1514 /* Overrun is special, since it's
1515 * reported immediately, and doesn't
1516 * affect the current character
1517 */
1518 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1519 }
1520 }
1521
1522 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1523 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1524 __FILE__,__LINE__,icount->rx,icount->brk,
1525 icount->parity,icount->frame,icount->overrun);
1526 }
1527
1528 if(work)
1529 tty_flip_buffer_push(tty);
1530 }
1531
1532 /* mgsl_isr_misc()
1533 *
1534 * Service a miscellaneous interrupt source.
1535 *
1536 * Arguments: info pointer to device extension (instance data)
1537 * Return Value: None
1538 */
1539 static void mgsl_isr_misc( struct mgsl_struct *info )
1540 {
1541 u16 status = usc_InReg( info, MISR );
1542
1543 if ( debug_level >= DEBUG_LEVEL_ISR )
1544 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1545 __FILE__,__LINE__,status);
1546
1547 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1548 (info->params.mode == MGSL_MODE_HDLC)) {
1549
1550 /* turn off receiver and rx DMA */
1551 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1552 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1553 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1554 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1555 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1556
1557 /* schedule BH handler to restart receiver */
1558 info->pending_bh |= BH_RECEIVE;
1559 info->rx_rcc_underrun = true;
1560 }
1561
1562 usc_ClearIrqPendingBits( info, MISC );
1563 usc_UnlatchMiscstatusBits( info, status );
1564
1565 } /* end of mgsl_isr_misc() */
1566
1567 /* mgsl_isr_null()
1568 *
1569 * Services undefined interrupt vectors from the
1570 * USC. (hence this function SHOULD never be called)
1571 *
1572 * Arguments: info pointer to device extension (instance data)
1573 * Return Value: None
1574 */
1575 static void mgsl_isr_null( struct mgsl_struct *info )
1576 {
1577
1578 } /* end of mgsl_isr_null() */
1579
1580 /* mgsl_isr_receive_dma()
1581 *
1582 * Service a receive DMA channel interrupt.
1583 * For this driver there are two sources of receive DMA interrupts
1584 * as identified in the Receive DMA mode Register (RDMR):
1585 *
1586 * BIT3 EOA/EOL End of List, all receive buffers in receive
1587 * buffer list have been filled (no more free buffers
1588 * available). The DMA controller has shut down.
1589 *
1590 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1591 * DMA buffer is terminated in response to completion
1592 * of a good frame or a frame with errors. The status
1593 * of the frame is stored in the buffer entry in the
1594 * list of receive buffer entries.
1595 *
1596 * Arguments: info pointer to device instance data
1597 * Return Value: None
1598 */
1599 static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1600 {
1601 u16 status;
1602
1603 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1604 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1605
1606 /* Read the receive DMA status to identify interrupt type. */
1607 /* This also clears the status bits. */
1608 status = usc_InDmaReg( info, RDMR );
1609
1610 if ( debug_level >= DEBUG_LEVEL_ISR )
1611 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1612 __FILE__,__LINE__,info->device_name,status);
1613
1614 info->pending_bh |= BH_RECEIVE;
1615
1616 if ( status & BIT3 ) {
1617 info->rx_overflow = true;
1618 info->icount.buf_overrun++;
1619 }
1620
1621 } /* end of mgsl_isr_receive_dma() */
1622
1623 /* mgsl_isr_transmit_dma()
1624 *
1625 * This function services a transmit DMA channel interrupt.
1626 *
1627 * For this driver there is one source of transmit DMA interrupts
1628 * as identified in the Transmit DMA Mode Register (TDMR):
1629 *
1630 * BIT2 EOB End of Buffer. This interrupt occurs when a
1631 * transmit DMA buffer has been emptied.
1632 *
1633 * The driver maintains enough transmit DMA buffers to hold at least
1634 * one max frame size transmit frame. When operating in a buffered
1635 * transmit mode, there may be enough transmit DMA buffers to hold at
1636 * least two or more max frame size frames. On an EOB condition,
1637 * determine if there are any queued transmit buffers and copy into
1638 * transmit DMA buffers if we have room.
1639 *
1640 * Arguments: info pointer to device instance data
1641 * Return Value: None
1642 */
1643 static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1644 {
1645 u16 status;
1646
1647 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1648 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1649
1650 /* Read the transmit DMA status to identify interrupt type. */
1651 /* This also clears the status bits. */
1652
1653 status = usc_InDmaReg( info, TDMR );
1654
1655 if ( debug_level >= DEBUG_LEVEL_ISR )
1656 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1657 __FILE__,__LINE__,info->device_name,status);
1658
1659 if ( status & BIT2 ) {
1660 --info->tx_dma_buffers_used;
1661
1662 /* if there are transmit frames queued,
1663 * try to load the next one
1664 */
1665 if ( load_next_tx_holding_buffer(info) ) {
1666 /* if call returns non-zero value, we have
1667 * at least one free tx holding buffer
1668 */
1669 info->pending_bh |= BH_TRANSMIT;
1670 }
1671 }
1672
1673 } /* end of mgsl_isr_transmit_dma() */
1674
1675 /* mgsl_interrupt()
1676 *
1677 * Interrupt service routine entry point.
1678 *
1679 * Arguments:
1680 *
1681 * irq interrupt number that caused interrupt
1682 * dev_id device ID supplied during interrupt registration
1683 *
1684 * Return Value: None
1685 */
1686 static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1687 {
1688 struct mgsl_struct *info = dev_id;
1689 u16 UscVector;
1690 u16 DmaVector;
1691
1692 if ( debug_level >= DEBUG_LEVEL_ISR )
1693 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1694 __FILE__, __LINE__, info->irq_level);
1695
1696 spin_lock(&info->irq_spinlock);
1697
1698 for(;;) {
1699 /* Read the interrupt vectors from hardware. */
1700 UscVector = usc_InReg(info, IVR) >> 9;
1701 DmaVector = usc_InDmaReg(info, DIVR);
1702
1703 if ( debug_level >= DEBUG_LEVEL_ISR )
1704 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1705 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1706
1707 if ( !UscVector && !DmaVector )
1708 break;
1709
1710 /* Dispatch interrupt vector */
1711 if ( UscVector )
1712 (*UscIsrTable[UscVector])(info);
1713 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1714 mgsl_isr_transmit_dma(info);
1715 else
1716 mgsl_isr_receive_dma(info);
1717
1718 if ( info->isr_overflow ) {
1719 printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1720 __FILE__, __LINE__, info->device_name, info->irq_level);
1721 usc_DisableMasterIrqBit(info);
1722 usc_DisableDmaInterrupts(info,DICR_MASTER);
1723 break;
1724 }
1725 }
1726
1727 /* Request bottom half processing if there's something
1728 * for it to do and the bh is not already running
1729 */
1730
1731 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1732 if ( debug_level >= DEBUG_LEVEL_ISR )
1733 printk("%s(%d):%s queueing bh task.\n",
1734 __FILE__,__LINE__,info->device_name);
1735 schedule_work(&info->task);
1736 info->bh_requested = true;
1737 }
1738
1739 spin_unlock(&info->irq_spinlock);
1740
1741 if ( debug_level >= DEBUG_LEVEL_ISR )
1742 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1743 __FILE__, __LINE__, info->irq_level);
1744
1745 return IRQ_HANDLED;
1746 } /* end of mgsl_interrupt() */
1747
1748 /* startup()
1749 *
1750 * Initialize and start device.
1751 *
1752 * Arguments: info pointer to device instance data
1753 * Return Value: 0 if success, otherwise error code
1754 */
1755 static int startup(struct mgsl_struct * info)
1756 {
1757 int retval = 0;
1758
1759 if ( debug_level >= DEBUG_LEVEL_INFO )
1760 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1761
1762 if (info->port.flags & ASYNC_INITIALIZED)
1763 return 0;
1764
1765 if (!info->xmit_buf) {
1766 /* allocate a page of memory for a transmit buffer */
1767 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1768 if (!info->xmit_buf) {
1769 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1770 __FILE__,__LINE__,info->device_name);
1771 return -ENOMEM;
1772 }
1773 }
1774
1775 info->pending_bh = 0;
1776
1777 memset(&info->icount, 0, sizeof(info->icount));
1778
1779 setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
1780
1781 /* Allocate and claim adapter resources */
1782 retval = mgsl_claim_resources(info);
1783
1784 /* perform existence check and diagnostics */
1785 if ( !retval )
1786 retval = mgsl_adapter_test(info);
1787
1788 if ( retval ) {
1789 if (capable(CAP_SYS_ADMIN) && info->port.tty)
1790 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1791 mgsl_release_resources(info);
1792 return retval;
1793 }
1794
1795 /* program hardware for current parameters */
1796 mgsl_change_params(info);
1797
1798 if (info->port.tty)
1799 clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1800
1801 info->port.flags |= ASYNC_INITIALIZED;
1802
1803 return 0;
1804
1805 } /* end of startup() */
1806
1807 /* shutdown()
1808 *
1809 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1810 *
1811 * Arguments: info pointer to device instance data
1812 * Return Value: None
1813 */
1814 static void shutdown(struct mgsl_struct * info)
1815 {
1816 unsigned long flags;
1817
1818 if (!(info->port.flags & ASYNC_INITIALIZED))
1819 return;
1820
1821 if (debug_level >= DEBUG_LEVEL_INFO)
1822 printk("%s(%d):mgsl_shutdown(%s)\n",
1823 __FILE__,__LINE__, info->device_name );
1824
1825 /* clear status wait queue because status changes */
1826 /* can't happen after shutting down the hardware */
1827 wake_up_interruptible(&info->status_event_wait_q);
1828 wake_up_interruptible(&info->event_wait_q);
1829
1830 del_timer_sync(&info->tx_timer);
1831
1832 if (info->xmit_buf) {
1833 free_page((unsigned long) info->xmit_buf);
1834 info->xmit_buf = NULL;
1835 }
1836
1837 spin_lock_irqsave(&info->irq_spinlock,flags);
1838 usc_DisableMasterIrqBit(info);
1839 usc_stop_receiver(info);
1840 usc_stop_transmitter(info);
1841 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1842 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1843 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1844
1845 /* Disable DMAEN (Port 7, Bit 14) */
1846 /* This disconnects the DMA request signal from the ISA bus */
1847 /* on the ISA adapter. This has no effect for the PCI adapter */
1848 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1849
1850 /* Disable INTEN (Port 6, Bit12) */
1851 /* This disconnects the IRQ request signal to the ISA bus */
1852 /* on the ISA adapter. This has no effect for the PCI adapter */
1853 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1854
1855 if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
1856 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1857 usc_set_serial_signals(info);
1858 }
1859
1860 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1861
1862 mgsl_release_resources(info);
1863
1864 if (info->port.tty)
1865 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1866
1867 info->port.flags &= ~ASYNC_INITIALIZED;
1868
1869 } /* end of shutdown() */
1870
1871 static void mgsl_program_hw(struct mgsl_struct *info)
1872 {
1873 unsigned long flags;
1874
1875 spin_lock_irqsave(&info->irq_spinlock,flags);
1876
1877 usc_stop_receiver(info);
1878 usc_stop_transmitter(info);
1879 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1880
1881 if (info->params.mode == MGSL_MODE_HDLC ||
1882 info->params.mode == MGSL_MODE_RAW ||
1883 info->netcount)
1884 usc_set_sync_mode(info);
1885 else
1886 usc_set_async_mode(info);
1887
1888 usc_set_serial_signals(info);
1889
1890 info->dcd_chkcount = 0;
1891 info->cts_chkcount = 0;
1892 info->ri_chkcount = 0;
1893 info->dsr_chkcount = 0;
1894
1895 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1896 usc_EnableInterrupts(info, IO_PIN);
1897 usc_get_serial_signals(info);
1898
1899 if (info->netcount || info->port.tty->termios->c_cflag & CREAD)
1900 usc_start_receiver(info);
1901
1902 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1903 }
1904
1905 /* Reconfigure adapter based on new parameters
1906 */
1907 static void mgsl_change_params(struct mgsl_struct *info)
1908 {
1909 unsigned cflag;
1910 int bits_per_char;
1911
1912 if (!info->port.tty || !info->port.tty->termios)
1913 return;
1914
1915 if (debug_level >= DEBUG_LEVEL_INFO)
1916 printk("%s(%d):mgsl_change_params(%s)\n",
1917 __FILE__,__LINE__, info->device_name );
1918
1919 cflag = info->port.tty->termios->c_cflag;
1920
1921 /* if B0 rate (hangup) specified then negate DTR and RTS */
1922 /* otherwise assert DTR and RTS */
1923 if (cflag & CBAUD)
1924 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1925 else
1926 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1927
1928 /* byte size and parity */
1929
1930 switch (cflag & CSIZE) {
1931 case CS5: info->params.data_bits = 5; break;
1932 case CS6: info->params.data_bits = 6; break;
1933 case CS7: info->params.data_bits = 7; break;
1934 case CS8: info->params.data_bits = 8; break;
1935 /* Never happens, but GCC is too dumb to figure it out */
1936 default: info->params.data_bits = 7; break;
1937 }
1938
1939 if (cflag & CSTOPB)
1940 info->params.stop_bits = 2;
1941 else
1942 info->params.stop_bits = 1;
1943
1944 info->params.parity = ASYNC_PARITY_NONE;
1945 if (cflag & PARENB) {
1946 if (cflag & PARODD)
1947 info->params.parity = ASYNC_PARITY_ODD;
1948 else
1949 info->params.parity = ASYNC_PARITY_EVEN;
1950 #ifdef CMSPAR
1951 if (cflag & CMSPAR)
1952 info->params.parity = ASYNC_PARITY_SPACE;
1953 #endif
1954 }
1955
1956 /* calculate number of jiffies to transmit a full
1957 * FIFO (32 bytes) at specified data rate
1958 */
1959 bits_per_char = info->params.data_bits +
1960 info->params.stop_bits + 1;
1961
1962 /* if port data rate is set to 460800 or less then
1963 * allow tty settings to override, otherwise keep the
1964 * current data rate.
1965 */
1966 if (info->params.data_rate <= 460800)
1967 info->params.data_rate = tty_get_baud_rate(info->port.tty);
1968
1969 if ( info->params.data_rate ) {
1970 info->timeout = (32*HZ*bits_per_char) /
1971 info->params.data_rate;
1972 }
1973 info->timeout += HZ/50; /* Add .02 seconds of slop */
1974
1975 if (cflag & CRTSCTS)
1976 info->port.flags |= ASYNC_CTS_FLOW;
1977 else
1978 info->port.flags &= ~ASYNC_CTS_FLOW;
1979
1980 if (cflag & CLOCAL)
1981 info->port.flags &= ~ASYNC_CHECK_CD;
1982 else
1983 info->port.flags |= ASYNC_CHECK_CD;
1984
1985 /* process tty input control flags */
1986
1987 info->read_status_mask = RXSTATUS_OVERRUN;
1988 if (I_INPCK(info->port.tty))
1989 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1990 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1991 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1992
1993 if (I_IGNPAR(info->port.tty))
1994 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1995 if (I_IGNBRK(info->port.tty)) {
1996 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1997 /* If ignoring parity and break indicators, ignore
1998 * overruns too. (For real raw support).
1999 */
2000 if (I_IGNPAR(info->port.tty))
2001 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2002 }
2003
2004 mgsl_program_hw(info);
2005
2006 } /* end of mgsl_change_params() */
2007
2008 /* mgsl_put_char()
2009 *
2010 * Add a character to the transmit buffer.
2011 *
2012 * Arguments: tty pointer to tty information structure
2013 * ch character to add to transmit buffer
2014 *
2015 * Return Value: None
2016 */
2017 static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2018 {
2019 struct mgsl_struct *info = tty->driver_data;
2020 unsigned long flags;
2021 int ret = 0;
2022
2023 if (debug_level >= DEBUG_LEVEL_INFO) {
2024 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2025 __FILE__, __LINE__, ch, info->device_name);
2026 }
2027
2028 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2029 return 0;
2030
2031 if (!info->xmit_buf)
2032 return 0;
2033
2034 spin_lock_irqsave(&info->irq_spinlock, flags);
2035
2036 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2037 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2038 info->xmit_buf[info->xmit_head++] = ch;
2039 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2040 info->xmit_cnt++;
2041 ret = 1;
2042 }
2043 }
2044 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2045 return ret;
2046
2047 } /* end of mgsl_put_char() */
2048
2049 /* mgsl_flush_chars()
2050 *
2051 * Enable transmitter so remaining characters in the
2052 * transmit buffer are sent.
2053 *
2054 * Arguments: tty pointer to tty information structure
2055 * Return Value: None
2056 */
2057 static void mgsl_flush_chars(struct tty_struct *tty)
2058 {
2059 struct mgsl_struct *info = tty->driver_data;
2060 unsigned long flags;
2061
2062 if ( debug_level >= DEBUG_LEVEL_INFO )
2063 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2064 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2065
2066 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2067 return;
2068
2069 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2070 !info->xmit_buf)
2071 return;
2072
2073 if ( debug_level >= DEBUG_LEVEL_INFO )
2074 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2075 __FILE__,__LINE__,info->device_name );
2076
2077 spin_lock_irqsave(&info->irq_spinlock,flags);
2078
2079 if (!info->tx_active) {
2080 if ( (info->params.mode == MGSL_MODE_HDLC ||
2081 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2082 /* operating in synchronous (frame oriented) mode */
2083 /* copy data from circular xmit_buf to */
2084 /* transmit DMA buffer. */
2085 mgsl_load_tx_dma_buffer(info,
2086 info->xmit_buf,info->xmit_cnt);
2087 }
2088 usc_start_transmitter(info);
2089 }
2090
2091 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2092
2093 } /* end of mgsl_flush_chars() */
2094
2095 /* mgsl_write()
2096 *
2097 * Send a block of data
2098 *
2099 * Arguments:
2100 *
2101 * tty pointer to tty information structure
2102 * buf pointer to buffer containing send data
2103 * count size of send data in bytes
2104 *
2105 * Return Value: number of characters written
2106 */
2107 static int mgsl_write(struct tty_struct * tty,
2108 const unsigned char *buf, int count)
2109 {
2110 int c, ret = 0;
2111 struct mgsl_struct *info = tty->driver_data;
2112 unsigned long flags;
2113
2114 if ( debug_level >= DEBUG_LEVEL_INFO )
2115 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2116 __FILE__,__LINE__,info->device_name,count);
2117
2118 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2119 goto cleanup;
2120
2121 if (!info->xmit_buf)
2122 goto cleanup;
2123
2124 if ( info->params.mode == MGSL_MODE_HDLC ||
2125 info->params.mode == MGSL_MODE_RAW ) {
2126 /* operating in synchronous (frame oriented) mode */
2127 if (info->tx_active) {
2128
2129 if ( info->params.mode == MGSL_MODE_HDLC ) {
2130 ret = 0;
2131 goto cleanup;
2132 }
2133 /* transmitter is actively sending data -
2134 * if we have multiple transmit dma and
2135 * holding buffers, attempt to queue this
2136 * frame for transmission at a later time.
2137 */
2138 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2139 /* no tx holding buffers available */
2140 ret = 0;
2141 goto cleanup;
2142 }
2143
2144 /* queue transmit frame request */
2145 ret = count;
2146 save_tx_buffer_request(info,buf,count);
2147
2148 /* if we have sufficient tx dma buffers,
2149 * load the next buffered tx request
2150 */
2151 spin_lock_irqsave(&info->irq_spinlock,flags);
2152 load_next_tx_holding_buffer(info);
2153 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2154 goto cleanup;
2155 }
2156
2157 /* if operating in HDLC LoopMode and the adapter */
2158 /* has yet to be inserted into the loop, we can't */
2159 /* transmit */
2160
2161 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2162 !usc_loopmode_active(info) )
2163 {
2164 ret = 0;
2165 goto cleanup;
2166 }
2167
2168 if ( info->xmit_cnt ) {
2169 /* Send accumulated from send_char() calls */
2170 /* as frame and wait before accepting more data. */
2171 ret = 0;
2172
2173 /* copy data from circular xmit_buf to */
2174 /* transmit DMA buffer. */
2175 mgsl_load_tx_dma_buffer(info,
2176 info->xmit_buf,info->xmit_cnt);
2177 if ( debug_level >= DEBUG_LEVEL_INFO )
2178 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2179 __FILE__,__LINE__,info->device_name);
2180 } else {
2181 if ( debug_level >= DEBUG_LEVEL_INFO )
2182 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2183 __FILE__,__LINE__,info->device_name);
2184 ret = count;
2185 info->xmit_cnt = count;
2186 mgsl_load_tx_dma_buffer(info,buf,count);
2187 }
2188 } else {
2189 while (1) {
2190 spin_lock_irqsave(&info->irq_spinlock,flags);
2191 c = min_t(int, count,
2192 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2193 SERIAL_XMIT_SIZE - info->xmit_head));
2194 if (c <= 0) {
2195 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2196 break;
2197 }
2198 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2199 info->xmit_head = ((info->xmit_head + c) &
2200 (SERIAL_XMIT_SIZE-1));
2201 info->xmit_cnt += c;
2202 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2203 buf += c;
2204 count -= c;
2205 ret += c;
2206 }
2207 }
2208
2209 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2210 spin_lock_irqsave(&info->irq_spinlock,flags);
2211 if (!info->tx_active)
2212 usc_start_transmitter(info);
2213 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2214 }
2215 cleanup:
2216 if ( debug_level >= DEBUG_LEVEL_INFO )
2217 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2218 __FILE__,__LINE__,info->device_name,ret);
2219
2220 return ret;
2221
2222 } /* end of mgsl_write() */
2223
2224 /* mgsl_write_room()
2225 *
2226 * Return the count of free bytes in transmit buffer
2227 *
2228 * Arguments: tty pointer to tty info structure
2229 * Return Value: None
2230 */
2231 static int mgsl_write_room(struct tty_struct *tty)
2232 {
2233 struct mgsl_struct *info = tty->driver_data;
2234 int ret;
2235
2236 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2237 return 0;
2238 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2239 if (ret < 0)
2240 ret = 0;
2241
2242 if (debug_level >= DEBUG_LEVEL_INFO)
2243 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2244 __FILE__,__LINE__, info->device_name,ret );
2245
2246 if ( info->params.mode == MGSL_MODE_HDLC ||
2247 info->params.mode == MGSL_MODE_RAW ) {
2248 /* operating in synchronous (frame oriented) mode */
2249 if ( info->tx_active )
2250 return 0;
2251 else
2252 return HDLC_MAX_FRAME_SIZE;
2253 }
2254
2255 return ret;
2256
2257 } /* end of mgsl_write_room() */
2258
2259 /* mgsl_chars_in_buffer()
2260 *
2261 * Return the count of bytes in transmit buffer
2262 *
2263 * Arguments: tty pointer to tty info structure
2264 * Return Value: None
2265 */
2266 static int mgsl_chars_in_buffer(struct tty_struct *tty)
2267 {
2268 struct mgsl_struct *info = tty->driver_data;
2269
2270 if (debug_level >= DEBUG_LEVEL_INFO)
2271 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2272 __FILE__,__LINE__, info->device_name );
2273
2274 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2275 return 0;
2276
2277 if (debug_level >= DEBUG_LEVEL_INFO)
2278 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2279 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2280
2281 if ( info->params.mode == MGSL_MODE_HDLC ||
2282 info->params.mode == MGSL_MODE_RAW ) {
2283 /* operating in synchronous (frame oriented) mode */
2284 if ( info->tx_active )
2285 return info->max_frame_size;
2286 else
2287 return 0;
2288 }
2289
2290 return info->xmit_cnt;
2291 } /* end of mgsl_chars_in_buffer() */
2292
2293 /* mgsl_flush_buffer()
2294 *
2295 * Discard all data in the send buffer
2296 *
2297 * Arguments: tty pointer to tty info structure
2298 * Return Value: None
2299 */
2300 static void mgsl_flush_buffer(struct tty_struct *tty)
2301 {
2302 struct mgsl_struct *info = tty->driver_data;
2303 unsigned long flags;
2304
2305 if (debug_level >= DEBUG_LEVEL_INFO)
2306 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2307 __FILE__,__LINE__, info->device_name );
2308
2309 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2310 return;
2311
2312 spin_lock_irqsave(&info->irq_spinlock,flags);
2313 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2314 del_timer(&info->tx_timer);
2315 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2316
2317 tty_wakeup(tty);
2318 }
2319
2320 /* mgsl_send_xchar()
2321 *
2322 * Send a high-priority XON/XOFF character
2323 *
2324 * Arguments: tty pointer to tty info structure
2325 * ch character to send
2326 * Return Value: None
2327 */
2328 static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2329 {
2330 struct mgsl_struct *info = tty->driver_data;
2331 unsigned long flags;
2332
2333 if (debug_level >= DEBUG_LEVEL_INFO)
2334 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2335 __FILE__,__LINE__, info->device_name, ch );
2336
2337 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2338 return;
2339
2340 info->x_char = ch;
2341 if (ch) {
2342 /* Make sure transmit interrupts are on */
2343 spin_lock_irqsave(&info->irq_spinlock,flags);
2344 if (!info->tx_enabled)
2345 usc_start_transmitter(info);
2346 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2347 }
2348 } /* end of mgsl_send_xchar() */
2349
2350 /* mgsl_throttle()
2351 *
2352 * Signal remote device to throttle send data (our receive data)
2353 *
2354 * Arguments: tty pointer to tty info structure
2355 * Return Value: None
2356 */
2357 static void mgsl_throttle(struct tty_struct * tty)
2358 {
2359 struct mgsl_struct *info = tty->driver_data;
2360 unsigned long flags;
2361
2362 if (debug_level >= DEBUG_LEVEL_INFO)
2363 printk("%s(%d):mgsl_throttle(%s) entry\n",
2364 __FILE__,__LINE__, info->device_name );
2365
2366 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2367 return;
2368
2369 if (I_IXOFF(tty))
2370 mgsl_send_xchar(tty, STOP_CHAR(tty));
2371
2372 if (tty->termios->c_cflag & CRTSCTS) {
2373 spin_lock_irqsave(&info->irq_spinlock,flags);
2374 info->serial_signals &= ~SerialSignal_RTS;
2375 usc_set_serial_signals(info);
2376 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2377 }
2378 } /* end of mgsl_throttle() */
2379
2380 /* mgsl_unthrottle()
2381 *
2382 * Signal remote device to stop throttling send data (our receive data)
2383 *
2384 * Arguments: tty pointer to tty info structure
2385 * Return Value: None
2386 */
2387 static void mgsl_unthrottle(struct tty_struct * tty)
2388 {
2389 struct mgsl_struct *info = tty->driver_data;
2390 unsigned long flags;
2391
2392 if (debug_level >= DEBUG_LEVEL_INFO)
2393 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2394 __FILE__,__LINE__, info->device_name );
2395
2396 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2397 return;
2398
2399 if (I_IXOFF(tty)) {
2400 if (info->x_char)
2401 info->x_char = 0;
2402 else
2403 mgsl_send_xchar(tty, START_CHAR(tty));
2404 }
2405
2406 if (tty->termios->c_cflag & CRTSCTS) {
2407 spin_lock_irqsave(&info->irq_spinlock,flags);
2408 info->serial_signals |= SerialSignal_RTS;
2409 usc_set_serial_signals(info);
2410 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2411 }
2412
2413 } /* end of mgsl_unthrottle() */
2414
2415 /* mgsl_get_stats()
2416 *
2417 * get the current serial parameters information
2418 *
2419 * Arguments: info pointer to device instance data
2420 * user_icount pointer to buffer to hold returned stats
2421 *
2422 * Return Value: 0 if success, otherwise error code
2423 */
2424 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2425 {
2426 int err;
2427
2428 if (debug_level >= DEBUG_LEVEL_INFO)
2429 printk("%s(%d):mgsl_get_params(%s)\n",
2430 __FILE__,__LINE__, info->device_name);
2431
2432 if (!user_icount) {
2433 memset(&info->icount, 0, sizeof(info->icount));
2434 } else {
2435 mutex_lock(&info->port.mutex);
2436 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2437 mutex_unlock(&info->port.mutex);
2438 if (err)
2439 return -EFAULT;
2440 }
2441
2442 return 0;
2443
2444 } /* end of mgsl_get_stats() */
2445
2446 /* mgsl_get_params()
2447 *
2448 * get the current serial parameters information
2449 *
2450 * Arguments: info pointer to device instance data
2451 * user_params pointer to buffer to hold returned params
2452 *
2453 * Return Value: 0 if success, otherwise error code
2454 */
2455 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2456 {
2457 int err;
2458 if (debug_level >= DEBUG_LEVEL_INFO)
2459 printk("%s(%d):mgsl_get_params(%s)\n",
2460 __FILE__,__LINE__, info->device_name);
2461
2462 mutex_lock(&info->port.mutex);
2463 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2464 mutex_unlock(&info->port.mutex);
2465 if (err) {
2466 if ( debug_level >= DEBUG_LEVEL_INFO )
2467 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2468 __FILE__,__LINE__,info->device_name);
2469 return -EFAULT;
2470 }
2471
2472 return 0;
2473
2474 } /* end of mgsl_get_params() */
2475
2476 /* mgsl_set_params()
2477 *
2478 * set the serial parameters
2479 *
2480 * Arguments:
2481 *
2482 * info pointer to device instance data
2483 * new_params user buffer containing new serial params
2484 *
2485 * Return Value: 0 if success, otherwise error code
2486 */
2487 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2488 {
2489 unsigned long flags;
2490 MGSL_PARAMS tmp_params;
2491 int err;
2492
2493 if (debug_level >= DEBUG_LEVEL_INFO)
2494 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2495 info->device_name );
2496 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2497 if (err) {
2498 if ( debug_level >= DEBUG_LEVEL_INFO )
2499 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2500 __FILE__,__LINE__,info->device_name);
2501 return -EFAULT;
2502 }
2503
2504 mutex_lock(&info->port.mutex);
2505 spin_lock_irqsave(&info->irq_spinlock,flags);
2506 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2507 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2508
2509 mgsl_change_params(info);
2510 mutex_unlock(&info->port.mutex);
2511
2512 return 0;
2513
2514 } /* end of mgsl_set_params() */
2515
2516 /* mgsl_get_txidle()
2517 *
2518 * get the current transmit idle mode
2519 *
2520 * Arguments: info pointer to device instance data
2521 * idle_mode pointer to buffer to hold returned idle mode
2522 *
2523 * Return Value: 0 if success, otherwise error code
2524 */
2525 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2526 {
2527 int err;
2528
2529 if (debug_level >= DEBUG_LEVEL_INFO)
2530 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2531 __FILE__,__LINE__, info->device_name, info->idle_mode);
2532
2533 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2534 if (err) {
2535 if ( debug_level >= DEBUG_LEVEL_INFO )
2536 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2537 __FILE__,__LINE__,info->device_name);
2538 return -EFAULT;
2539 }
2540
2541 return 0;
2542
2543 } /* end of mgsl_get_txidle() */
2544
2545 /* mgsl_set_txidle() service ioctl to set transmit idle mode
2546 *
2547 * Arguments: info pointer to device instance data
2548 * idle_mode new idle mode
2549 *
2550 * Return Value: 0 if success, otherwise error code
2551 */
2552 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2553 {
2554 unsigned long flags;
2555
2556 if (debug_level >= DEBUG_LEVEL_INFO)
2557 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2558 info->device_name, idle_mode );
2559
2560 spin_lock_irqsave(&info->irq_spinlock,flags);
2561 info->idle_mode = idle_mode;
2562 usc_set_txidle( info );
2563 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2564 return 0;
2565
2566 } /* end of mgsl_set_txidle() */
2567
2568 /* mgsl_txenable()
2569 *
2570 * enable or disable the transmitter
2571 *
2572 * Arguments:
2573 *
2574 * info pointer to device instance data
2575 * enable 1 = enable, 0 = disable
2576 *
2577 * Return Value: 0 if success, otherwise error code
2578 */
2579 static int mgsl_txenable(struct mgsl_struct * info, int enable)
2580 {
2581 unsigned long flags;
2582
2583 if (debug_level >= DEBUG_LEVEL_INFO)
2584 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2585 info->device_name, enable);
2586
2587 spin_lock_irqsave(&info->irq_spinlock,flags);
2588 if ( enable ) {
2589 if ( !info->tx_enabled ) {
2590
2591 usc_start_transmitter(info);
2592 /*--------------------------------------------------
2593 * if HDLC/SDLC Loop mode, attempt to insert the
2594 * station in the 'loop' by setting CMR:13. Upon
2595 * receipt of the next GoAhead (RxAbort) sequence,
2596 * the OnLoop indicator (CCSR:7) should go active
2597 * to indicate that we are on the loop
2598 *--------------------------------------------------*/
2599 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2600 usc_loopmode_insert_request( info );
2601 }
2602 } else {
2603 if ( info->tx_enabled )
2604 usc_stop_transmitter(info);
2605 }
2606 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2607 return 0;
2608
2609 } /* end of mgsl_txenable() */
2610
2611 /* mgsl_txabort() abort send HDLC frame
2612 *
2613 * Arguments: info pointer to device instance data
2614 * Return Value: 0 if success, otherwise error code
2615 */
2616 static int mgsl_txabort(struct mgsl_struct * info)
2617 {
2618 unsigned long flags;
2619
2620 if (debug_level >= DEBUG_LEVEL_INFO)
2621 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2622 info->device_name);
2623
2624 spin_lock_irqsave(&info->irq_spinlock,flags);
2625 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2626 {
2627 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2628 usc_loopmode_cancel_transmit( info );
2629 else
2630 usc_TCmd(info,TCmd_SendAbort);
2631 }
2632 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2633 return 0;
2634
2635 } /* end of mgsl_txabort() */
2636
2637 /* mgsl_rxenable() enable or disable the receiver
2638 *
2639 * Arguments: info pointer to device instance data
2640 * enable 1 = enable, 0 = disable
2641 * Return Value: 0 if success, otherwise error code
2642 */
2643 static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2644 {
2645 unsigned long flags;
2646
2647 if (debug_level >= DEBUG_LEVEL_INFO)
2648 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2649 info->device_name, enable);
2650
2651 spin_lock_irqsave(&info->irq_spinlock,flags);
2652 if ( enable ) {
2653 if ( !info->rx_enabled )
2654 usc_start_receiver(info);
2655 } else {
2656 if ( info->rx_enabled )
2657 usc_stop_receiver(info);
2658 }
2659 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2660 return 0;
2661
2662 } /* end of mgsl_rxenable() */
2663
2664 /* mgsl_wait_event() wait for specified event to occur
2665 *
2666 * Arguments: info pointer to device instance data
2667 * mask pointer to bitmask of events to wait for
2668 * Return Value: 0 if successful and bit mask updated with
2669 * of events triggerred,
2670 * otherwise error code
2671 */
2672 static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2673 {
2674 unsigned long flags;
2675 int s;
2676 int rc=0;
2677 struct mgsl_icount cprev, cnow;
2678 int events;
2679 int mask;
2680 struct _input_signal_events oldsigs, newsigs;
2681 DECLARE_WAITQUEUE(wait, current);
2682
2683 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2684 if (rc) {
2685 return -EFAULT;
2686 }
2687
2688 if (debug_level >= DEBUG_LEVEL_INFO)
2689 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2690 info->device_name, mask);
2691
2692 spin_lock_irqsave(&info->irq_spinlock,flags);
2693
2694 /* return immediately if state matches requested events */
2695 usc_get_serial_signals(info);
2696 s = info->serial_signals;
2697 events = mask &
2698 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2699 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2700 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2701 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2702 if (events) {
2703 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2704 goto exit;
2705 }
2706
2707 /* save current irq counts */
2708 cprev = info->icount;
2709 oldsigs = info->input_signal_events;
2710
2711 /* enable hunt and idle irqs if needed */
2712 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2713 u16 oldreg = usc_InReg(info,RICR);
2714 u16 newreg = oldreg +
2715 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2716 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2717 if (oldreg != newreg)
2718 usc_OutReg(info, RICR, newreg);
2719 }
2720
2721 set_current_state(TASK_INTERRUPTIBLE);
2722 add_wait_queue(&info->event_wait_q, &wait);
2723
2724 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2725
2726
2727 for(;;) {
2728 schedule();
2729 if (signal_pending(current)) {
2730 rc = -ERESTARTSYS;
2731 break;
2732 }
2733
2734 /* get current irq counts */
2735 spin_lock_irqsave(&info->irq_spinlock,flags);
2736 cnow = info->icount;
2737 newsigs = info->input_signal_events;
2738 set_current_state(TASK_INTERRUPTIBLE);
2739 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2740
2741 /* if no change, wait aborted for some reason */
2742 if (newsigs.dsr_up == oldsigs.dsr_up &&
2743 newsigs.dsr_down == oldsigs.dsr_down &&
2744 newsigs.dcd_up == oldsigs.dcd_up &&
2745 newsigs.dcd_down == oldsigs.dcd_down &&
2746 newsigs.cts_up == oldsigs.cts_up &&
2747 newsigs.cts_down == oldsigs.cts_down &&
2748 newsigs.ri_up == oldsigs.ri_up &&
2749 newsigs.ri_down == oldsigs.ri_down &&
2750 cnow.exithunt == cprev.exithunt &&
2751 cnow.rxidle == cprev.rxidle) {
2752 rc = -EIO;
2753 break;
2754 }
2755
2756 events = mask &
2757 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2758 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2759 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2760 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2761 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2762 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2763 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2764 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2765 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2766 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2767 if (events)
2768 break;
2769
2770 cprev = cnow;
2771 oldsigs = newsigs;
2772 }
2773
2774 remove_wait_queue(&info->event_wait_q, &wait);
2775 set_current_state(TASK_RUNNING);
2776
2777 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2778 spin_lock_irqsave(&info->irq_spinlock,flags);
2779 if (!waitqueue_active(&info->event_wait_q)) {
2780 /* disable enable exit hunt mode/idle rcvd IRQs */
2781 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2782 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2783 }
2784 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2785 }
2786 exit:
2787 if ( rc == 0 )
2788 PUT_USER(rc, events, mask_ptr);
2789
2790 return rc;
2791
2792 } /* end of mgsl_wait_event() */
2793
2794 static int modem_input_wait(struct mgsl_struct *info,int arg)
2795 {
2796 unsigned long flags;
2797 int rc;
2798 struct mgsl_icount cprev, cnow;
2799 DECLARE_WAITQUEUE(wait, current);
2800
2801 /* save current irq counts */
2802 spin_lock_irqsave(&info->irq_spinlock,flags);
2803 cprev = info->icount;
2804 add_wait_queue(&info->status_event_wait_q, &wait);
2805 set_current_state(TASK_INTERRUPTIBLE);
2806 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2807
2808 for(;;) {
2809 schedule();
2810 if (signal_pending(current)) {
2811 rc = -ERESTARTSYS;
2812 break;
2813 }
2814
2815 /* get new irq counts */
2816 spin_lock_irqsave(&info->irq_spinlock,flags);
2817 cnow = info->icount;
2818 set_current_state(TASK_INTERRUPTIBLE);
2819 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2820
2821 /* if no change, wait aborted for some reason */
2822 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2823 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2824 rc = -EIO;
2825 break;
2826 }
2827
2828 /* check for change in caller specified modem input */
2829 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2830 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2831 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2832 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2833 rc = 0;
2834 break;
2835 }
2836
2837 cprev = cnow;
2838 }
2839 remove_wait_queue(&info->status_event_wait_q, &wait);
2840 set_current_state(TASK_RUNNING);
2841 return rc;
2842 }
2843
2844 /* return the state of the serial control and status signals
2845 */
2846 static int tiocmget(struct tty_struct *tty)
2847 {
2848 struct mgsl_struct *info = tty->driver_data;
2849 unsigned int result;
2850 unsigned long flags;
2851
2852 spin_lock_irqsave(&info->irq_spinlock,flags);
2853 usc_get_serial_signals(info);
2854 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2855
2856 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2857 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2858 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2859 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2860 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2861 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2862
2863 if (debug_level >= DEBUG_LEVEL_INFO)
2864 printk("%s(%d):%s tiocmget() value=%08X\n",
2865 __FILE__,__LINE__, info->device_name, result );
2866 return result;
2867 }
2868
2869 /* set modem control signals (DTR/RTS)
2870 */
2871 static int tiocmset(struct tty_struct *tty,
2872 unsigned int set, unsigned int clear)
2873 {
2874 struct mgsl_struct *info = tty->driver_data;
2875 unsigned long flags;
2876
2877 if (debug_level >= DEBUG_LEVEL_INFO)
2878 printk("%s(%d):%s tiocmset(%x,%x)\n",
2879 __FILE__,__LINE__,info->device_name, set, clear);
2880
2881 if (set & TIOCM_RTS)
2882 info->serial_signals |= SerialSignal_RTS;
2883 if (set & TIOCM_DTR)
2884 info->serial_signals |= SerialSignal_DTR;
2885 if (clear & TIOCM_RTS)
2886 info->serial_signals &= ~SerialSignal_RTS;
2887 if (clear & TIOCM_DTR)
2888 info->serial_signals &= ~SerialSignal_DTR;
2889
2890 spin_lock_irqsave(&info->irq_spinlock,flags);
2891 usc_set_serial_signals(info);
2892 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2893
2894 return 0;
2895 }
2896
2897 /* mgsl_break() Set or clear transmit break condition
2898 *
2899 * Arguments: tty pointer to tty instance data
2900 * break_state -1=set break condition, 0=clear
2901 * Return Value: error code
2902 */
2903 static int mgsl_break(struct tty_struct *tty, int break_state)
2904 {
2905 struct mgsl_struct * info = tty->driver_data;
2906 unsigned long flags;
2907
2908 if (debug_level >= DEBUG_LEVEL_INFO)
2909 printk("%s(%d):mgsl_break(%s,%d)\n",
2910 __FILE__,__LINE__, info->device_name, break_state);
2911
2912 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2913 return -EINVAL;
2914
2915 spin_lock_irqsave(&info->irq_spinlock,flags);
2916 if (break_state == -1)
2917 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2918 else
2919 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2920 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2921 return 0;
2922
2923 } /* end of mgsl_break() */
2924
2925 /*
2926 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2927 * Return: write counters to the user passed counter struct
2928 * NB: both 1->0 and 0->1 transitions are counted except for
2929 * RI where only 0->1 is counted.
2930 */
2931 static int msgl_get_icount(struct tty_struct *tty,
2932 struct serial_icounter_struct *icount)
2933
2934 {
2935 struct mgsl_struct * info = tty->driver_data;
2936 struct mgsl_icount cnow; /* kernel counter temps */
2937 unsigned long flags;
2938
2939 spin_lock_irqsave(&info->irq_spinlock,flags);
2940 cnow = info->icount;
2941 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2942
2943 icount->cts = cnow.cts;
2944 icount->dsr = cnow.dsr;
2945 icount->rng = cnow.rng;
2946 icount->dcd = cnow.dcd;
2947 icount->rx = cnow.rx;
2948 icount->tx = cnow.tx;
2949 icount->frame = cnow.frame;
2950 icount->overrun = cnow.overrun;
2951 icount->parity = cnow.parity;
2952 icount->brk = cnow.brk;
2953 icount->buf_overrun = cnow.buf_overrun;
2954 return 0;
2955 }
2956
2957 /* mgsl_ioctl() Service an IOCTL request
2958 *
2959 * Arguments:
2960 *
2961 * tty pointer to tty instance data
2962 * cmd IOCTL command code
2963 * arg command argument/context
2964 *
2965 * Return Value: 0 if success, otherwise error code
2966 */
2967 static int mgsl_ioctl(struct tty_struct *tty,
2968 unsigned int cmd, unsigned long arg)
2969 {
2970 struct mgsl_struct * info = tty->driver_data;
2971
2972 if (debug_level >= DEBUG_LEVEL_INFO)
2973 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2974 info->device_name, cmd );
2975
2976 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2977 return -ENODEV;
2978
2979 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2980 (cmd != TIOCMIWAIT)) {
2981 if (tty->flags & (1 << TTY_IO_ERROR))
2982 return -EIO;
2983 }
2984
2985 return mgsl_ioctl_common(info, cmd, arg);
2986 }
2987
2988 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2989 {
2990 void __user *argp = (void __user *)arg;
2991
2992 switch (cmd) {
2993 case MGSL_IOCGPARAMS:
2994 return mgsl_get_params(info, argp);
2995 case MGSL_IOCSPARAMS:
2996 return mgsl_set_params(info, argp);
2997 case MGSL_IOCGTXIDLE:
2998 return mgsl_get_txidle(info, argp);
2999 case MGSL_IOCSTXIDLE:
3000 return mgsl_set_txidle(info,(int)arg);
3001 case MGSL_IOCTXENABLE:
3002 return mgsl_txenable(info,(int)arg);
3003 case MGSL_IOCRXENABLE:
3004 return mgsl_rxenable(info,(int)arg);
3005 case MGSL_IOCTXABORT:
3006 return mgsl_txabort(info);
3007 case MGSL_IOCGSTATS:
3008 return mgsl_get_stats(info, argp);
3009 case MGSL_IOCWAITEVENT:
3010 return mgsl_wait_event(info, argp);
3011 case MGSL_IOCLOOPTXDONE:
3012 return mgsl_loopmode_send_done(info);
3013 /* Wait for modem input (DCD,RI,DSR,CTS) change
3014 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3015 */
3016 case TIOCMIWAIT:
3017 return modem_input_wait(info,(int)arg);
3018
3019 default:
3020 return -ENOIOCTLCMD;
3021 }
3022 return 0;
3023 }
3024
3025 /* mgsl_set_termios()
3026 *
3027 * Set new termios settings
3028 *
3029 * Arguments:
3030 *
3031 * tty pointer to tty structure
3032 * termios pointer to buffer to hold returned old termios
3033 *
3034 * Return Value: None
3035 */
3036 static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3037 {
3038 struct mgsl_struct *info = tty->driver_data;
3039 unsigned long flags;
3040
3041 if (debug_level >= DEBUG_LEVEL_INFO)
3042 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3043 tty->driver->name );
3044
3045 mgsl_change_params(info);
3046
3047 /* Handle transition to B0 status */
3048 if (old_termios->c_cflag & CBAUD &&
3049 !(tty->termios->c_cflag & CBAUD)) {
3050 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3051 spin_lock_irqsave(&info->irq_spinlock,flags);
3052 usc_set_serial_signals(info);
3053 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3054 }
3055
3056 /* Handle transition away from B0 status */
3057 if (!(old_termios->c_cflag & CBAUD) &&
3058 tty->termios->c_cflag & CBAUD) {
3059 info->serial_signals |= SerialSignal_DTR;
3060 if (!(tty->termios->c_cflag & CRTSCTS) ||
3061 !test_bit(TTY_THROTTLED, &tty->flags)) {
3062 info->serial_signals |= SerialSignal_RTS;
3063 }
3064 spin_lock_irqsave(&info->irq_spinlock,flags);
3065 usc_set_serial_signals(info);
3066 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3067 }
3068
3069 /* Handle turning off CRTSCTS */
3070 if (old_termios->c_cflag & CRTSCTS &&
3071 !(tty->termios->c_cflag & CRTSCTS)) {
3072 tty->hw_stopped = 0;
3073 mgsl_start(tty);
3074 }
3075
3076 } /* end of mgsl_set_termios() */
3077
3078 /* mgsl_close()
3079 *
3080 * Called when port is closed. Wait for remaining data to be
3081 * sent. Disable port and free resources.
3082 *
3083 * Arguments:
3084 *
3085 * tty pointer to open tty structure
3086 * filp pointer to open file object
3087 *
3088 * Return Value: None
3089 */
3090 static void mgsl_close(struct tty_struct *tty, struct file * filp)
3091 {
3092 struct mgsl_struct * info = tty->driver_data;
3093
3094 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3095 return;
3096
3097 if (debug_level >= DEBUG_LEVEL_INFO)
3098 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3099 __FILE__,__LINE__, info->device_name, info->port.count);
3100
3101 if (tty_port_close_start(&info->port, tty, filp) == 0)
3102 goto cleanup;
3103
3104 mutex_lock(&info->port.mutex);
3105 if (info->port.flags & ASYNC_INITIALIZED)
3106 mgsl_wait_until_sent(tty, info->timeout);
3107 mgsl_flush_buffer(tty);
3108 tty_ldisc_flush(tty);
3109 shutdown(info);
3110 mutex_unlock(&info->port.mutex);
3111
3112 tty_port_close_end(&info->port, tty);
3113 info->port.tty = NULL;
3114 cleanup:
3115 if (debug_level >= DEBUG_LEVEL_INFO)
3116 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3117 tty->driver->name, info->port.count);
3118
3119 } /* end of mgsl_close() */
3120
3121 /* mgsl_wait_until_sent()
3122 *
3123 * Wait until the transmitter is empty.
3124 *
3125 * Arguments:
3126 *
3127 * tty pointer to tty info structure
3128 * timeout time to wait for send completion
3129 *
3130 * Return Value: None
3131 */
3132 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3133 {
3134 struct mgsl_struct * info = tty->driver_data;
3135 unsigned long orig_jiffies, char_time;
3136
3137 if (!info )
3138 return;
3139
3140 if (debug_level >= DEBUG_LEVEL_INFO)
3141 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3142 __FILE__,__LINE__, info->device_name );
3143
3144 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3145 return;
3146
3147 if (!(info->port.flags & ASYNC_INITIALIZED))
3148 goto exit;
3149
3150 orig_jiffies = jiffies;
3151
3152 /* Set check interval to 1/5 of estimated time to
3153 * send a character, and make it at least 1. The check
3154 * interval should also be less than the timeout.
3155 * Note: use tight timings here to satisfy the NIST-PCTS.
3156 */
3157
3158 if ( info->params.data_rate ) {
3159 char_time = info->timeout/(32 * 5);
3160 if (!char_time)
3161 char_time++;
3162 } else
3163 char_time = 1;
3164
3165 if (timeout)
3166 char_time = min_t(unsigned long, char_time, timeout);
3167
3168 if ( info->params.mode == MGSL_MODE_HDLC ||
3169 info->params.mode == MGSL_MODE_RAW ) {
3170 while (info->tx_active) {
3171 msleep_interruptible(jiffies_to_msecs(char_time));
3172 if (signal_pending(current))
3173 break;
3174 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3175 break;
3176 }
3177 } else {
3178 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3179 info->tx_enabled) {
3180 msleep_interruptible(jiffies_to_msecs(char_time));
3181 if (signal_pending(current))
3182 break;
3183 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3184 break;
3185 }
3186 }
3187
3188 exit:
3189 if (debug_level >= DEBUG_LEVEL_INFO)
3190 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3191 __FILE__,__LINE__, info->device_name );
3192
3193 } /* end of mgsl_wait_until_sent() */
3194
3195 /* mgsl_hangup()
3196 *
3197 * Called by tty_hangup() when a hangup is signaled.
3198 * This is the same as to closing all open files for the port.
3199 *
3200 * Arguments: tty pointer to associated tty object
3201 * Return Value: None
3202 */
3203 static void mgsl_hangup(struct tty_struct *tty)
3204 {
3205 struct mgsl_struct * info = tty->driver_data;
3206
3207 if (debug_level >= DEBUG_LEVEL_INFO)
3208 printk("%s(%d):mgsl_hangup(%s)\n",
3209 __FILE__,__LINE__, info->device_name );
3210
3211 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3212 return;
3213
3214 mgsl_flush_buffer(tty);
3215 shutdown(info);
3216
3217 info->port.count = 0;
3218 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
3219 info->port.tty = NULL;
3220
3221 wake_up_interruptible(&info->port.open_wait);
3222
3223 } /* end of mgsl_hangup() */
3224
3225 /*
3226 * carrier_raised()
3227 *
3228 * Return true if carrier is raised
3229 */
3230
3231 static int carrier_raised(struct tty_port *port)
3232 {
3233 unsigned long flags;
3234 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3235
3236 spin_lock_irqsave(&info->irq_spinlock, flags);
3237 usc_get_serial_signals(info);
3238 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3239 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3240 }
3241
3242 static void dtr_rts(struct tty_port *port, int on)
3243 {
3244 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3245 unsigned long flags;
3246
3247 spin_lock_irqsave(&info->irq_spinlock,flags);
3248 if (on)
3249 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3250 else
3251 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3252 usc_set_serial_signals(info);
3253 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3254 }
3255
3256
3257 /* block_til_ready()
3258 *
3259 * Block the current process until the specified port
3260 * is ready to be opened.
3261 *
3262 * Arguments:
3263 *
3264 * tty pointer to tty info structure
3265 * filp pointer to open file object
3266 * info pointer to device instance data
3267 *
3268 * Return Value: 0 if success, otherwise error code
3269 */
3270 static int block_til_ready(struct tty_struct *tty, struct file * filp,
3271 struct mgsl_struct *info)
3272 {
3273 DECLARE_WAITQUEUE(wait, current);
3274 int retval;
3275 bool do_clocal = false;
3276 bool extra_count = false;
3277 unsigned long flags;
3278 int dcd;
3279 struct tty_port *port = &info->port;
3280
3281 if (debug_level >= DEBUG_LEVEL_INFO)
3282 printk("%s(%d):block_til_ready on %s\n",
3283 __FILE__,__LINE__, tty->driver->name );
3284
3285 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3286 /* nonblock mode is set or port is not enabled */
3287 port->flags |= ASYNC_NORMAL_ACTIVE;
3288 return 0;
3289 }
3290
3291 if (tty->termios->c_cflag & CLOCAL)
3292 do_clocal = true;
3293
3294 /* Wait for carrier detect and the line to become
3295 * free (i.e., not in use by the callout). While we are in
3296 * this loop, port->count is dropped by one, so that
3297 * mgsl_close() knows when to free things. We restore it upon
3298 * exit, either normal or abnormal.
3299 */
3300
3301 retval = 0;
3302 add_wait_queue(&port->open_wait, &wait);
3303
3304 if (debug_level >= DEBUG_LEVEL_INFO)
3305 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3306 __FILE__,__LINE__, tty->driver->name, port->count );
3307
3308 spin_lock_irqsave(&info->irq_spinlock, flags);
3309 if (!tty_hung_up_p(filp)) {
3310 extra_count = true;
3311 port->count--;
3312 }
3313 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3314 port->blocked_open++;
3315
3316 while (1) {
3317 if (tty->termios->c_cflag & CBAUD)
3318 tty_port_raise_dtr_rts(port);
3319
3320 set_current_state(TASK_INTERRUPTIBLE);
3321
3322 if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3323 retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3324 -EAGAIN : -ERESTARTSYS;
3325 break;
3326 }
3327
3328 dcd = tty_port_carrier_raised(&info->port);
3329
3330 if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
3331 break;
3332
3333 if (signal_pending(current)) {
3334 retval = -ERESTARTSYS;
3335 break;
3336 }
3337
3338 if (debug_level >= DEBUG_LEVEL_INFO)
3339 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3340 __FILE__,__LINE__, tty->driver->name, port->count );
3341
3342 tty_unlock();
3343 schedule();
3344 tty_lock();
3345 }
3346
3347 set_current_state(TASK_RUNNING);
3348 remove_wait_queue(&port->open_wait, &wait);
3349
3350 /* FIXME: Racy on hangup during close wait */
3351 if (extra_count)
3352 port->count++;
3353 port->blocked_open--;
3354
3355 if (debug_level >= DEBUG_LEVEL_INFO)
3356 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3357 __FILE__,__LINE__, tty->driver->name, port->count );
3358
3359 if (!retval)
3360 port->flags |= ASYNC_NORMAL_ACTIVE;
3361
3362 return retval;
3363
3364 } /* end of block_til_ready() */
3365
3366 /* mgsl_open()
3367 *
3368 * Called when a port is opened. Init and enable port.
3369 * Perform serial-specific initialization for the tty structure.
3370 *
3371 * Arguments: tty pointer to tty info structure
3372 * filp associated file pointer
3373 *
3374 * Return Value: 0 if success, otherwise error code
3375 */
3376 static int mgsl_open(struct tty_struct *tty, struct file * filp)
3377 {
3378 struct mgsl_struct *info;
3379 int retval, line;
3380 unsigned long flags;
3381
3382 /* verify range of specified line number */
3383 line = tty->index;
3384 if ((line < 0) || (line >= mgsl_device_count)) {
3385 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3386 __FILE__,__LINE__,line);
3387 return -ENODEV;
3388 }
3389
3390 /* find the info structure for the specified line */
3391 info = mgsl_device_list;
3392 while(info && info->line != line)
3393 info = info->next_device;
3394 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3395 return -ENODEV;
3396
3397 tty->driver_data = info;
3398 info->port.tty = tty;
3399
3400 if (debug_level >= DEBUG_LEVEL_INFO)
3401 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3402 __FILE__,__LINE__,tty->driver->name, info->port.count);
3403
3404 /* If port is closing, signal caller to try again */
3405 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
3406 if (info->port.flags & ASYNC_CLOSING)
3407 interruptible_sleep_on(&info->port.close_wait);
3408 retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
3409 -EAGAIN : -ERESTARTSYS);
3410 goto cleanup;
3411 }
3412
3413 info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3414
3415 spin_lock_irqsave(&info->netlock, flags);
3416 if (info->netcount) {
3417 retval = -EBUSY;
3418 spin_unlock_irqrestore(&info->netlock, flags);
3419 goto cleanup;
3420 }
3421 info->port.count++;
3422 spin_unlock_irqrestore(&info->netlock, flags);
3423
3424 if (info->port.count == 1) {
3425 /* 1st open on this device, init hardware */
3426 retval = startup(info);
3427 if (retval < 0)
3428 goto cleanup;
3429 }
3430
3431 retval = block_til_ready(tty, filp, info);
3432 if (retval) {
3433 if (debug_level >= DEBUG_LEVEL_INFO)
3434 printk("%s(%d):block_til_ready(%s) returned %d\n",
3435 __FILE__,__LINE__, info->device_name, retval);
3436 goto cleanup;
3437 }
3438
3439 if (debug_level >= DEBUG_LEVEL_INFO)
3440 printk("%s(%d):mgsl_open(%s) success\n",
3441 __FILE__,__LINE__, info->device_name);
3442 retval = 0;
3443
3444 cleanup:
3445 if (retval) {
3446 if (tty->count == 1)
3447 info->port.tty = NULL; /* tty layer will release tty struct */
3448 if(info->port.count)
3449 info->port.count--;
3450 }
3451
3452 return retval;
3453
3454 } /* end of mgsl_open() */
3455
3456 /*
3457 * /proc fs routines....
3458 */
3459
3460 static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3461 {
3462 char stat_buf[30];
3463 unsigned long flags;
3464
3465 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3466 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3467 info->device_name, info->io_base, info->irq_level,
3468 info->phys_memory_base, info->phys_lcr_base);
3469 } else {
3470 seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
3471 info->device_name, info->io_base,
3472 info->irq_level, info->dma_level);
3473 }
3474
3475 /* output current serial signal states */
3476 spin_lock_irqsave(&info->irq_spinlock,flags);
3477 usc_get_serial_signals(info);
3478 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3479
3480 stat_buf[0] = 0;
3481 stat_buf[1] = 0;
3482 if (info->serial_signals & SerialSignal_RTS)
3483 strcat(stat_buf, "|RTS");
3484 if (info->serial_signals & SerialSignal_CTS)
3485 strcat(stat_buf, "|CTS");
3486 if (info->serial_signals & SerialSignal_DTR)
3487 strcat(stat_buf, "|DTR");
3488 if (info->serial_signals & SerialSignal_DSR)
3489 strcat(stat_buf, "|DSR");
3490 if (info->serial_signals & SerialSignal_DCD)
3491 strcat(stat_buf, "|CD");
3492 if (info->serial_signals & SerialSignal_RI)
3493 strcat(stat_buf, "|RI");
3494
3495 if (info->params.mode == MGSL_MODE_HDLC ||
3496 info->params.mode == MGSL_MODE_RAW ) {
3497 seq_printf(m, " HDLC txok:%d rxok:%d",
3498 info->icount.txok, info->icount.rxok);
3499 if (info->icount.txunder)
3500 seq_printf(m, " txunder:%d", info->icount.txunder);
3501 if (info->icount.txabort)
3502 seq_printf(m, " txabort:%d", info->icount.txabort);
3503 if (info->icount.rxshort)
3504 seq_printf(m, " rxshort:%d", info->icount.rxshort);
3505 if (info->icount.rxlong)
3506 seq_printf(m, " rxlong:%d", info->icount.rxlong);
3507 if (info->icount.rxover)
3508 seq_printf(m, " rxover:%d", info->icount.rxover);
3509 if (info->icount.rxcrc)
3510 seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3511 } else {
3512 seq_printf(m, " ASYNC tx:%d rx:%d",
3513 info->icount.tx, info->icount.rx);
3514 if (info->icount.frame)
3515 seq_printf(m, " fe:%d", info->icount.frame);
3516 if (info->icount.parity)
3517 seq_printf(m, " pe:%d", info->icount.parity);
3518 if (info->icount.brk)
3519 seq_printf(m, " brk:%d", info->icount.brk);
3520 if (info->icount.overrun)
3521 seq_printf(m, " oe:%d", info->icount.overrun);
3522 }
3523
3524 /* Append serial signal status to end */
3525 seq_printf(m, " %s\n", stat_buf+1);
3526
3527 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3528 info->tx_active,info->bh_requested,info->bh_running,
3529 info->pending_bh);
3530
3531 spin_lock_irqsave(&info->irq_spinlock,flags);
3532 {
3533 u16 Tcsr = usc_InReg( info, TCSR );
3534 u16 Tdmr = usc_InDmaReg( info, TDMR );
3535 u16 Ticr = usc_InReg( info, TICR );
3536 u16 Rscr = usc_InReg( info, RCSR );
3537 u16 Rdmr = usc_InDmaReg( info, RDMR );
3538 u16 Ricr = usc_InReg( info, RICR );
3539 u16 Icr = usc_InReg( info, ICR );
3540 u16 Dccr = usc_InReg( info, DCCR );
3541 u16 Tmr = usc_InReg( info, TMR );
3542 u16 Tccr = usc_InReg( info, TCCR );
3543 u16 Ccar = inw( info->io_base + CCAR );
3544 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3545 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3546 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3547 }
3548 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3549 }
3550
3551 /* Called to print information about devices */
3552 static int mgsl_proc_show(struct seq_file *m, void *v)
3553 {
3554 struct mgsl_struct *info;
3555
3556 seq_printf(m, "synclink driver:%s\n", driver_version);
3557
3558 info = mgsl_device_list;
3559 while( info ) {
3560 line_info(m, info);
3561 info = info->next_device;
3562 }
3563 return 0;
3564 }
3565
3566 static int mgsl_proc_open(struct inode *inode, struct file *file)
3567 {
3568 return single_open(file, mgsl_proc_show, NULL);
3569 }
3570
3571 static const struct file_operations mgsl_proc_fops = {
3572 .owner = THIS_MODULE,
3573 .open = mgsl_proc_open,
3574 .read = seq_read,
3575 .llseek = seq_lseek,
3576 .release = single_release,
3577 };
3578
3579 /* mgsl_allocate_dma_buffers()
3580 *
3581 * Allocate and format DMA buffers (ISA adapter)
3582 * or format shared memory buffers (PCI adapter).
3583 *
3584 * Arguments: info pointer to device instance data
3585 * Return Value: 0 if success, otherwise error
3586 */
3587 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3588 {
3589 unsigned short BuffersPerFrame;
3590
3591 info->last_mem_alloc = 0;
3592
3593 /* Calculate the number of DMA buffers necessary to hold the */
3594 /* largest allowable frame size. Note: If the max frame size is */
3595 /* not an even multiple of the DMA buffer size then we need to */
3596 /* round the buffer count per frame up one. */
3597
3598 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3599 if ( info->max_frame_size % DMABUFFERSIZE )
3600 BuffersPerFrame++;
3601
3602 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3603 /*
3604 * The PCI adapter has 256KBytes of shared memory to use.
3605 * This is 64 PAGE_SIZE buffers.
3606 *
3607 * The first page is used for padding at this time so the
3608 * buffer list does not begin at offset 0 of the PCI
3609 * adapter's shared memory.
3610 *
3611 * The 2nd page is used for the buffer list. A 4K buffer
3612 * list can hold 128 DMA_BUFFER structures at 32 bytes
3613 * each.
3614 *
3615 * This leaves 62 4K pages.
3616 *
3617 * The next N pages are used for transmit frame(s). We
3618 * reserve enough 4K page blocks to hold the required
3619 * number of transmit dma buffers (num_tx_dma_buffers),
3620 * each of MaxFrameSize size.
3621 *
3622 * Of the remaining pages (62-N), determine how many can
3623 * be used to receive full MaxFrameSize inbound frames
3624 */
3625 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3626 info->rx_buffer_count = 62 - info->tx_buffer_count;
3627 } else {
3628 /* Calculate the number of PAGE_SIZE buffers needed for */
3629 /* receive and transmit DMA buffers. */
3630
3631
3632 /* Calculate the number of DMA buffers necessary to */
3633 /* hold 7 max size receive frames and one max size transmit frame. */
3634 /* The receive buffer count is bumped by one so we avoid an */
3635 /* End of List condition if all receive buffers are used when */
3636 /* using linked list DMA buffers. */
3637
3638 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3639 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3640
3641 /*
3642 * limit total TxBuffers & RxBuffers to 62 4K total
3643 * (ala PCI Allocation)
3644 */
3645
3646 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3647 info->rx_buffer_count = 62 - info->tx_buffer_count;
3648
3649 }
3650
3651 if ( debug_level >= DEBUG_LEVEL_INFO )
3652 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3653 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3654
3655 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3656 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3657 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3658 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3659 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3660 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3661 return -ENOMEM;
3662 }
3663
3664 mgsl_reset_rx_dma_buffers( info );
3665 mgsl_reset_tx_dma_buffers( info );
3666
3667 return 0;
3668
3669 } /* end of mgsl_allocate_dma_buffers() */
3670
3671 /*
3672 * mgsl_alloc_buffer_list_memory()
3673 *
3674 * Allocate a common DMA buffer for use as the
3675 * receive and transmit buffer lists.
3676 *
3677 * A buffer list is a set of buffer entries where each entry contains
3678 * a pointer to an actual buffer and a pointer to the next buffer entry
3679 * (plus some other info about the buffer).
3680 *
3681 * The buffer entries for a list are built to form a circular list so
3682 * that when the entire list has been traversed you start back at the
3683 * beginning.
3684 *
3685 * This function allocates memory for just the buffer entries.
3686 * The links (pointer to next entry) are filled in with the physical
3687 * address of the next entry so the adapter can navigate the list
3688 * using bus master DMA. The pointers to the actual buffers are filled
3689 * out later when the actual buffers are allocated.
3690 *
3691 * Arguments: info pointer to device instance data
3692 * Return Value: 0 if success, otherwise error
3693 */
3694 static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3695 {
3696 unsigned int i;
3697
3698 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3699 /* PCI adapter uses shared memory. */
3700 info->buffer_list = info->memory_base + info->last_mem_alloc;
3701 info->buffer_list_phys = info->last_mem_alloc;
3702 info->last_mem_alloc += BUFFERLISTSIZE;
3703 } else {
3704 /* ISA adapter uses system memory. */
3705 /* The buffer lists are allocated as a common buffer that both */
3706 /* the processor and adapter can access. This allows the driver to */
3707 /* inspect portions of the buffer while other portions are being */
3708 /* updated by the adapter using Bus Master DMA. */
3709
3710 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3711 if (info->buffer_list == NULL)
3712 return -ENOMEM;
3713 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3714 }
3715
3716 /* We got the memory for the buffer entry lists. */
3717 /* Initialize the memory block to all zeros. */
3718 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3719
3720 /* Save virtual address pointers to the receive and */
3721 /* transmit buffer lists. (Receive 1st). These pointers will */
3722 /* be used by the processor to access the lists. */
3723 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3724 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3725 info->tx_buffer_list += info->rx_buffer_count;
3726
3727 /*
3728 * Build the links for the buffer entry lists such that
3729 * two circular lists are built. (Transmit and Receive).
3730 *
3731 * Note: the links are physical addresses
3732 * which are read by the adapter to determine the next
3733 * buffer entry to use.
3734 */
3735
3736 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3737 /* calculate and store physical address of this buffer entry */
3738 info->rx_buffer_list[i].phys_entry =
3739 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3740
3741 /* calculate and store physical address of */
3742 /* next entry in cirular list of entries */
3743
3744 info->rx_buffer_list[i].link = info->buffer_list_phys;
3745
3746 if ( i < info->rx_buffer_count - 1 )
3747 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3748 }
3749
3750 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3751 /* calculate and store physical address of this buffer entry */
3752 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3753 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3754
3755 /* calculate and store physical address of */
3756 /* next entry in cirular list of entries */
3757
3758 info->tx_buffer_list[i].link = info->buffer_list_phys +
3759 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3760
3761 if ( i < info->tx_buffer_count - 1 )
3762 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3763 }
3764
3765 return 0;
3766
3767 } /* end of mgsl_alloc_buffer_list_memory() */
3768
3769 /* Free DMA buffers allocated for use as the
3770 * receive and transmit buffer lists.
3771 * Warning:
3772 *
3773 * The data transfer buffers associated with the buffer list
3774 * MUST be freed before freeing the buffer list itself because
3775 * the buffer list contains the information necessary to free
3776 * the individual buffers!
3777 */
3778 static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3779 {
3780 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3781 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3782
3783 info->buffer_list = NULL;
3784 info->rx_buffer_list = NULL;
3785 info->tx_buffer_list = NULL;
3786
3787 } /* end of mgsl_free_buffer_list_memory() */
3788
3789 /*
3790 * mgsl_alloc_frame_memory()
3791 *
3792 * Allocate the frame DMA buffers used by the specified buffer list.
3793 * Each DMA buffer will be one memory page in size. This is necessary
3794 * because memory can fragment enough that it may be impossible
3795 * contiguous pages.
3796 *
3797 * Arguments:
3798 *
3799 * info pointer to device instance data
3800 * BufferList pointer to list of buffer entries
3801 * Buffercount count of buffer entries in buffer list
3802 *
3803 * Return Value: 0 if success, otherwise -ENOMEM
3804 */
3805 static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3806 {
3807 int i;
3808 u32 phys_addr;
3809
3810 /* Allocate page sized buffers for the receive buffer list */
3811
3812 for ( i = 0; i < Buffercount; i++ ) {
3813 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3814 /* PCI adapter uses shared memory buffers. */
3815 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3816 phys_addr = info->last_mem_alloc;
3817 info->last_mem_alloc += DMABUFFERSIZE;
3818 } else {
3819 /* ISA adapter uses system memory. */
3820 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3821 if (BufferList[i].virt_addr == NULL)
3822 return -ENOMEM;
3823 phys_addr = (u32)(BufferList[i].dma_addr);
3824 }
3825 BufferList[i].phys_addr = phys_addr;
3826 }
3827
3828 return 0;
3829
3830 } /* end of mgsl_alloc_frame_memory() */
3831
3832 /*
3833 * mgsl_free_frame_memory()
3834 *
3835 * Free the buffers associated with
3836 * each buffer entry of a buffer list.
3837 *
3838 * Arguments:
3839 *
3840 * info pointer to device instance data
3841 * BufferList pointer to list of buffer entries
3842 * Buffercount count of buffer entries in buffer list
3843 *
3844 * Return Value: None
3845 */
3846 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3847 {
3848 int i;
3849
3850 if ( BufferList ) {
3851 for ( i = 0 ; i < Buffercount ; i++ ) {
3852 if ( BufferList[i].virt_addr ) {
3853 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3854 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3855 BufferList[i].virt_addr = NULL;
3856 }
3857 }
3858 }
3859
3860 } /* end of mgsl_free_frame_memory() */
3861
3862 /* mgsl_free_dma_buffers()
3863 *
3864 * Free DMA buffers
3865 *
3866 * Arguments: info pointer to device instance data
3867 * Return Value: None
3868 */
3869 static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3870 {
3871 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3872 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3873 mgsl_free_buffer_list_memory( info );
3874
3875 } /* end of mgsl_free_dma_buffers() */
3876
3877
3878 /*
3879 * mgsl_alloc_intermediate_rxbuffer_memory()
3880 *
3881 * Allocate a buffer large enough to hold max_frame_size. This buffer
3882 * is used to pass an assembled frame to the line discipline.
3883 *
3884 * Arguments:
3885 *
3886 * info pointer to device instance data
3887 *
3888 * Return Value: 0 if success, otherwise -ENOMEM
3889 */
3890 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3891 {
3892 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3893 if ( info->intermediate_rxbuffer == NULL )
3894 return -ENOMEM;
3895
3896 return 0;
3897
3898 } /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3899
3900 /*
3901 * mgsl_free_intermediate_rxbuffer_memory()
3902 *
3903 *
3904 * Arguments:
3905 *
3906 * info pointer to device instance data
3907 *
3908 * Return Value: None
3909 */
3910 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3911 {
3912 kfree(info->intermediate_rxbuffer);
3913 info->intermediate_rxbuffer = NULL;
3914
3915 } /* end of mgsl_free_intermediate_rxbuffer_memory() */
3916
3917 /*
3918 * mgsl_alloc_intermediate_txbuffer_memory()
3919 *
3920 * Allocate intermdiate transmit buffer(s) large enough to hold max_frame_size.
3921 * This buffer is used to load transmit frames into the adapter's dma transfer
3922 * buffers when there is sufficient space.
3923 *
3924 * Arguments:
3925 *
3926 * info pointer to device instance data
3927 *
3928 * Return Value: 0 if success, otherwise -ENOMEM
3929 */
3930 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3931 {
3932 int i;
3933
3934 if ( debug_level >= DEBUG_LEVEL_INFO )
3935 printk("%s %s(%d) allocating %d tx holding buffers\n",
3936 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3937
3938 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3939
3940 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3941 info->tx_holding_buffers[i].buffer =
3942 kmalloc(info->max_frame_size, GFP_KERNEL);
3943 if (info->tx_holding_buffers[i].buffer == NULL) {
3944 for (--i; i >= 0; i--) {
3945 kfree(info->tx_holding_buffers[i].buffer);
3946 info->tx_holding_buffers[i].buffer = NULL;
3947 }
3948 return -ENOMEM;
3949 }
3950 }
3951
3952 return 0;
3953
3954 } /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3955
3956 /*
3957 * mgsl_free_intermediate_txbuffer_memory()
3958 *
3959 *
3960 * Arguments:
3961 *
3962 * info pointer to device instance data
3963 *
3964 * Return Value: None
3965 */
3966 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3967 {
3968 int i;
3969
3970 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3971 kfree(info->tx_holding_buffers[i].buffer);
3972 info->tx_holding_buffers[i].buffer = NULL;
3973 }
3974
3975 info->get_tx_holding_index = 0;
3976 info->put_tx_holding_index = 0;
3977 info->tx_holding_count = 0;
3978
3979 } /* end of mgsl_free_intermediate_txbuffer_memory() */
3980
3981
3982 /*
3983 * load_next_tx_holding_buffer()
3984 *
3985 * attempts to load the next buffered tx request into the
3986 * tx dma buffers
3987 *
3988 * Arguments:
3989 *
3990 * info pointer to device instance data
3991 *
3992 * Return Value: true if next buffered tx request loaded
3993 * into adapter's tx dma buffer,
3994 * false otherwise
3995 */
3996 static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
3997 {
3998 bool ret = false;
3999
4000 if ( info->tx_holding_count ) {
4001 /* determine if we have enough tx dma buffers
4002 * to accommodate the next tx frame
4003 */
4004 struct tx_holding_buffer *ptx =
4005 &info->tx_holding_buffers[info->get_tx_holding_index];
4006 int num_free = num_free_tx_dma_buffers(info);
4007 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4008 if ( ptx->buffer_size % DMABUFFERSIZE )
4009 ++num_needed;
4010
4011 if (num_needed <= num_free) {
4012 info->xmit_cnt = ptx->buffer_size;
4013 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4014
4015 --info->tx_holding_count;
4016 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4017 info->get_tx_holding_index=0;
4018
4019 /* restart transmit timer */
4020 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4021
4022 ret = true;
4023 }
4024 }
4025
4026 return ret;
4027 }
4028
4029 /*
4030 * save_tx_buffer_request()
4031 *
4032 * attempt to store transmit frame request for later transmission
4033 *
4034 * Arguments:
4035 *
4036 * info pointer to device instance data
4037 * Buffer pointer to buffer containing frame to load
4038 * BufferSize size in bytes of frame in Buffer
4039 *
4040 * Return Value: 1 if able to store, 0 otherwise
4041 */
4042 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4043 {
4044 struct tx_holding_buffer *ptx;
4045
4046 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4047 return 0; /* all buffers in use */
4048 }
4049
4050 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4051 ptx->buffer_size = BufferSize;
4052 memcpy( ptx->buffer, Buffer, BufferSize);
4053
4054 ++info->tx_holding_count;
4055 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4056 info->put_tx_holding_index=0;
4057
4058 return 1;
4059 }
4060
4061 static int mgsl_claim_resources(struct mgsl_struct *info)
4062 {
4063 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4064 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4065 __FILE__,__LINE__,info->device_name, info->io_base);
4066 return -ENODEV;
4067 }
4068 info->io_addr_requested = true;
4069
4070 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4071 info->device_name, info ) < 0 ) {
4072 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4073 __FILE__,__LINE__,info->device_name, info->irq_level );
4074 goto errout;
4075 }
4076 info->irq_requested = true;
4077
4078 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4079 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4080 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4081 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4082 goto errout;
4083 }
4084 info->shared_mem_requested = true;
4085 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4086 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4087 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4088 goto errout;
4089 }
4090 info->lcr_mem_requested = true;
4091
4092 info->memory_base = ioremap_nocache(info->phys_memory_base,
4093 0x40000);
4094 if (!info->memory_base) {
4095 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4096 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4097 goto errout;
4098 }
4099
4100 if ( !mgsl_memory_test(info) ) {
4101 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4102 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4103 goto errout;
4104 }
4105
4106 info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4107 PAGE_SIZE);
4108 if (!info->lcr_base) {
4109 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4110 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4111 goto errout;
4112 }
4113 info->lcr_base += info->lcr_offset;
4114
4115 } else {
4116 /* claim DMA channel */
4117
4118 if (request_dma(info->dma_level,info->device_name) < 0){
4119 printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4120 __FILE__,__LINE__,info->device_name, info->dma_level );
4121 mgsl_release_resources( info );
4122 return -ENODEV;
4123 }
4124 info->dma_requested = true;
4125
4126 /* ISA adapter uses bus master DMA */
4127 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4128 enable_dma(info->dma_level);
4129 }
4130
4131 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4132 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4133 __FILE__,__LINE__,info->device_name, info->dma_level );
4134 goto errout;
4135 }
4136
4137 return 0;
4138 errout:
4139 mgsl_release_resources(info);
4140 return -ENODEV;
4141
4142 } /* end of mgsl_claim_resources() */
4143
4144 static void mgsl_release_resources(struct mgsl_struct *info)
4145 {
4146 if ( debug_level >= DEBUG_LEVEL_INFO )
4147 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4148 __FILE__,__LINE__,info->device_name );
4149
4150 if ( info->irq_requested ) {
4151 free_irq(info->irq_level, info);
4152 info->irq_requested = false;
4153 }
4154 if ( info->dma_requested ) {
4155 disable_dma(info->dma_level);
4156 free_dma(info->dma_level);
4157 info->dma_requested = false;
4158 }
4159 mgsl_free_dma_buffers(info);
4160 mgsl_free_intermediate_rxbuffer_memory(info);
4161 mgsl_free_intermediate_txbuffer_memory(info);
4162
4163 if ( info->io_addr_requested ) {
4164 release_region(info->io_base,info->io_addr_size);
4165 info->io_addr_requested = false;
4166 }
4167 if ( info->shared_mem_requested ) {
4168 release_mem_region(info->phys_memory_base,0x40000);
4169 info->shared_mem_requested = false;
4170 }
4171 if ( info->lcr_mem_requested ) {
4172 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4173 info->lcr_mem_requested = false;
4174 }
4175 if (info->memory_base){
4176 iounmap(info->memory_base);
4177 info->memory_base = NULL;
4178 }
4179 if (info->lcr_base){
4180 iounmap(info->lcr_base - info->lcr_offset);
4181 info->lcr_base = NULL;
4182 }
4183
4184 if ( debug_level >= DEBUG_LEVEL_INFO )
4185 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4186 __FILE__,__LINE__,info->device_name );
4187
4188 } /* end of mgsl_release_resources() */
4189
4190 /* mgsl_add_device()
4191 *
4192 * Add the specified device instance data structure to the
4193 * global linked list of devices and increment the device count.
4194 *
4195 * Arguments: info pointer to device instance data
4196 * Return Value: None
4197 */
4198 static void mgsl_add_device( struct mgsl_struct *info )
4199 {
4200 info->next_device = NULL;
4201 info->line = mgsl_device_count;
4202 sprintf(info->device_name,"ttySL%d",info->line);
4203
4204 if (info->line < MAX_TOTAL_DEVICES) {
4205 if (maxframe[info->line])
4206 info->max_frame_size = maxframe[info->line];
4207
4208 if (txdmabufs[info->line]) {
4209 info->num_tx_dma_buffers = txdmabufs[info->line];
4210 if (info->num_tx_dma_buffers < 1)
4211 info->num_tx_dma_buffers = 1;
4212 }
4213
4214 if (txholdbufs[info->line]) {
4215 info->num_tx_holding_buffers = txholdbufs[info->line];
4216 if (info->num_tx_holding_buffers < 1)
4217 info->num_tx_holding_buffers = 1;
4218 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4219 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4220 }
4221 }
4222
4223 mgsl_device_count++;
4224
4225 if ( !mgsl_device_list )
4226 mgsl_device_list = info;
4227 else {
4228 struct mgsl_struct *current_dev = mgsl_device_list;
4229 while( current_dev->next_device )
4230 current_dev = current_dev->next_device;
4231 current_dev->next_device = info;
4232 }
4233
4234 if ( info->max_frame_size < 4096 )
4235 info->max_frame_size = 4096;
4236 else if ( info->max_frame_size > 65535 )
4237 info->max_frame_size = 65535;
4238
4239 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4240 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4241 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4242 info->phys_memory_base, info->phys_lcr_base,
4243 info->max_frame_size );
4244 } else {
4245 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4246 info->device_name, info->io_base, info->irq_level, info->dma_level,
4247 info->max_frame_size );
4248 }
4249
4250 #if SYNCLINK_GENERIC_HDLC
4251 hdlcdev_init(info);
4252 #endif
4253
4254 } /* end of mgsl_add_device() */
4255
4256 static const struct tty_port_operations mgsl_port_ops = {
4257 .carrier_raised = carrier_raised,
4258 .dtr_rts = dtr_rts,
4259 };
4260
4261
4262 /* mgsl_allocate_device()
4263 *
4264 * Allocate and initialize a device instance structure
4265 *
4266 * Arguments: none
4267 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4268 */
4269 static struct mgsl_struct* mgsl_allocate_device(void)
4270 {
4271 struct mgsl_struct *info;
4272
4273 info = kzalloc(sizeof(struct mgsl_struct),
4274 GFP_KERNEL);
4275
4276 if (!info) {
4277 printk("Error can't allocate device instance data\n");
4278 } else {
4279 tty_port_init(&info->port);
4280 info->port.ops = &mgsl_port_ops;
4281 info->magic = MGSL_MAGIC;
4282 INIT_WORK(&info->task, mgsl_bh_handler);
4283 info->max_frame_size = 4096;
4284 info->port.close_delay = 5*HZ/10;
4285 info->port.closing_wait = 30*HZ;
4286 init_waitqueue_head(&info->status_event_wait_q);
4287 init_waitqueue_head(&info->event_wait_q);
4288 spin_lock_init(&info->irq_spinlock);
4289 spin_lock_init(&info->netlock);
4290 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4291 info->idle_mode = HDLC_TXIDLE_FLAGS;
4292 info->num_tx_dma_buffers = 1;
4293 info->num_tx_holding_buffers = 0;
4294 }
4295
4296 return info;
4297
4298 } /* end of mgsl_allocate_device()*/
4299
4300 static const struct tty_operations mgsl_ops = {
4301 .open = mgsl_open,
4302 .close = mgsl_close,
4303 .write = mgsl_write,
4304 .put_char = mgsl_put_char,
4305 .flush_chars = mgsl_flush_chars,
4306 .write_room = mgsl_write_room,
4307 .chars_in_buffer = mgsl_chars_in_buffer,
4308 .flush_buffer = mgsl_flush_buffer,
4309 .ioctl = mgsl_ioctl,
4310 .throttle = mgsl_throttle,
4311 .unthrottle = mgsl_unthrottle,
4312 .send_xchar = mgsl_send_xchar,
4313 .break_ctl = mgsl_break,
4314 .wait_until_sent = mgsl_wait_until_sent,
4315 .set_termios = mgsl_set_termios,
4316 .stop = mgsl_stop,
4317 .start = mgsl_start,
4318 .hangup = mgsl_hangup,
4319 .tiocmget = tiocmget,
4320 .tiocmset = tiocmset,
4321 .get_icount = msgl_get_icount,
4322 .proc_fops = &mgsl_proc_fops,
4323 };
4324
4325 /*
4326 * perform tty device initialization
4327 */
4328 static int mgsl_init_tty(void)
4329 {
4330 int rc;
4331
4332 serial_driver = alloc_tty_driver(128);
4333 if (!serial_driver)
4334 return -ENOMEM;
4335
4336 serial_driver->driver_name = "synclink";
4337 serial_driver->name = "ttySL";
4338 serial_driver->major = ttymajor;
4339 serial_driver->minor_start = 64;
4340 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4341 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4342 serial_driver->init_termios = tty_std_termios;
4343 serial_driver->init_termios.c_cflag =
4344 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4345 serial_driver->init_termios.c_ispeed = 9600;
4346 serial_driver->init_termios.c_ospeed = 9600;
4347 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4348 tty_set_operations(serial_driver, &mgsl_ops);
4349 if ((rc = tty_register_driver(serial_driver)) < 0) {
4350 printk("%s(%d):Couldn't register serial driver\n",
4351 __FILE__,__LINE__);
4352 put_tty_driver(serial_driver);
4353 serial_driver = NULL;
4354 return rc;
4355 }
4356
4357 printk("%s %s, tty major#%d\n",
4358 driver_name, driver_version,
4359 serial_driver->major);
4360 return 0;
4361 }
4362
4363 /* enumerate user specified ISA adapters
4364 */
4365 static void mgsl_enum_isa_devices(void)
4366 {
4367 struct mgsl_struct *info;
4368 int i;
4369
4370 /* Check for user specified ISA devices */
4371
4372 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4373 if ( debug_level >= DEBUG_LEVEL_INFO )
4374 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4375 io[i], irq[i], dma[i] );
4376
4377 info = mgsl_allocate_device();
4378 if ( !info ) {
4379 /* error allocating device instance data */
4380 if ( debug_level >= DEBUG_LEVEL_ERROR )
4381 printk( "can't allocate device instance data.\n");
4382 continue;
4383 }
4384
4385 /* Copy user configuration info to device instance data */
4386 info->io_base = (unsigned int)io[i];
4387 info->irq_level = (unsigned int)irq[i];
4388 info->irq_level = irq_canonicalize(info->irq_level);
4389 info->dma_level = (unsigned int)dma[i];
4390 info->bus_type = MGSL_BUS_TYPE_ISA;
4391 info->io_addr_size = 16;
4392 info->irq_flags = 0;
4393
4394 mgsl_add_device( info );
4395 }
4396 }
4397
4398 static void synclink_cleanup(void)
4399 {
4400 int rc;
4401 struct mgsl_struct *info;
4402 struct mgsl_struct *tmp;
4403
4404 printk("Unloading %s: %s\n", driver_name, driver_version);
4405
4406 if (serial_driver) {
4407 if ((rc = tty_unregister_driver(serial_driver)))
4408 printk("%s(%d) failed to unregister tty driver err=%d\n",
4409 __FILE__,__LINE__,rc);
4410 put_tty_driver(serial_driver);
4411 }
4412
4413 info = mgsl_device_list;
4414 while(info) {
4415 #if SYNCLINK_GENERIC_HDLC
4416 hdlcdev_exit(info);
4417 #endif
4418 mgsl_release_resources(info);
4419 tmp = info;
4420 info = info->next_device;
4421 kfree(tmp);
4422 }
4423
4424 if (pci_registered)
4425 pci_unregister_driver(&synclink_pci_driver);
4426 }
4427
4428 static int __init synclink_init(void)
4429 {
4430 int rc;
4431
4432 if (break_on_load) {
4433 mgsl_get_text_ptr();
4434 BREAKPOINT();
4435 }
4436
4437 printk("%s %s\n", driver_name, driver_version);
4438
4439 mgsl_enum_isa_devices();
4440 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4441 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4442 else
4443 pci_registered = true;
4444
4445 if ((rc = mgsl_init_tty()) < 0)
4446 goto error;
4447
4448 return 0;
4449
4450 error:
4451 synclink_cleanup();
4452 return rc;
4453 }
4454
4455 static void __exit synclink_exit(void)
4456 {
4457 synclink_cleanup();
4458 }
4459
4460 module_init(synclink_init);
4461 module_exit(synclink_exit);
4462
4463 /*
4464 * usc_RTCmd()
4465 *
4466 * Issue a USC Receive/Transmit command to the
4467 * Channel Command/Address Register (CCAR).
4468 *
4469 * Notes:
4470 *
4471 * The command is encoded in the most significant 5 bits <15..11>
4472 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4473 * and Bits <6..0> must be written as zeros.
4474 *
4475 * Arguments:
4476 *
4477 * info pointer to device information structure
4478 * Cmd command mask (use symbolic macros)
4479 *
4480 * Return Value:
4481 *
4482 * None
4483 */
4484 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4485 {
4486 /* output command to CCAR in bits <15..11> */
4487 /* preserve bits <10..7>, bits <6..0> must be zero */
4488
4489 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4490
4491 /* Read to flush write to CCAR */
4492 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4493 inw( info->io_base + CCAR );
4494
4495 } /* end of usc_RTCmd() */
4496
4497 /*
4498 * usc_DmaCmd()
4499 *
4500 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4501 *
4502 * Arguments:
4503 *
4504 * info pointer to device information structure
4505 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4506 *
4507 * Return Value:
4508 *
4509 * None
4510 */
4511 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4512 {
4513 /* write command mask to DCAR */
4514 outw( Cmd + info->mbre_bit, info->io_base );
4515
4516 /* Read to flush write to DCAR */
4517 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4518 inw( info->io_base );
4519
4520 } /* end of usc_DmaCmd() */
4521
4522 /*
4523 * usc_OutDmaReg()
4524 *
4525 * Write a 16-bit value to a USC DMA register
4526 *
4527 * Arguments:
4528 *
4529 * info pointer to device info structure
4530 * RegAddr register address (number) for write
4531 * RegValue 16-bit value to write to register
4532 *
4533 * Return Value:
4534 *
4535 * None
4536 *
4537 */
4538 static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4539 {
4540 /* Note: The DCAR is located at the adapter base address */
4541 /* Note: must preserve state of BIT8 in DCAR */
4542
4543 outw( RegAddr + info->mbre_bit, info->io_base );
4544 outw( RegValue, info->io_base );
4545
4546 /* Read to flush write to DCAR */
4547 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4548 inw( info->io_base );
4549
4550 } /* end of usc_OutDmaReg() */
4551
4552 /*
4553 * usc_InDmaReg()
4554 *
4555 * Read a 16-bit value from a DMA register
4556 *
4557 * Arguments:
4558 *
4559 * info pointer to device info structure
4560 * RegAddr register address (number) to read from
4561 *
4562 * Return Value:
4563 *
4564 * The 16-bit value read from register
4565 *
4566 */
4567 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4568 {
4569 /* Note: The DCAR is located at the adapter base address */
4570 /* Note: must preserve state of BIT8 in DCAR */
4571
4572 outw( RegAddr + info->mbre_bit, info->io_base );
4573 return inw( info->io_base );
4574
4575 } /* end of usc_InDmaReg() */
4576
4577 /*
4578 *
4579 * usc_OutReg()
4580 *
4581 * Write a 16-bit value to a USC serial channel register
4582 *
4583 * Arguments:
4584 *
4585 * info pointer to device info structure
4586 * RegAddr register address (number) to write to
4587 * RegValue 16-bit value to write to register
4588 *
4589 * Return Value:
4590 *
4591 * None
4592 *
4593 */
4594 static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4595 {
4596 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4597 outw( RegValue, info->io_base + CCAR );
4598
4599 /* Read to flush write to CCAR */
4600 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4601 inw( info->io_base + CCAR );
4602
4603 } /* end of usc_OutReg() */
4604
4605 /*
4606 * usc_InReg()
4607 *
4608 * Reads a 16-bit value from a USC serial channel register
4609 *
4610 * Arguments:
4611 *
4612 * info pointer to device extension
4613 * RegAddr register address (number) to read from
4614 *
4615 * Return Value:
4616 *
4617 * 16-bit value read from register
4618 */
4619 static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4620 {
4621 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4622 return inw( info->io_base + CCAR );
4623
4624 } /* end of usc_InReg() */
4625
4626 /* usc_set_sdlc_mode()
4627 *
4628 * Set up the adapter for SDLC DMA communications.
4629 *
4630 * Arguments: info pointer to device instance data
4631 * Return Value: NONE
4632 */
4633 static void usc_set_sdlc_mode( struct mgsl_struct *info )
4634 {
4635 u16 RegValue;
4636 bool PreSL1660;
4637
4638 /*
4639 * determine if the IUSC on the adapter is pre-SL1660. If
4640 * not, take advantage of the UnderWait feature of more
4641 * modern chips. If an underrun occurs and this bit is set,
4642 * the transmitter will idle the programmed idle pattern
4643 * until the driver has time to service the underrun. Otherwise,
4644 * the dma controller may get the cycles previously requested
4645 * and begin transmitting queued tx data.
4646 */
4647 usc_OutReg(info,TMCR,0x1f);
4648 RegValue=usc_InReg(info,TMDR);
4649 PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4650
4651 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4652 {
4653 /*
4654 ** Channel Mode Register (CMR)
4655 **
4656 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4657 ** <13> 0 0 = Transmit Disabled (initially)
4658 ** <12> 0 1 = Consecutive Idles share common 0
4659 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4660 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4661 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4662 **
4663 ** 1000 1110 0000 0110 = 0x8e06
4664 */
4665 RegValue = 0x8e06;
4666
4667 /*--------------------------------------------------
4668 * ignore user options for UnderRun Actions and
4669 * preambles
4670 *--------------------------------------------------*/
4671 }
4672 else
4673 {
4674 /* Channel mode Register (CMR)
4675 *
4676 * <15..14> 00 Tx Sub modes, Underrun Action
4677 * <13> 0 1 = Send Preamble before opening flag
4678 * <12> 0 1 = Consecutive Idles share common 0
4679 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4680 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4681 * <3..0> 0110 Receiver mode = HDLC/SDLC
4682 *
4683 * 0000 0110 0000 0110 = 0x0606
4684 */
4685 if (info->params.mode == MGSL_MODE_RAW) {
4686 RegValue = 0x0001; /* Set Receive mode = external sync */
4687
4688 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4689 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4690
4691 /*
4692 * TxSubMode:
4693 * CMR <15> 0 Don't send CRC on Tx Underrun
4694 * CMR <14> x undefined
4695 * CMR <13> 0 Send preamble before openning sync
4696 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4697 *
4698 * TxMode:
4699 * CMR <11-8) 0100 MonoSync
4700 *
4701 * 0x00 0100 xxxx xxxx 04xx
4702 */
4703 RegValue |= 0x0400;
4704 }
4705 else {
4706
4707 RegValue = 0x0606;
4708
4709 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4710 RegValue |= BIT14;
4711 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4712 RegValue |= BIT15;
4713 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4714 RegValue |= BIT15 + BIT14;
4715 }
4716
4717 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4718 RegValue |= BIT13;
4719 }
4720
4721 if ( info->params.mode == MGSL_MODE_HDLC &&
4722 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4723 RegValue |= BIT12;
4724
4725 if ( info->params.addr_filter != 0xff )
4726 {
4727 /* set up receive address filtering */
4728 usc_OutReg( info, RSR, info->params.addr_filter );
4729 RegValue |= BIT4;
4730 }
4731
4732 usc_OutReg( info, CMR, RegValue );
4733 info->cmr_value = RegValue;
4734
4735 /* Receiver mode Register (RMR)
4736 *
4737 * <15..13> 000 encoding
4738 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4739 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4740 * <9> 0 1 = Include Receive chars in CRC
4741 * <8> 1 1 = Use Abort/PE bit as abort indicator
4742 * <7..6> 00 Even parity
4743 * <5> 0 parity disabled
4744 * <4..2> 000 Receive Char Length = 8 bits
4745 * <1..0> 00 Disable Receiver
4746 *
4747 * 0000 0101 0000 0000 = 0x0500
4748 */
4749
4750 RegValue = 0x0500;
4751
4752 switch ( info->params.encoding ) {
4753 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4754 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4755 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4756 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4757 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4758 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4759 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4760 }
4761
4762 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4763 RegValue |= BIT9;
4764 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4765 RegValue |= ( BIT12 | BIT10 | BIT9 );
4766
4767 usc_OutReg( info, RMR, RegValue );
4768
4769 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4770 /* When an opening flag of an SDLC frame is recognized the */
4771 /* Receive Character count (RCC) is loaded with the value in */
4772 /* RCLR. The RCC is decremented for each received byte. The */
4773 /* value of RCC is stored after the closing flag of the frame */
4774 /* allowing the frame size to be computed. */
4775
4776 usc_OutReg( info, RCLR, RCLRVALUE );
4777
4778 usc_RCmd( info, RCmd_SelectRicrdma_level );
4779
4780 /* Receive Interrupt Control Register (RICR)
4781 *
4782 * <15..8> ? RxFIFO DMA Request Level
4783 * <7> 0 Exited Hunt IA (Interrupt Arm)
4784 * <6> 0 Idle Received IA
4785 * <5> 0 Break/Abort IA
4786 * <4> 0 Rx Bound IA
4787 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4788 * <2> 0 Abort/PE IA
4789 * <1> 1 Rx Overrun IA
4790 * <0> 0 Select TC0 value for readback
4791 *
4792 * 0000 0000 0000 1000 = 0x000a
4793 */
4794
4795 /* Carry over the Exit Hunt and Idle Received bits */
4796 /* in case they have been armed by usc_ArmEvents. */
4797
4798 RegValue = usc_InReg( info, RICR ) & 0xc0;
4799
4800 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4801 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4802 else
4803 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4804
4805 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4806
4807 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4808 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4809
4810 /* Transmit mode Register (TMR)
4811 *
4812 * <15..13> 000 encoding
4813 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4814 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4815 * <9> 0 1 = Tx CRC Enabled
4816 * <8> 0 1 = Append CRC to end of transmit frame
4817 * <7..6> 00 Transmit parity Even
4818 * <5> 0 Transmit parity Disabled
4819 * <4..2> 000 Tx Char Length = 8 bits
4820 * <1..0> 00 Disable Transmitter
4821 *
4822 * 0000 0100 0000 0000 = 0x0400
4823 */
4824
4825 RegValue = 0x0400;
4826
4827 switch ( info->params.encoding ) {
4828 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4829 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4830 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4831 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4832 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4833 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4834 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4835 }
4836
4837 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4838 RegValue |= BIT9 + BIT8;
4839 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4840 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4841
4842 usc_OutReg( info, TMR, RegValue );
4843
4844 usc_set_txidle( info );
4845
4846
4847 usc_TCmd( info, TCmd_SelectTicrdma_level );
4848
4849 /* Transmit Interrupt Control Register (TICR)
4850 *
4851 * <15..8> ? Transmit FIFO DMA Level
4852 * <7> 0 Present IA (Interrupt Arm)
4853 * <6> 0 Idle Sent IA
4854 * <5> 1 Abort Sent IA
4855 * <4> 1 EOF/EOM Sent IA
4856 * <3> 0 CRC Sent IA
4857 * <2> 1 1 = Wait for SW Trigger to Start Frame
4858 * <1> 1 Tx Underrun IA
4859 * <0> 0 TC0 constant on read back
4860 *
4861 * 0000 0000 0011 0110 = 0x0036
4862 */
4863
4864 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4865 usc_OutReg( info, TICR, 0x0736 );
4866 else
4867 usc_OutReg( info, TICR, 0x1436 );
4868
4869 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4870 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4871
4872 /*
4873 ** Transmit Command/Status Register (TCSR)
4874 **
4875 ** <15..12> 0000 TCmd
4876 ** <11> 0/1 UnderWait
4877 ** <10..08> 000 TxIdle
4878 ** <7> x PreSent
4879 ** <6> x IdleSent
4880 ** <5> x AbortSent
4881 ** <4> x EOF/EOM Sent
4882 ** <3> x CRC Sent
4883 ** <2> x All Sent
4884 ** <1> x TxUnder
4885 ** <0> x TxEmpty
4886 **
4887 ** 0000 0000 0000 0000 = 0x0000
4888 */
4889 info->tcsr_value = 0;
4890
4891 if ( !PreSL1660 )
4892 info->tcsr_value |= TCSR_UNDERWAIT;
4893
4894 usc_OutReg( info, TCSR, info->tcsr_value );
4895
4896 /* Clock mode Control Register (CMCR)
4897 *
4898 * <15..14> 00 counter 1 Source = Disabled
4899 * <13..12> 00 counter 0 Source = Disabled
4900 * <11..10> 11 BRG1 Input is TxC Pin
4901 * <9..8> 11 BRG0 Input is TxC Pin
4902 * <7..6> 01 DPLL Input is BRG1 Output
4903 * <5..3> XXX TxCLK comes from Port 0
4904 * <2..0> XXX RxCLK comes from Port 1
4905 *
4906 * 0000 1111 0111 0111 = 0x0f77
4907 */
4908
4909 RegValue = 0x0f40;
4910
4911 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4912 RegValue |= 0x0003; /* RxCLK from DPLL */
4913 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4914 RegValue |= 0x0004; /* RxCLK from BRG0 */
4915 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4916 RegValue |= 0x0006; /* RxCLK from TXC Input */
4917 else
4918 RegValue |= 0x0007; /* RxCLK from Port1 */
4919
4920 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4921 RegValue |= 0x0018; /* TxCLK from DPLL */
4922 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4923 RegValue |= 0x0020; /* TxCLK from BRG0 */
4924 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4925 RegValue |= 0x0038; /* RxCLK from TXC Input */
4926 else
4927 RegValue |= 0x0030; /* TxCLK from Port0 */
4928
4929 usc_OutReg( info, CMCR, RegValue );
4930
4931
4932 /* Hardware Configuration Register (HCR)
4933 *
4934 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4935 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
4936 * <12> 0 CVOK:0=report code violation in biphase
4937 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4938 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4939 * <7..6> 00 reserved
4940 * <5> 0 BRG1 mode:0=continuous,1=single cycle
4941 * <4> X BRG1 Enable
4942 * <3..2> 00 reserved
4943 * <1> 0 BRG0 mode:0=continuous,1=single cycle
4944 * <0> 0 BRG0 Enable
4945 */
4946
4947 RegValue = 0x0000;
4948
4949 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
4950 u32 XtalSpeed;
4951 u32 DpllDivisor;
4952 u16 Tc;
4953
4954 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
4955 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4956
4957 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4958 XtalSpeed = 11059200;
4959 else
4960 XtalSpeed = 14745600;
4961
4962 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4963 DpllDivisor = 16;
4964 RegValue |= BIT10;
4965 }
4966 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4967 DpllDivisor = 8;
4968 RegValue |= BIT11;
4969 }
4970 else
4971 DpllDivisor = 32;
4972
4973 /* Tc = (Xtal/Speed) - 1 */
4974 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
4975 /* then rounding up gives a more precise time constant. Instead */
4976 /* of rounding up and then subtracting 1 we just don't subtract */
4977 /* the one in this case. */
4978
4979 /*--------------------------------------------------
4980 * ejz: for DPLL mode, application should use the
4981 * same clock speed as the partner system, even
4982 * though clocking is derived from the input RxData.
4983 * In case the user uses a 0 for the clock speed,
4984 * default to 0xffffffff and don't try to divide by
4985 * zero
4986 *--------------------------------------------------*/
4987 if ( info->params.clock_speed )
4988 {
4989 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4990 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4991 / info->params.clock_speed) )
4992 Tc--;
4993 }
4994 else
4995 Tc = -1;
4996
4997
4998 /* Write 16-bit Time Constant for BRG1 */
4999 usc_OutReg( info, TC1R, Tc );
5000
5001 RegValue |= BIT4; /* enable BRG1 */
5002
5003 switch ( info->params.encoding ) {
5004 case HDLC_ENCODING_NRZ:
5005 case HDLC_ENCODING_NRZB:
5006 case HDLC_ENCODING_NRZI_MARK:
5007 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5008 case HDLC_ENCODING_BIPHASE_MARK:
5009 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5010 case HDLC_ENCODING_BIPHASE_LEVEL:
5011 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5012 }
5013 }
5014
5015 usc_OutReg( info, HCR, RegValue );
5016
5017
5018 /* Channel Control/status Register (CCSR)
5019 *
5020 * <15> X RCC FIFO Overflow status (RO)
5021 * <14> X RCC FIFO Not Empty status (RO)
5022 * <13> 0 1 = Clear RCC FIFO (WO)
5023 * <12> X DPLL Sync (RW)
5024 * <11> X DPLL 2 Missed Clocks status (RO)
5025 * <10> X DPLL 1 Missed Clock status (RO)
5026 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5027 * <7> X SDLC Loop On status (RO)
5028 * <6> X SDLC Loop Send status (RO)
5029 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5030 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5031 * <1..0> 00 reserved
5032 *
5033 * 0000 0000 0010 0000 = 0x0020
5034 */
5035
5036 usc_OutReg( info, CCSR, 0x1020 );
5037
5038
5039 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5040 usc_OutReg( info, SICR,
5041 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5042 }
5043
5044
5045 /* enable Master Interrupt Enable bit (MIE) */
5046 usc_EnableMasterIrqBit( info );
5047
5048 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5049 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5050
5051 /* arm RCC underflow interrupt */
5052 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5053 usc_EnableInterrupts(info, MISC);
5054
5055 info->mbre_bit = 0;
5056 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5057 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5058 info->mbre_bit = BIT8;
5059 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5060
5061 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5062 /* Enable DMAEN (Port 7, Bit 14) */
5063 /* This connects the DMA request signal to the ISA bus */
5064 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5065 }
5066
5067 /* DMA Control Register (DCR)
5068 *
5069 * <15..14> 10 Priority mode = Alternating Tx/Rx
5070 * 01 Rx has priority
5071 * 00 Tx has priority
5072 *
5073 * <13> 1 Enable Priority Preempt per DCR<15..14>
5074 * (WARNING DCR<11..10> must be 00 when this is 1)
5075 * 0 Choose activate channel per DCR<11..10>
5076 *
5077 * <12> 0 Little Endian for Array/List
5078 * <11..10> 00 Both Channels can use each bus grant
5079 * <9..6> 0000 reserved
5080 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5081 * <4> 0 1 = drive D/C and S/D pins
5082 * <3> 1 1 = Add one wait state to all DMA cycles.
5083 * <2> 0 1 = Strobe /UAS on every transfer.
5084 * <1..0> 11 Addr incrementing only affects LS24 bits
5085 *
5086 * 0110 0000 0000 1011 = 0x600b
5087 */
5088
5089 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5090 /* PCI adapter does not need DMA wait state */
5091 usc_OutDmaReg( info, DCR, 0xa00b );
5092 }
5093 else
5094 usc_OutDmaReg( info, DCR, 0x800b );
5095
5096
5097 /* Receive DMA mode Register (RDMR)
5098 *
5099 * <15..14> 11 DMA mode = Linked List Buffer mode
5100 * <13> 1 RSBinA/L = store Rx status Block in Arrary/List entry
5101 * <12> 1 Clear count of List Entry after fetching
5102 * <11..10> 00 Address mode = Increment
5103 * <9> 1 Terminate Buffer on RxBound
5104 * <8> 0 Bus Width = 16bits
5105 * <7..0> ? status Bits (write as 0s)
5106 *
5107 * 1111 0010 0000 0000 = 0xf200
5108 */
5109
5110 usc_OutDmaReg( info, RDMR, 0xf200 );
5111
5112
5113 /* Transmit DMA mode Register (TDMR)
5114 *
5115 * <15..14> 11 DMA mode = Linked List Buffer mode
5116 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5117 * <12> 1 Clear count of List Entry after fetching
5118 * <11..10> 00 Address mode = Increment
5119 * <9> 1 Terminate Buffer on end of frame
5120 * <8> 0 Bus Width = 16bits
5121 * <7..0> ? status Bits (Read Only so write as 0)
5122 *
5123 * 1111 0010 0000 0000 = 0xf200
5124 */
5125
5126 usc_OutDmaReg( info, TDMR, 0xf200 );
5127
5128
5129 /* DMA Interrupt Control Register (DICR)
5130 *
5131 * <15> 1 DMA Interrupt Enable
5132 * <14> 0 1 = Disable IEO from USC
5133 * <13> 0 1 = Don't provide vector during IntAck
5134 * <12> 1 1 = Include status in Vector
5135 * <10..2> 0 reserved, Must be 0s
5136 * <1> 0 1 = Rx DMA Interrupt Enabled
5137 * <0> 0 1 = Tx DMA Interrupt Enabled
5138 *
5139 * 1001 0000 0000 0000 = 0x9000
5140 */
5141
5142 usc_OutDmaReg( info, DICR, 0x9000 );
5143
5144 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5145 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5146 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5147
5148 /* Channel Control Register (CCR)
5149 *
5150 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5151 * <13> 0 Trigger Tx on SW Command Disabled
5152 * <12> 0 Flag Preamble Disabled
5153 * <11..10> 00 Preamble Length
5154 * <9..8> 00 Preamble Pattern
5155 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5156 * <5> 0 Trigger Rx on SW Command Disabled
5157 * <4..0> 0 reserved
5158 *
5159 * 1000 0000 1000 0000 = 0x8080
5160 */
5161
5162 RegValue = 0x8080;
5163
5164 switch ( info->params.preamble_length ) {
5165 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5166 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5167 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5168 }
5169
5170 switch ( info->params.preamble ) {
5171 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5172 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5173 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5174 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5175 }
5176
5177 usc_OutReg( info, CCR, RegValue );
5178
5179
5180 /*
5181 * Burst/Dwell Control Register
5182 *
5183 * <15..8> 0x20 Maximum number of transfers per bus grant
5184 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5185 */
5186
5187 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5188 /* don't limit bus occupancy on PCI adapter */
5189 usc_OutDmaReg( info, BDCR, 0x0000 );
5190 }
5191 else
5192 usc_OutDmaReg( info, BDCR, 0x2000 );
5193
5194 usc_stop_transmitter(info);
5195 usc_stop_receiver(info);
5196
5197 } /* end of usc_set_sdlc_mode() */
5198
5199 /* usc_enable_loopback()
5200 *
5201 * Set the 16C32 for internal loopback mode.
5202 * The TxCLK and RxCLK signals are generated from the BRG0 and
5203 * the TxD is looped back to the RxD internally.
5204 *
5205 * Arguments: info pointer to device instance data
5206 * enable 1 = enable loopback, 0 = disable
5207 * Return Value: None
5208 */
5209 static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5210 {
5211 if (enable) {
5212 /* blank external TXD output */
5213 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5214
5215 /* Clock mode Control Register (CMCR)
5216 *
5217 * <15..14> 00 counter 1 Disabled
5218 * <13..12> 00 counter 0 Disabled
5219 * <11..10> 11 BRG1 Input is TxC Pin
5220 * <9..8> 11 BRG0 Input is TxC Pin
5221 * <7..6> 01 DPLL Input is BRG1 Output
5222 * <5..3> 100 TxCLK comes from BRG0
5223 * <2..0> 100 RxCLK comes from BRG0
5224 *
5225 * 0000 1111 0110 0100 = 0x0f64
5226 */
5227
5228 usc_OutReg( info, CMCR, 0x0f64 );
5229
5230 /* Write 16-bit Time Constant for BRG0 */
5231 /* use clock speed if available, otherwise use 8 for diagnostics */
5232 if (info->params.clock_speed) {
5233 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5234 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5235 else
5236 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5237 } else
5238 usc_OutReg(info, TC0R, (u16)8);
5239
5240 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5241 mode = Continuous Set Bit 0 to enable BRG0. */
5242 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5243
5244 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5245 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5246
5247 /* set Internal Data loopback mode */
5248 info->loopback_bits = 0x300;
5249 outw( 0x0300, info->io_base + CCAR );
5250 } else {
5251 /* enable external TXD output */
5252 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5253
5254 /* clear Internal Data loopback mode */
5255 info->loopback_bits = 0;
5256 outw( 0,info->io_base + CCAR );
5257 }
5258
5259 } /* end of usc_enable_loopback() */
5260
5261 /* usc_enable_aux_clock()
5262 *
5263 * Enabled the AUX clock output at the specified frequency.
5264 *
5265 * Arguments:
5266 *
5267 * info pointer to device extension
5268 * data_rate data rate of clock in bits per second
5269 * A data rate of 0 disables the AUX clock.
5270 *
5271 * Return Value: None
5272 */
5273 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5274 {
5275 u32 XtalSpeed;
5276 u16 Tc;
5277
5278 if ( data_rate ) {
5279 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5280 XtalSpeed = 11059200;
5281 else
5282 XtalSpeed = 14745600;
5283
5284
5285 /* Tc = (Xtal/Speed) - 1 */
5286 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5287 /* then rounding up gives a more precise time constant. Instead */
5288 /* of rounding up and then subtracting 1 we just don't subtract */
5289 /* the one in this case. */
5290
5291
5292 Tc = (u16)(XtalSpeed/data_rate);
5293 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5294 Tc--;
5295
5296 /* Write 16-bit Time Constant for BRG0 */
5297 usc_OutReg( info, TC0R, Tc );
5298
5299 /*
5300 * Hardware Configuration Register (HCR)
5301 * Clear Bit 1, BRG0 mode = Continuous
5302 * Set Bit 0 to enable BRG0.
5303 */
5304
5305 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5306
5307 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5308 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5309 } else {
5310 /* data rate == 0 so turn off BRG0 */
5311 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5312 }
5313
5314 } /* end of usc_enable_aux_clock() */
5315
5316 /*
5317 *
5318 * usc_process_rxoverrun_sync()
5319 *
5320 * This function processes a receive overrun by resetting the
5321 * receive DMA buffers and issuing a Purge Rx FIFO command
5322 * to allow the receiver to continue receiving.
5323 *
5324 * Arguments:
5325 *
5326 * info pointer to device extension
5327 *
5328 * Return Value: None
5329 */
5330 static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5331 {
5332 int start_index;
5333 int end_index;
5334 int frame_start_index;
5335 bool start_of_frame_found = false;
5336 bool end_of_frame_found = false;
5337 bool reprogram_dma = false;
5338
5339 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5340 u32 phys_addr;
5341
5342 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5343 usc_RCmd( info, RCmd_EnterHuntmode );
5344 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5345
5346 /* CurrentRxBuffer points to the 1st buffer of the next */
5347 /* possibly available receive frame. */
5348
5349 frame_start_index = start_index = end_index = info->current_rx_buffer;
5350
5351 /* Search for an unfinished string of buffers. This means */
5352 /* that a receive frame started (at least one buffer with */
5353 /* count set to zero) but there is no terminiting buffer */
5354 /* (status set to non-zero). */
5355
5356 while( !buffer_list[end_index].count )
5357 {
5358 /* Count field has been reset to zero by 16C32. */
5359 /* This buffer is currently in use. */
5360
5361 if ( !start_of_frame_found )
5362 {
5363 start_of_frame_found = true;
5364 frame_start_index = end_index;
5365 end_of_frame_found = false;
5366 }
5367
5368 if ( buffer_list[end_index].status )
5369 {
5370 /* Status field has been set by 16C32. */
5371 /* This is the last buffer of a received frame. */
5372
5373 /* We want to leave the buffers for this frame intact. */
5374 /* Move on to next possible frame. */
5375
5376 start_of_frame_found = false;
5377 end_of_frame_found = true;
5378 }
5379
5380 /* advance to next buffer entry in linked list */
5381 end_index++;
5382 if ( end_index == info->rx_buffer_count )
5383 end_index = 0;
5384
5385 if ( start_index == end_index )
5386 {
5387 /* The entire list has been searched with all Counts == 0 and */
5388 /* all Status == 0. The receive buffers are */
5389 /* completely screwed, reset all receive buffers! */
5390 mgsl_reset_rx_dma_buffers( info );
5391 frame_start_index = 0;
5392 start_of_frame_found = false;
5393 reprogram_dma = true;
5394 break;
5395 }
5396 }
5397
5398 if ( start_of_frame_found && !end_of_frame_found )
5399 {
5400 /* There is an unfinished string of receive DMA buffers */
5401 /* as a result of the receiver overrun. */
5402
5403 /* Reset the buffers for the unfinished frame */
5404 /* and reprogram the receive DMA controller to start */
5405 /* at the 1st buffer of unfinished frame. */
5406
5407 start_index = frame_start_index;
5408
5409 do
5410 {
5411 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5412
5413 /* Adjust index for wrap around. */
5414 if ( start_index == info->rx_buffer_count )
5415 start_index = 0;
5416
5417 } while( start_index != end_index );
5418
5419 reprogram_dma = true;
5420 }
5421
5422 if ( reprogram_dma )
5423 {
5424 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5425 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5426 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5427
5428 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5429
5430 /* This empties the receive FIFO and loads the RCC with RCLR */
5431 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5432
5433 /* program 16C32 with physical address of 1st DMA buffer entry */
5434 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5435 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5436 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5437
5438 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5439 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5440 usc_EnableInterrupts( info, RECEIVE_STATUS );
5441
5442 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5443 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5444
5445 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5446 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5447 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5448 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5449 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5450 else
5451 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5452 }
5453 else
5454 {
5455 /* This empties the receive FIFO and loads the RCC with RCLR */
5456 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5457 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5458 }
5459
5460 } /* end of usc_process_rxoverrun_sync() */
5461
5462 /* usc_stop_receiver()
5463 *
5464 * Disable USC receiver
5465 *
5466 * Arguments: info pointer to device instance data
5467 * Return Value: None
5468 */
5469 static void usc_stop_receiver( struct mgsl_struct *info )
5470 {
5471 if (debug_level >= DEBUG_LEVEL_ISR)
5472 printk("%s(%d):usc_stop_receiver(%s)\n",
5473 __FILE__,__LINE__, info->device_name );
5474
5475 /* Disable receive DMA channel. */
5476 /* This also disables receive DMA channel interrupts */
5477 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5478
5479 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5480 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5481 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5482
5483 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5484
5485 /* This empties the receive FIFO and loads the RCC with RCLR */
5486 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5487 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5488
5489 info->rx_enabled = false;
5490 info->rx_overflow = false;
5491 info->rx_rcc_underrun = false;
5492
5493 } /* end of stop_receiver() */
5494
5495 /* usc_start_receiver()
5496 *
5497 * Enable the USC receiver
5498 *
5499 * Arguments: info pointer to device instance data
5500 * Return Value: None
5501 */
5502 static void usc_start_receiver( struct mgsl_struct *info )
5503 {
5504 u32 phys_addr;
5505
5506 if (debug_level >= DEBUG_LEVEL_ISR)
5507 printk("%s(%d):usc_start_receiver(%s)\n",
5508 __FILE__,__LINE__, info->device_name );
5509
5510 mgsl_reset_rx_dma_buffers( info );
5511 usc_stop_receiver( info );
5512
5513 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5514 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5515
5516 if ( info->params.mode == MGSL_MODE_HDLC ||
5517 info->params.mode == MGSL_MODE_RAW ) {
5518 /* DMA mode Transfers */
5519 /* Program the DMA controller. */
5520 /* Enable the DMA controller end of buffer interrupt. */
5521
5522 /* program 16C32 with physical address of 1st DMA buffer entry */
5523 phys_addr = info->rx_buffer_list[0].phys_entry;
5524 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5525 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5526
5527 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5528 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5529 usc_EnableInterrupts( info, RECEIVE_STATUS );
5530
5531 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5532 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5533
5534 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5535 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5536 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5537 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5538 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5539 else
5540 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5541 } else {
5542 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5543 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5544 usc_EnableInterrupts(info, RECEIVE_DATA);
5545
5546 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5547 usc_RCmd( info, RCmd_EnterHuntmode );
5548
5549 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5550 }
5551
5552 usc_OutReg( info, CCSR, 0x1020 );
5553
5554 info->rx_enabled = true;
5555
5556 } /* end of usc_start_receiver() */
5557
5558 /* usc_start_transmitter()
5559 *
5560 * Enable the USC transmitter and send a transmit frame if
5561 * one is loaded in the DMA buffers.
5562 *
5563 * Arguments: info pointer to device instance data
5564 * Return Value: None
5565 */
5566 static void usc_start_transmitter( struct mgsl_struct *info )
5567 {
5568 u32 phys_addr;
5569 unsigned int FrameSize;
5570
5571 if (debug_level >= DEBUG_LEVEL_ISR)
5572 printk("%s(%d):usc_start_transmitter(%s)\n",
5573 __FILE__,__LINE__, info->device_name );
5574
5575 if ( info->xmit_cnt ) {
5576
5577 /* If auto RTS enabled and RTS is inactive, then assert */
5578 /* RTS and set a flag indicating that the driver should */
5579 /* negate RTS when the transmission completes. */
5580
5581 info->drop_rts_on_tx_done = false;
5582
5583 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5584 usc_get_serial_signals( info );
5585 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5586 info->serial_signals |= SerialSignal_RTS;
5587 usc_set_serial_signals( info );
5588 info->drop_rts_on_tx_done = true;
5589 }
5590 }
5591
5592
5593 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5594 if ( !info->tx_active ) {
5595 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5596 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5597 usc_EnableInterrupts(info, TRANSMIT_DATA);
5598 usc_load_txfifo(info);
5599 }
5600 } else {
5601 /* Disable transmit DMA controller while programming. */
5602 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5603
5604 /* Transmit DMA buffer is loaded, so program USC */
5605 /* to send the frame contained in the buffers. */
5606
5607 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5608
5609 /* if operating in Raw sync mode, reset the rcc component
5610 * of the tx dma buffer entry, otherwise, the serial controller
5611 * will send a closing sync char after this count.
5612 */
5613 if ( info->params.mode == MGSL_MODE_RAW )
5614 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5615
5616 /* Program the Transmit Character Length Register (TCLR) */
5617 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5618 usc_OutReg( info, TCLR, (u16)FrameSize );
5619
5620 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5621
5622 /* Program the address of the 1st DMA Buffer Entry in linked list */
5623 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5624 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5625 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5626
5627 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5628 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5629 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5630
5631 if ( info->params.mode == MGSL_MODE_RAW &&
5632 info->num_tx_dma_buffers > 1 ) {
5633 /* When running external sync mode, attempt to 'stream' transmit */
5634 /* by filling tx dma buffers as they become available. To do this */
5635 /* we need to enable Tx DMA EOB Status interrupts : */
5636 /* */
5637 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5638 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5639
5640 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5641 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5642 }
5643
5644 /* Initialize Transmit DMA Channel */
5645 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5646
5647 usc_TCmd( info, TCmd_SendFrame );
5648
5649 mod_timer(&info->tx_timer, jiffies +
5650 msecs_to_jiffies(5000));
5651 }
5652 info->tx_active = true;
5653 }
5654
5655 if ( !info->tx_enabled ) {
5656 info->tx_enabled = true;
5657 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5658 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5659 else
5660 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5661 }
5662
5663 } /* end of usc_start_transmitter() */
5664
5665 /* usc_stop_transmitter()
5666 *
5667 * Stops the transmitter and DMA
5668 *
5669 * Arguments: info pointer to device isntance data
5670 * Return Value: None
5671 */
5672 static void usc_stop_transmitter( struct mgsl_struct *info )
5673 {
5674 if (debug_level >= DEBUG_LEVEL_ISR)
5675 printk("%s(%d):usc_stop_transmitter(%s)\n",
5676 __FILE__,__LINE__, info->device_name );
5677
5678 del_timer(&info->tx_timer);
5679
5680 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5681 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5682 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5683
5684 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5685 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5686 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5687
5688 info->tx_enabled = false;
5689 info->tx_active = false;
5690
5691 } /* end of usc_stop_transmitter() */
5692
5693 /* usc_load_txfifo()
5694 *
5695 * Fill the transmit FIFO until the FIFO is full or
5696 * there is no more data to load.
5697 *
5698 * Arguments: info pointer to device extension (instance data)
5699 * Return Value: None
5700 */
5701 static void usc_load_txfifo( struct mgsl_struct *info )
5702 {
5703 int Fifocount;
5704 u8 TwoBytes[2];
5705
5706 if ( !info->xmit_cnt && !info->x_char )
5707 return;
5708
5709 /* Select transmit FIFO status readback in TICR */
5710 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5711
5712 /* load the Transmit FIFO until FIFOs full or all data sent */
5713
5714 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5715 /* there is more space in the transmit FIFO and */
5716 /* there is more data in transmit buffer */
5717
5718 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5719 /* write a 16-bit word from transmit buffer to 16C32 */
5720
5721 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5722 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5723 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5724 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5725
5726 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5727
5728 info->xmit_cnt -= 2;
5729 info->icount.tx += 2;
5730 } else {
5731 /* only 1 byte left to transmit or 1 FIFO slot left */
5732
5733 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5734 info->io_base + CCAR );
5735
5736 if (info->x_char) {
5737 /* transmit pending high priority char */
5738 outw( info->x_char,info->io_base + CCAR );
5739 info->x_char = 0;
5740 } else {
5741 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5742 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5743 info->xmit_cnt--;
5744 }
5745 info->icount.tx++;
5746 }
5747 }
5748
5749 } /* end of usc_load_txfifo() */
5750
5751 /* usc_reset()
5752 *
5753 * Reset the adapter to a known state and prepare it for further use.
5754 *
5755 * Arguments: info pointer to device instance data
5756 * Return Value: None
5757 */
5758 static void usc_reset( struct mgsl_struct *info )
5759 {
5760 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5761 int i;
5762 u32 readval;
5763
5764 /* Set BIT30 of Misc Control Register */
5765 /* (Local Control Register 0x50) to force reset of USC. */
5766
5767 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5768 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5769
5770 info->misc_ctrl_value |= BIT30;
5771 *MiscCtrl = info->misc_ctrl_value;
5772
5773 /*
5774 * Force at least 170ns delay before clearing
5775 * reset bit. Each read from LCR takes at least
5776 * 30ns so 10 times for 300ns to be safe.
5777 */
5778 for(i=0;i<10;i++)
5779 readval = *MiscCtrl;
5780
5781 info->misc_ctrl_value &= ~BIT30;
5782 *MiscCtrl = info->misc_ctrl_value;
5783
5784 *LCR0BRDR = BUS_DESCRIPTOR(
5785 1, // Write Strobe Hold (0-3)
5786 2, // Write Strobe Delay (0-3)
5787 2, // Read Strobe Delay (0-3)
5788 0, // NWDD (Write data-data) (0-3)
5789 4, // NWAD (Write Addr-data) (0-31)
5790 0, // NXDA (Read/Write Data-Addr) (0-3)
5791 0, // NRDD (Read Data-Data) (0-3)
5792 5 // NRAD (Read Addr-Data) (0-31)
5793 );
5794 } else {
5795 /* do HW reset */
5796 outb( 0,info->io_base + 8 );
5797 }
5798
5799 info->mbre_bit = 0;
5800 info->loopback_bits = 0;
5801 info->usc_idle_mode = 0;
5802
5803 /*
5804 * Program the Bus Configuration Register (BCR)
5805 *
5806 * <15> 0 Don't use separate address
5807 * <14..6> 0 reserved
5808 * <5..4> 00 IAckmode = Default, don't care
5809 * <3> 1 Bus Request Totem Pole output
5810 * <2> 1 Use 16 Bit data bus
5811 * <1> 0 IRQ Totem Pole output
5812 * <0> 0 Don't Shift Right Addr
5813 *
5814 * 0000 0000 0000 1100 = 0x000c
5815 *
5816 * By writing to io_base + SDPIN the Wait/Ack pin is
5817 * programmed to work as a Wait pin.
5818 */
5819
5820 outw( 0x000c,info->io_base + SDPIN );
5821
5822
5823 outw( 0,info->io_base );
5824 outw( 0,info->io_base + CCAR );
5825
5826 /* select little endian byte ordering */
5827 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5828
5829
5830 /* Port Control Register (PCR)
5831 *
5832 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5833 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5834 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5835 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5836 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5837 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5838 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5839 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5840 *
5841 * 1111 0000 1111 0101 = 0xf0f5
5842 */
5843
5844 usc_OutReg( info, PCR, 0xf0f5 );
5845
5846
5847 /*
5848 * Input/Output Control Register
5849 *
5850 * <15..14> 00 CTS is active low input
5851 * <13..12> 00 DCD is active low input
5852 * <11..10> 00 TxREQ pin is input (DSR)
5853 * <9..8> 00 RxREQ pin is input (RI)
5854 * <7..6> 00 TxD is output (Transmit Data)
5855 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock)
5856 * <2..0> 100 RxC is Output (drive with BRG0)
5857 *
5858 * 0000 0000 0000 0100 = 0x0004
5859 */
5860
5861 usc_OutReg( info, IOCR, 0x0004 );
5862
5863 } /* end of usc_reset() */
5864
5865 /* usc_set_async_mode()
5866 *
5867 * Program adapter for asynchronous communications.
5868 *
5869 * Arguments: info pointer to device instance data
5870 * Return Value: None
5871 */
5872 static void usc_set_async_mode( struct mgsl_struct *info )
5873 {
5874 u16 RegValue;
5875
5876 /* disable interrupts while programming USC */
5877 usc_DisableMasterIrqBit( info );
5878
5879 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5880 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5881
5882 usc_loopback_frame( info );
5883
5884 /* Channel mode Register (CMR)
5885 *
5886 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5887 * <13..12> 00 00 = 16X Clock
5888 * <11..8> 0000 Transmitter mode = Asynchronous
5889 * <7..6> 00 reserved?
5890 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5891 * <3..0> 0000 Receiver mode = Asynchronous
5892 *
5893 * 0000 0000 0000 0000 = 0x0
5894 */
5895
5896 RegValue = 0;
5897 if ( info->params.stop_bits != 1 )
5898 RegValue |= BIT14;
5899 usc_OutReg( info, CMR, RegValue );
5900
5901
5902 /* Receiver mode Register (RMR)
5903 *
5904 * <15..13> 000 encoding = None
5905 * <12..08> 00000 reserved (Sync Only)
5906 * <7..6> 00 Even parity
5907 * <5> 0 parity disabled
5908 * <4..2> 000 Receive Char Length = 8 bits
5909 * <1..0> 00 Disable Receiver
5910 *
5911 * 0000 0000 0000 0000 = 0x0
5912 */
5913
5914 RegValue = 0;
5915
5916 if ( info->params.data_bits != 8 )
5917 RegValue |= BIT4+BIT3+BIT2;
5918
5919 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5920 RegValue |= BIT5;
5921 if ( info->params.parity != ASYNC_PARITY_ODD )
5922 RegValue |= BIT6;
5923 }
5924
5925 usc_OutReg( info, RMR, RegValue );
5926
5927
5928 /* Set IRQ trigger level */
5929
5930 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5931
5932
5933 /* Receive Interrupt Control Register (RICR)
5934 *
5935 * <15..8> ? RxFIFO IRQ Request Level
5936 *
5937 * Note: For async mode the receive FIFO level must be set
5938 * to 0 to avoid the situation where the FIFO contains fewer bytes
5939 * than the trigger level and no more data is expected.
5940 *
5941 * <7> 0 Exited Hunt IA (Interrupt Arm)
5942 * <6> 0 Idle Received IA
5943 * <5> 0 Break/Abort IA
5944 * <4> 0 Rx Bound IA
5945 * <3> 0 Queued status reflects oldest byte in FIFO
5946 * <2> 0 Abort/PE IA
5947 * <1> 0 Rx Overrun IA
5948 * <0> 0 Select TC0 value for readback
5949 *
5950 * 0000 0000 0100 0000 = 0x0000 + (FIFOLEVEL in MSB)
5951 */
5952
5953 usc_OutReg( info, RICR, 0x0000 );
5954
5955 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5956 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5957
5958
5959 /* Transmit mode Register (TMR)
5960 *
5961 * <15..13> 000 encoding = None
5962 * <12..08> 00000 reserved (Sync Only)
5963 * <7..6> 00 Transmit parity Even
5964 * <5> 0 Transmit parity Disabled
5965 * <4..2> 000 Tx Char Length = 8 bits
5966 * <1..0> 00 Disable Transmitter
5967 *
5968 * 0000 0000 0000 0000 = 0x0
5969 */
5970
5971 RegValue = 0;
5972
5973 if ( info->params.data_bits != 8 )
5974 RegValue |= BIT4+BIT3+BIT2;
5975
5976 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5977 RegValue |= BIT5;
5978 if ( info->params.parity != ASYNC_PARITY_ODD )
5979 RegValue |= BIT6;
5980 }
5981
5982 usc_OutReg( info, TMR, RegValue );
5983
5984 usc_set_txidle( info );
5985
5986
5987 /* Set IRQ trigger level */
5988
5989 usc_TCmd( info, TCmd_SelectTicrIntLevel );
5990
5991
5992 /* Transmit Interrupt Control Register (TICR)
5993 *
5994 * <15..8> ? Transmit FIFO IRQ Level
5995 * <7> 0 Present IA (Interrupt Arm)
5996 * <6> 1 Idle Sent IA
5997 * <5> 0 Abort Sent IA
5998 * <4> 0 EOF/EOM Sent IA
5999 * <3> 0 CRC Sent IA
6000 * <2> 0 1 = Wait for SW Trigger to Start Frame
6001 * <1> 0 Tx Underrun IA
6002 * <0> 0 TC0 constant on read back
6003 *
6004 * 0000 0000 0100 0000 = 0x0040
6005 */
6006
6007 usc_OutReg( info, TICR, 0x1f40 );
6008
6009 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6010 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6011
6012 usc_enable_async_clock( info, info->params.data_rate );
6013
6014
6015 /* Channel Control/status Register (CCSR)
6016 *
6017 * <15> X RCC FIFO Overflow status (RO)
6018 * <14> X RCC FIFO Not Empty status (RO)
6019 * <13> 0 1 = Clear RCC FIFO (WO)
6020 * <12> X DPLL in Sync status (RO)
6021 * <11> X DPLL 2 Missed Clocks status (RO)
6022 * <10> X DPLL 1 Missed Clock status (RO)
6023 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6024 * <7> X SDLC Loop On status (RO)
6025 * <6> X SDLC Loop Send status (RO)
6026 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6027 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6028 * <1..0> 00 reserved
6029 *
6030 * 0000 0000 0010 0000 = 0x0020
6031 */
6032
6033 usc_OutReg( info, CCSR, 0x0020 );
6034
6035 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6036 RECEIVE_DATA + RECEIVE_STATUS );
6037
6038 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6039 RECEIVE_DATA + RECEIVE_STATUS );
6040
6041 usc_EnableMasterIrqBit( info );
6042
6043 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6044 /* Enable INTEN (Port 6, Bit12) */
6045 /* This connects the IRQ request signal to the ISA bus */
6046 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6047 }
6048
6049 if (info->params.loopback) {
6050 info->loopback_bits = 0x300;
6051 outw(0x0300, info->io_base + CCAR);
6052 }
6053
6054 } /* end of usc_set_async_mode() */
6055
6056 /* usc_loopback_frame()
6057 *
6058 * Loop back a small (2 byte) dummy SDLC frame.
6059 * Interrupts and DMA are NOT used. The purpose of this is to
6060 * clear any 'stale' status info left over from running in async mode.
6061 *
6062 * The 16C32 shows the strange behaviour of marking the 1st
6063 * received SDLC frame with a CRC error even when there is no
6064 * CRC error. To get around this a small dummy from of 2 bytes
6065 * is looped back when switching from async to sync mode.
6066 *
6067 * Arguments: info pointer to device instance data
6068 * Return Value: None
6069 */
6070 static void usc_loopback_frame( struct mgsl_struct *info )
6071 {
6072 int i;
6073 unsigned long oldmode = info->params.mode;
6074
6075 info->params.mode = MGSL_MODE_HDLC;
6076
6077 usc_DisableMasterIrqBit( info );
6078
6079 usc_set_sdlc_mode( info );
6080 usc_enable_loopback( info, 1 );
6081
6082 /* Write 16-bit Time Constant for BRG0 */
6083 usc_OutReg( info, TC0R, 0 );
6084
6085 /* Channel Control Register (CCR)
6086 *
6087 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6088 * <13> 0 Trigger Tx on SW Command Disabled
6089 * <12> 0 Flag Preamble Disabled
6090 * <11..10> 00 Preamble Length = 8-Bits
6091 * <9..8> 01 Preamble Pattern = flags
6092 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6093 * <5> 0 Trigger Rx on SW Command Disabled
6094 * <4..0> 0 reserved
6095 *
6096 * 0000 0001 0000 0000 = 0x0100
6097 */
6098
6099 usc_OutReg( info, CCR, 0x0100 );
6100
6101 /* SETUP RECEIVER */
6102 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6103 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6104
6105 /* SETUP TRANSMITTER */
6106 /* Program the Transmit Character Length Register (TCLR) */
6107 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6108 usc_OutReg( info, TCLR, 2 );
6109 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6110
6111 /* unlatch Tx status bits, and start transmit channel. */
6112 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6113 outw(0,info->io_base + DATAREG);
6114
6115 /* ENABLE TRANSMITTER */
6116 usc_TCmd( info, TCmd_SendFrame );
6117 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6118
6119 /* WAIT FOR RECEIVE COMPLETE */
6120 for (i=0 ; i<1000 ; i++)
6121 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6122 break;
6123
6124 /* clear Internal Data loopback mode */
6125 usc_enable_loopback(info, 0);
6126
6127 usc_EnableMasterIrqBit(info);
6128
6129 info->params.mode = oldmode;
6130
6131 } /* end of usc_loopback_frame() */
6132
6133 /* usc_set_sync_mode() Programs the USC for SDLC communications.
6134 *
6135 * Arguments: info pointer to adapter info structure
6136 * Return Value: None
6137 */
6138 static void usc_set_sync_mode( struct mgsl_struct *info )
6139 {
6140 usc_loopback_frame( info );
6141 usc_set_sdlc_mode( info );
6142
6143 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6144 /* Enable INTEN (Port 6, Bit12) */
6145 /* This connects the IRQ request signal to the ISA bus */
6146 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6147 }
6148
6149 usc_enable_aux_clock(info, info->params.clock_speed);
6150
6151 if (info->params.loopback)
6152 usc_enable_loopback(info,1);
6153
6154 } /* end of mgsl_set_sync_mode() */
6155
6156 /* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6157 *
6158 * Arguments: info pointer to device instance data
6159 * Return Value: None
6160 */
6161 static void usc_set_txidle( struct mgsl_struct *info )
6162 {
6163 u16 usc_idle_mode = IDLEMODE_FLAGS;
6164
6165 /* Map API idle mode to USC register bits */
6166
6167 switch( info->idle_mode ){
6168 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6169 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6170 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6171 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6172 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6173 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6174 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6175 }
6176
6177 info->usc_idle_mode = usc_idle_mode;
6178 //usc_OutReg(info, TCSR, usc_idle_mode);
6179 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6180 info->tcsr_value += usc_idle_mode;
6181 usc_OutReg(info, TCSR, info->tcsr_value);
6182
6183 /*
6184 * if SyncLink WAN adapter is running in external sync mode, the
6185 * transmitter has been set to Monosync in order to try to mimic
6186 * a true raw outbound bit stream. Monosync still sends an open/close
6187 * sync char at the start/end of a frame. Try to match those sync
6188 * patterns to the idle mode set here
6189 */
6190 if ( info->params.mode == MGSL_MODE_RAW ) {
6191 unsigned char syncpat = 0;
6192 switch( info->idle_mode ) {
6193 case HDLC_TXIDLE_FLAGS:
6194 syncpat = 0x7e;
6195 break;
6196 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6197 syncpat = 0x55;
6198 break;
6199 case HDLC_TXIDLE_ZEROS:
6200 case HDLC_TXIDLE_SPACE:
6201 syncpat = 0x00;
6202 break;
6203 case HDLC_TXIDLE_ONES:
6204 case HDLC_TXIDLE_MARK:
6205 syncpat = 0xff;
6206 break;
6207 case HDLC_TXIDLE_ALT_MARK_SPACE:
6208 syncpat = 0xaa;
6209 break;
6210 }
6211
6212 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6213 }
6214
6215 } /* end of usc_set_txidle() */
6216
6217 /* usc_get_serial_signals()
6218 *
6219 * Query the adapter for the state of the V24 status (input) signals.
6220 *
6221 * Arguments: info pointer to device instance data
6222 * Return Value: None
6223 */
6224 static void usc_get_serial_signals( struct mgsl_struct *info )
6225 {
6226 u16 status;
6227
6228 /* clear all serial signals except DTR and RTS */
6229 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6230
6231 /* Read the Misc Interrupt status Register (MISR) to get */
6232 /* the V24 status signals. */
6233
6234 status = usc_InReg( info, MISR );
6235
6236 /* set serial signal bits to reflect MISR */
6237
6238 if ( status & MISCSTATUS_CTS )
6239 info->serial_signals |= SerialSignal_CTS;
6240
6241 if ( status & MISCSTATUS_DCD )
6242 info->serial_signals |= SerialSignal_DCD;
6243
6244 if ( status & MISCSTATUS_RI )
6245 info->serial_signals |= SerialSignal_RI;
6246
6247 if ( status & MISCSTATUS_DSR )
6248 info->serial_signals |= SerialSignal_DSR;
6249
6250 } /* end of usc_get_serial_signals() */
6251
6252 /* usc_set_serial_signals()
6253 *
6254 * Set the state of DTR and RTS based on contents of
6255 * serial_signals member of device extension.
6256 *
6257 * Arguments: info pointer to device instance data
6258 * Return Value: None
6259 */
6260 static void usc_set_serial_signals( struct mgsl_struct *info )
6261 {
6262 u16 Control;
6263 unsigned char V24Out = info->serial_signals;
6264
6265 /* get the current value of the Port Control Register (PCR) */
6266
6267 Control = usc_InReg( info, PCR );
6268
6269 if ( V24Out & SerialSignal_RTS )
6270 Control &= ~(BIT6);
6271 else
6272 Control |= BIT6;
6273
6274 if ( V24Out & SerialSignal_DTR )
6275 Control &= ~(BIT4);
6276 else
6277 Control |= BIT4;
6278
6279 usc_OutReg( info, PCR, Control );
6280
6281 } /* end of usc_set_serial_signals() */
6282
6283 /* usc_enable_async_clock()
6284 *
6285 * Enable the async clock at the specified frequency.
6286 *
6287 * Arguments: info pointer to device instance data
6288 * data_rate data rate of clock in bps
6289 * 0 disables the AUX clock.
6290 * Return Value: None
6291 */
6292 static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6293 {
6294 if ( data_rate ) {
6295 /*
6296 * Clock mode Control Register (CMCR)
6297 *
6298 * <15..14> 00 counter 1 Disabled
6299 * <13..12> 00 counter 0 Disabled
6300 * <11..10> 11 BRG1 Input is TxC Pin
6301 * <9..8> 11 BRG0 Input is TxC Pin
6302 * <7..6> 01 DPLL Input is BRG1 Output
6303 * <5..3> 100 TxCLK comes from BRG0
6304 * <2..0> 100 RxCLK comes from BRG0
6305 *
6306 * 0000 1111 0110 0100 = 0x0f64
6307 */
6308
6309 usc_OutReg( info, CMCR, 0x0f64 );
6310
6311
6312 /*
6313 * Write 16-bit Time Constant for BRG0
6314 * Time Constant = (ClkSpeed / data_rate) - 1
6315 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6316 */
6317
6318 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6319 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6320 else
6321 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
6322
6323
6324 /*
6325 * Hardware Configuration Register (HCR)
6326 * Clear Bit 1, BRG0 mode = Continuous
6327 * Set Bit 0 to enable BRG0.
6328 */
6329
6330 usc_OutReg( info, HCR,
6331 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6332
6333
6334 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6335
6336 usc_OutReg( info, IOCR,
6337 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6338 } else {
6339 /* data rate == 0 so turn off BRG0 */
6340 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6341 }
6342
6343 } /* end of usc_enable_async_clock() */
6344
6345 /*
6346 * Buffer Structures:
6347 *
6348 * Normal memory access uses virtual addresses that can make discontiguous
6349 * physical memory pages appear to be contiguous in the virtual address
6350 * space (the processors memory mapping handles the conversions).
6351 *
6352 * DMA transfers require physically contiguous memory. This is because
6353 * the DMA system controller and DMA bus masters deal with memory using
6354 * only physical addresses.
6355 *
6356 * This causes a problem under Windows NT when large DMA buffers are
6357 * needed. Fragmentation of the nonpaged pool prevents allocations of
6358 * physically contiguous buffers larger than the PAGE_SIZE.
6359 *
6360 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6361 * allows DMA transfers to physically discontiguous buffers. Information
6362 * about each data transfer buffer is contained in a memory structure
6363 * called a 'buffer entry'. A list of buffer entries is maintained
6364 * to track and control the use of the data transfer buffers.
6365 *
6366 * To support this strategy we will allocate sufficient PAGE_SIZE
6367 * contiguous memory buffers to allow for the total required buffer
6368 * space.
6369 *
6370 * The 16C32 accesses the list of buffer entries using Bus Master
6371 * DMA. Control information is read from the buffer entries by the
6372 * 16C32 to control data transfers. status information is written to
6373 * the buffer entries by the 16C32 to indicate the status of completed
6374 * transfers.
6375 *
6376 * The CPU writes control information to the buffer entries to control
6377 * the 16C32 and reads status information from the buffer entries to
6378 * determine information about received and transmitted frames.
6379 *
6380 * Because the CPU and 16C32 (adapter) both need simultaneous access
6381 * to the buffer entries, the buffer entry memory is allocated with
6382 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6383 * entry list to PAGE_SIZE.
6384 *
6385 * The actual data buffers on the other hand will only be accessed
6386 * by the CPU or the adapter but not by both simultaneously. This allows
6387 * Scatter/Gather packet based DMA procedures for using physically
6388 * discontiguous pages.
6389 */
6390
6391 /*
6392 * mgsl_reset_tx_dma_buffers()
6393 *
6394 * Set the count for all transmit buffers to 0 to indicate the
6395 * buffer is available for use and set the current buffer to the
6396 * first buffer. This effectively makes all buffers free and
6397 * discards any data in buffers.
6398 *
6399 * Arguments: info pointer to device instance data
6400 * Return Value: None
6401 */
6402 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6403 {
6404 unsigned int i;
6405
6406 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6407 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6408 }
6409
6410 info->current_tx_buffer = 0;
6411 info->start_tx_dma_buffer = 0;
6412 info->tx_dma_buffers_used = 0;
6413
6414 info->get_tx_holding_index = 0;
6415 info->put_tx_holding_index = 0;
6416 info->tx_holding_count = 0;
6417
6418 } /* end of mgsl_reset_tx_dma_buffers() */
6419
6420 /*
6421 * num_free_tx_dma_buffers()
6422 *
6423 * returns the number of free tx dma buffers available
6424 *
6425 * Arguments: info pointer to device instance data
6426 * Return Value: number of free tx dma buffers
6427 */
6428 static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6429 {
6430 return info->tx_buffer_count - info->tx_dma_buffers_used;
6431 }
6432
6433 /*
6434 * mgsl_reset_rx_dma_buffers()
6435 *
6436 * Set the count for all receive buffers to DMABUFFERSIZE
6437 * and set the current buffer to the first buffer. This effectively
6438 * makes all buffers free and discards any data in buffers.
6439 *
6440 * Arguments: info pointer to device instance data
6441 * Return Value: None
6442 */
6443 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6444 {
6445 unsigned int i;
6446
6447 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6448 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6449 // info->rx_buffer_list[i].count = DMABUFFERSIZE;
6450 // info->rx_buffer_list[i].status = 0;
6451 }
6452
6453 info->current_rx_buffer = 0;
6454
6455 } /* end of mgsl_reset_rx_dma_buffers() */
6456
6457 /*
6458 * mgsl_free_rx_frame_buffers()
6459 *
6460 * Free the receive buffers used by a received SDLC
6461 * frame such that the buffers can be reused.
6462 *
6463 * Arguments:
6464 *
6465 * info pointer to device instance data
6466 * StartIndex index of 1st receive buffer of frame
6467 * EndIndex index of last receive buffer of frame
6468 *
6469 * Return Value: None
6470 */
6471 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6472 {
6473 bool Done = false;
6474 DMABUFFERENTRY *pBufEntry;
6475 unsigned int Index;
6476
6477 /* Starting with 1st buffer entry of the frame clear the status */
6478 /* field and set the count field to DMA Buffer Size. */
6479
6480 Index = StartIndex;
6481
6482 while( !Done ) {
6483 pBufEntry = &(info->rx_buffer_list[Index]);
6484
6485 if ( Index == EndIndex ) {
6486 /* This is the last buffer of the frame! */
6487 Done = true;
6488 }
6489
6490 /* reset current buffer for reuse */
6491 // pBufEntry->status = 0;
6492 // pBufEntry->count = DMABUFFERSIZE;
6493 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6494
6495 /* advance to next buffer entry in linked list */
6496 Index++;
6497 if ( Index == info->rx_buffer_count )
6498 Index = 0;
6499 }
6500
6501 /* set current buffer to next buffer after last buffer of frame */
6502 info->current_rx_buffer = Index;
6503
6504 } /* end of free_rx_frame_buffers() */
6505
6506 /* mgsl_get_rx_frame()
6507 *
6508 * This function attempts to return a received SDLC frame from the
6509 * receive DMA buffers. Only frames received without errors are returned.
6510 *
6511 * Arguments: info pointer to device extension
6512 * Return Value: true if frame returned, otherwise false
6513 */
6514 static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6515 {
6516 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6517 unsigned short status;
6518 DMABUFFERENTRY *pBufEntry;
6519 unsigned int framesize = 0;
6520 bool ReturnCode = false;
6521 unsigned long flags;
6522 struct tty_struct *tty = info->port.tty;
6523 bool return_frame = false;
6524
6525 /*
6526 * current_rx_buffer points to the 1st buffer of the next available
6527 * receive frame. To find the last buffer of the frame look for
6528 * a non-zero status field in the buffer entries. (The status
6529 * field is set by the 16C32 after completing a receive frame.
6530 */
6531
6532 StartIndex = EndIndex = info->current_rx_buffer;
6533
6534 while( !info->rx_buffer_list[EndIndex].status ) {
6535 /*
6536 * If the count field of the buffer entry is non-zero then
6537 * this buffer has not been used. (The 16C32 clears the count
6538 * field when it starts using the buffer.) If an unused buffer
6539 * is encountered then there are no frames available.
6540 */
6541
6542 if ( info->rx_buffer_list[EndIndex].count )
6543 goto Cleanup;
6544
6545 /* advance to next buffer entry in linked list */
6546 EndIndex++;
6547 if ( EndIndex == info->rx_buffer_count )
6548 EndIndex = 0;
6549
6550 /* if entire list searched then no frame available */
6551 if ( EndIndex == StartIndex ) {
6552 /* If this occurs then something bad happened,
6553 * all buffers have been 'used' but none mark
6554 * the end of a frame. Reset buffers and receiver.
6555 */
6556
6557 if ( info->rx_enabled ){
6558 spin_lock_irqsave(&info->irq_spinlock,flags);
6559 usc_start_receiver(info);
6560 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6561 }
6562 goto Cleanup;
6563 }
6564 }
6565
6566
6567 /* check status of receive frame */
6568
6569 status = info->rx_buffer_list[EndIndex].status;
6570
6571 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6572 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6573 if ( status & RXSTATUS_SHORT_FRAME )
6574 info->icount.rxshort++;
6575 else if ( status & RXSTATUS_ABORT )
6576 info->icount.rxabort++;
6577 else if ( status & RXSTATUS_OVERRUN )
6578 info->icount.rxover++;
6579 else {
6580 info->icount.rxcrc++;
6581 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6582 return_frame = true;
6583 }
6584 framesize = 0;
6585 #if SYNCLINK_GENERIC_HDLC
6586 {
6587 info->netdev->stats.rx_errors++;
6588 info->netdev->stats.rx_frame_errors++;
6589 }
6590 #endif
6591 } else
6592 return_frame = true;
6593
6594 if ( return_frame ) {
6595 /* receive frame has no errors, get frame size.
6596 * The frame size is the starting value of the RCC (which was
6597 * set to 0xffff) minus the ending value of the RCC (decremented
6598 * once for each receive character) minus 2 for the 16-bit CRC.
6599 */
6600
6601 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6602
6603 /* adjust frame size for CRC if any */
6604 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6605 framesize -= 2;
6606 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6607 framesize -= 4;
6608 }
6609
6610 if ( debug_level >= DEBUG_LEVEL_BH )
6611 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6612 __FILE__,__LINE__,info->device_name,status,framesize);
6613
6614 if ( debug_level >= DEBUG_LEVEL_DATA )
6615 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6616 min_t(int, framesize, DMABUFFERSIZE),0);
6617
6618 if (framesize) {
6619 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6620 ((framesize+1) > info->max_frame_size) ) ||
6621 (framesize > info->max_frame_size) )
6622 info->icount.rxlong++;
6623 else {
6624 /* copy dma buffer(s) to contiguous intermediate buffer */
6625 int copy_count = framesize;
6626 int index = StartIndex;
6627 unsigned char *ptmp = info->intermediate_rxbuffer;
6628
6629 if ( !(status & RXSTATUS_CRC_ERROR))
6630 info->icount.rxok++;
6631
6632 while(copy_count) {
6633 int partial_count;
6634 if ( copy_count > DMABUFFERSIZE )
6635 partial_count = DMABUFFERSIZE;
6636 else
6637 partial_count = copy_count;
6638
6639 pBufEntry = &(info->rx_buffer_list[index]);
6640 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6641 ptmp += partial_count;
6642 copy_count -= partial_count;
6643
6644 if ( ++index == info->rx_buffer_count )
6645 index = 0;
6646 }
6647
6648 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6649 ++framesize;
6650 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6651 RX_CRC_ERROR :
6652 RX_OK);
6653
6654 if ( debug_level >= DEBUG_LEVEL_DATA )
6655 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6656 __FILE__,__LINE__,info->device_name,
6657 *ptmp);
6658 }
6659
6660 #if SYNCLINK_GENERIC_HDLC
6661 if (info->netcount)
6662 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6663 else
6664 #endif
6665 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6666 }
6667 }
6668 /* Free the buffers used by this frame. */
6669 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6670
6671 ReturnCode = true;
6672
6673 Cleanup:
6674
6675 if ( info->rx_enabled && info->rx_overflow ) {
6676 /* The receiver needs to restarted because of
6677 * a receive overflow (buffer or FIFO). If the
6678 * receive buffers are now empty, then restart receiver.
6679 */
6680
6681 if ( !info->rx_buffer_list[EndIndex].status &&
6682 info->rx_buffer_list[EndIndex].count ) {
6683 spin_lock_irqsave(&info->irq_spinlock,flags);
6684 usc_start_receiver(info);
6685 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6686 }
6687 }
6688
6689 return ReturnCode;
6690
6691 } /* end of mgsl_get_rx_frame() */
6692
6693 /* mgsl_get_raw_rx_frame()
6694 *
6695 * This function attempts to return a received frame from the
6696 * receive DMA buffers when running in external loop mode. In this mode,
6697 * we will return at most one DMABUFFERSIZE frame to the application.
6698 * The USC receiver is triggering off of DCD going active to start a new
6699 * frame, and DCD going inactive to terminate the frame (similar to
6700 * processing a closing flag character).
6701 *
6702 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6703 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6704 * status field and the RCC field will indicate the length of the
6705 * entire received frame. We take this RCC field and get the modulus
6706 * of RCC and DMABUFFERSIZE to determine if number of bytes in the
6707 * last Rx DMA buffer and return that last portion of the frame.
6708 *
6709 * Arguments: info pointer to device extension
6710 * Return Value: true if frame returned, otherwise false
6711 */
6712 static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6713 {
6714 unsigned int CurrentIndex, NextIndex;
6715 unsigned short status;
6716 DMABUFFERENTRY *pBufEntry;
6717 unsigned int framesize = 0;
6718 bool ReturnCode = false;
6719 unsigned long flags;
6720 struct tty_struct *tty = info->port.tty;
6721
6722 /*
6723 * current_rx_buffer points to the 1st buffer of the next available
6724 * receive frame. The status field is set by the 16C32 after
6725 * completing a receive frame. If the status field of this buffer
6726 * is zero, either the USC is still filling this buffer or this
6727 * is one of a series of buffers making up a received frame.
6728 *
6729 * If the count field of this buffer is zero, the USC is either
6730 * using this buffer or has used this buffer. Look at the count
6731 * field of the next buffer. If that next buffer's count is
6732 * non-zero, the USC is still actively using the current buffer.
6733 * Otherwise, if the next buffer's count field is zero, the
6734 * current buffer is complete and the USC is using the next
6735 * buffer.
6736 */
6737 CurrentIndex = NextIndex = info->current_rx_buffer;
6738 ++NextIndex;
6739 if ( NextIndex == info->rx_buffer_count )
6740 NextIndex = 0;
6741
6742 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6743 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6744 info->rx_buffer_list[NextIndex].count == 0)) {
6745 /*
6746 * Either the status field of this dma buffer is non-zero
6747 * (indicating the last buffer of a receive frame) or the next
6748 * buffer is marked as in use -- implying this buffer is complete
6749 * and an intermediate buffer for this received frame.
6750 */
6751
6752 status = info->rx_buffer_list[CurrentIndex].status;
6753
6754 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6755 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6756 if ( status & RXSTATUS_SHORT_FRAME )
6757 info->icount.rxshort++;
6758 else if ( status & RXSTATUS_ABORT )
6759 info->icount.rxabort++;
6760 else if ( status & RXSTATUS_OVERRUN )
6761 info->icount.rxover++;
6762 else
6763 info->icount.rxcrc++;
6764 framesize = 0;
6765 } else {
6766 /*
6767 * A receive frame is available, get frame size and status.
6768 *
6769 * The frame size is the starting value of the RCC (which was
6770 * set to 0xffff) minus the ending value of the RCC (decremented
6771 * once for each receive character) minus 2 or 4 for the 16-bit
6772 * or 32-bit CRC.
6773 *
6774 * If the status field is zero, this is an intermediate buffer.
6775 * It's size is 4K.
6776 *
6777 * If the DMA Buffer Entry's Status field is non-zero, the
6778 * receive operation completed normally (ie: DCD dropped). The
6779 * RCC field is valid and holds the received frame size.
6780 * It is possible that the RCC field will be zero on a DMA buffer
6781 * entry with a non-zero status. This can occur if the total
6782 * frame size (number of bytes between the time DCD goes active
6783 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6784 * case the 16C32 has underrun on the RCC count and appears to
6785 * stop updating this counter to let us know the actual received
6786 * frame size. If this happens (non-zero status and zero RCC),
6787 * simply return the entire RxDMA Buffer
6788 */
6789 if ( status ) {
6790 /*
6791 * In the event that the final RxDMA Buffer is
6792 * terminated with a non-zero status and the RCC
6793 * field is zero, we interpret this as the RCC
6794 * having underflowed (received frame > 65535 bytes).
6795 *
6796 * Signal the event to the user by passing back
6797 * a status of RxStatus_CrcError returning the full
6798 * buffer and let the app figure out what data is
6799 * actually valid
6800 */
6801 if ( info->rx_buffer_list[CurrentIndex].rcc )
6802 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6803 else
6804 framesize = DMABUFFERSIZE;
6805 }
6806 else
6807 framesize = DMABUFFERSIZE;
6808 }
6809
6810 if ( framesize > DMABUFFERSIZE ) {
6811 /*
6812 * if running in raw sync mode, ISR handler for
6813 * End Of Buffer events terminates all buffers at 4K.
6814 * If this frame size is said to be >4K, get the
6815 * actual number of bytes of the frame in this buffer.
6816 */
6817 framesize = framesize % DMABUFFERSIZE;
6818 }
6819
6820
6821 if ( debug_level >= DEBUG_LEVEL_BH )
6822 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6823 __FILE__,__LINE__,info->device_name,status,framesize);
6824
6825 if ( debug_level >= DEBUG_LEVEL_DATA )
6826 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6827 min_t(int, framesize, DMABUFFERSIZE),0);
6828
6829 if (framesize) {
6830 /* copy dma buffer(s) to contiguous intermediate buffer */
6831 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6832
6833 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6834 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6835 info->icount.rxok++;
6836
6837 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6838 }
6839
6840 /* Free the buffers used by this frame. */
6841 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6842
6843 ReturnCode = true;
6844 }
6845
6846
6847 if ( info->rx_enabled && info->rx_overflow ) {
6848 /* The receiver needs to restarted because of
6849 * a receive overflow (buffer or FIFO). If the
6850 * receive buffers are now empty, then restart receiver.
6851 */
6852
6853 if ( !info->rx_buffer_list[CurrentIndex].status &&
6854 info->rx_buffer_list[CurrentIndex].count ) {
6855 spin_lock_irqsave(&info->irq_spinlock,flags);
6856 usc_start_receiver(info);
6857 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6858 }
6859 }
6860
6861 return ReturnCode;
6862
6863 } /* end of mgsl_get_raw_rx_frame() */
6864
6865 /* mgsl_load_tx_dma_buffer()
6866 *
6867 * Load the transmit DMA buffer with the specified data.
6868 *
6869 * Arguments:
6870 *
6871 * info pointer to device extension
6872 * Buffer pointer to buffer containing frame to load
6873 * BufferSize size in bytes of frame in Buffer
6874 *
6875 * Return Value: None
6876 */
6877 static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6878 const char *Buffer, unsigned int BufferSize)
6879 {
6880 unsigned short Copycount;
6881 unsigned int i = 0;
6882 DMABUFFERENTRY *pBufEntry;
6883
6884 if ( debug_level >= DEBUG_LEVEL_DATA )
6885 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6886
6887 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6888 /* set CMR:13 to start transmit when
6889 * next GoAhead (abort) is received
6890 */
6891 info->cmr_value |= BIT13;
6892 }
6893
6894 /* begin loading the frame in the next available tx dma
6895 * buffer, remember it's starting location for setting
6896 * up tx dma operation
6897 */
6898 i = info->current_tx_buffer;
6899 info->start_tx_dma_buffer = i;
6900
6901 /* Setup the status and RCC (Frame Size) fields of the 1st */
6902 /* buffer entry in the transmit DMA buffer list. */
6903
6904 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6905 info->tx_buffer_list[i].rcc = BufferSize;
6906 info->tx_buffer_list[i].count = BufferSize;
6907
6908 /* Copy frame data from 1st source buffer to the DMA buffers. */
6909 /* The frame data may span multiple DMA buffers. */
6910
6911 while( BufferSize ){
6912 /* Get a pointer to next DMA buffer entry. */
6913 pBufEntry = &info->tx_buffer_list[i++];
6914
6915 if ( i == info->tx_buffer_count )
6916 i=0;
6917
6918 /* Calculate the number of bytes that can be copied from */
6919 /* the source buffer to this DMA buffer. */
6920 if ( BufferSize > DMABUFFERSIZE )
6921 Copycount = DMABUFFERSIZE;
6922 else
6923 Copycount = BufferSize;
6924
6925 /* Actually copy data from source buffer to DMA buffer. */
6926 /* Also set the data count for this individual DMA buffer. */
6927 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6928 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6929 else
6930 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6931
6932 pBufEntry->count = Copycount;
6933
6934 /* Advance source pointer and reduce remaining data count. */
6935 Buffer += Copycount;
6936 BufferSize -= Copycount;
6937
6938 ++info->tx_dma_buffers_used;
6939 }
6940
6941 /* remember next available tx dma buffer */
6942 info->current_tx_buffer = i;
6943
6944 } /* end of mgsl_load_tx_dma_buffer() */
6945
6946 /*
6947 * mgsl_register_test()
6948 *
6949 * Performs a register test of the 16C32.
6950 *
6951 * Arguments: info pointer to device instance data
6952 * Return Value: true if test passed, otherwise false
6953 */
6954 static bool mgsl_register_test( struct mgsl_struct *info )
6955 {
6956 static unsigned short BitPatterns[] =
6957 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6958 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6959 unsigned int i;
6960 bool rc = true;
6961 unsigned long flags;
6962
6963 spin_lock_irqsave(&info->irq_spinlock,flags);
6964 usc_reset(info);
6965
6966 /* Verify the reset state of some registers. */
6967
6968 if ( (usc_InReg( info, SICR ) != 0) ||
6969 (usc_InReg( info, IVR ) != 0) ||
6970 (usc_InDmaReg( info, DIVR ) != 0) ){
6971 rc = false;
6972 }
6973
6974 if ( rc ){
6975 /* Write bit patterns to various registers but do it out of */
6976 /* sync, then read back and verify values. */
6977
6978 for ( i = 0 ; i < Patterncount ; i++ ) {
6979 usc_OutReg( info, TC0R, BitPatterns[i] );
6980 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6981 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6982 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6983 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
6984 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6985
6986 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6987 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6988 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6989 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6990 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
6991 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6992 rc = false;
6993 break;
6994 }
6995 }
6996 }
6997
6998 usc_reset(info);
6999 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7000
7001 return rc;
7002
7003 } /* end of mgsl_register_test() */
7004
7005 /* mgsl_irq_test() Perform interrupt test of the 16C32.
7006 *
7007 * Arguments: info pointer to device instance data
7008 * Return Value: true if test passed, otherwise false
7009 */
7010 static bool mgsl_irq_test( struct mgsl_struct *info )
7011 {
7012 unsigned long EndTime;
7013 unsigned long flags;
7014
7015 spin_lock_irqsave(&info->irq_spinlock,flags);
7016 usc_reset(info);
7017
7018 /*
7019 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7020 * The ISR sets irq_occurred to true.
7021 */
7022
7023 info->irq_occurred = false;
7024
7025 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
7026 /* Enable INTEN (Port 6, Bit12) */
7027 /* This connects the IRQ request signal to the ISA bus */
7028 /* on the ISA adapter. This has no effect for the PCI adapter */
7029 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7030
7031 usc_EnableMasterIrqBit(info);
7032 usc_EnableInterrupts(info, IO_PIN);
7033 usc_ClearIrqPendingBits(info, IO_PIN);
7034
7035 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7036 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7037
7038 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7039
7040 EndTime=100;
7041 while( EndTime-- && !info->irq_occurred ) {
7042 msleep_interruptible(10);
7043 }
7044
7045 spin_lock_irqsave(&info->irq_spinlock,flags);
7046 usc_reset(info);
7047 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7048
7049 return info->irq_occurred;
7050
7051 } /* end of mgsl_irq_test() */
7052
7053 /* mgsl_dma_test()
7054 *
7055 * Perform a DMA test of the 16C32. A small frame is
7056 * transmitted via DMA from a transmit buffer to a receive buffer
7057 * using single buffer DMA mode.
7058 *
7059 * Arguments: info pointer to device instance data
7060 * Return Value: true if test passed, otherwise false
7061 */
7062 static bool mgsl_dma_test( struct mgsl_struct *info )
7063 {
7064 unsigned short FifoLevel;
7065 unsigned long phys_addr;
7066 unsigned int FrameSize;
7067 unsigned int i;
7068 char *TmpPtr;
7069 bool rc = true;
7070 unsigned short status=0;
7071 unsigned long EndTime;
7072 unsigned long flags;
7073 MGSL_PARAMS tmp_params;
7074
7075 /* save current port options */
7076 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7077 /* load default port options */
7078 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7079
7080 #define TESTFRAMESIZE 40
7081
7082 spin_lock_irqsave(&info->irq_spinlock,flags);
7083
7084 /* setup 16C32 for SDLC DMA transfer mode */
7085
7086 usc_reset(info);
7087 usc_set_sdlc_mode(info);
7088 usc_enable_loopback(info,1);
7089
7090 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7091 * field of the buffer entry after fetching buffer address. This
7092 * way we can detect a DMA failure for a DMA read (which should be
7093 * non-destructive to system memory) before we try and write to
7094 * memory (where a failure could corrupt system memory).
7095 */
7096
7097 /* Receive DMA mode Register (RDMR)
7098 *
7099 * <15..14> 11 DMA mode = Linked List Buffer mode
7100 * <13> 1 RSBinA/L = store Rx status Block in List entry
7101 * <12> 0 1 = Clear count of List Entry after fetching
7102 * <11..10> 00 Address mode = Increment
7103 * <9> 1 Terminate Buffer on RxBound
7104 * <8> 0 Bus Width = 16bits
7105 * <7..0> ? status Bits (write as 0s)
7106 *
7107 * 1110 0010 0000 0000 = 0xe200
7108 */
7109
7110 usc_OutDmaReg( info, RDMR, 0xe200 );
7111
7112 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7113
7114
7115 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7116
7117 FrameSize = TESTFRAMESIZE;
7118
7119 /* setup 1st transmit buffer entry: */
7120 /* with frame size and transmit control word */
7121
7122 info->tx_buffer_list[0].count = FrameSize;
7123 info->tx_buffer_list[0].rcc = FrameSize;
7124 info->tx_buffer_list[0].status = 0x4000;
7125
7126 /* build a transmit frame in 1st transmit DMA buffer */
7127
7128 TmpPtr = info->tx_buffer_list[0].virt_addr;
7129 for (i = 0; i < FrameSize; i++ )
7130 *TmpPtr++ = i;
7131
7132 /* setup 1st receive buffer entry: */
7133 /* clear status, set max receive buffer size */
7134
7135 info->rx_buffer_list[0].status = 0;
7136 info->rx_buffer_list[0].count = FrameSize + 4;
7137
7138 /* zero out the 1st receive buffer */
7139
7140 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7141
7142 /* Set count field of next buffer entries to prevent */
7143 /* 16C32 from using buffers after the 1st one. */
7144
7145 info->tx_buffer_list[1].count = 0;
7146 info->rx_buffer_list[1].count = 0;
7147
7148
7149 /***************************/
7150 /* Program 16C32 receiver. */
7151 /***************************/
7152
7153 spin_lock_irqsave(&info->irq_spinlock,flags);
7154
7155 /* setup DMA transfers */
7156 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7157
7158 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7159 phys_addr = info->rx_buffer_list[0].phys_entry;
7160 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7161 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7162
7163 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7164 usc_InDmaReg( info, RDMR );
7165 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7166
7167 /* Enable Receiver (RMR <1..0> = 10) */
7168 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7169
7170 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7171
7172
7173 /*************************************************************/
7174 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7175 /*************************************************************/
7176
7177 /* Wait 100ms for interrupt. */
7178 EndTime = jiffies + msecs_to_jiffies(100);
7179
7180 for(;;) {
7181 if (time_after(jiffies, EndTime)) {
7182 rc = false;
7183 break;
7184 }
7185
7186 spin_lock_irqsave(&info->irq_spinlock,flags);
7187 status = usc_InDmaReg( info, RDMR );
7188 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7189
7190 if ( !(status & BIT4) && (status & BIT5) ) {
7191 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7192 /* BUSY (BIT 5) is active (channel still active). */
7193 /* This means the buffer entry read has completed. */
7194 break;
7195 }
7196 }
7197
7198
7199 /******************************/
7200 /* Program 16C32 transmitter. */
7201 /******************************/
7202
7203 spin_lock_irqsave(&info->irq_spinlock,flags);
7204
7205 /* Program the Transmit Character Length Register (TCLR) */
7206 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7207
7208 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7209 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7210
7211 /* Program the address of the 1st DMA Buffer Entry in linked list */
7212
7213 phys_addr = info->tx_buffer_list[0].phys_entry;
7214 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7215 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7216
7217 /* unlatch Tx status bits, and start transmit channel. */
7218
7219 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7220 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7221
7222 /* wait for DMA controller to fill transmit FIFO */
7223
7224 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7225
7226 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7227
7228
7229 /**********************************/
7230 /* WAIT FOR TRANSMIT FIFO TO FILL */
7231 /**********************************/
7232
7233 /* Wait 100ms */
7234 EndTime = jiffies + msecs_to_jiffies(100);
7235
7236 for(;;) {
7237 if (time_after(jiffies, EndTime)) {
7238 rc = false;
7239 break;
7240 }
7241
7242 spin_lock_irqsave(&info->irq_spinlock,flags);
7243 FifoLevel = usc_InReg(info, TICR) >> 8;
7244 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7245
7246 if ( FifoLevel < 16 )
7247 break;
7248 else
7249 if ( FrameSize < 32 ) {
7250 /* This frame is smaller than the entire transmit FIFO */
7251 /* so wait for the entire frame to be loaded. */
7252 if ( FifoLevel <= (32 - FrameSize) )
7253 break;
7254 }
7255 }
7256
7257
7258 if ( rc )
7259 {
7260 /* Enable 16C32 transmitter. */
7261
7262 spin_lock_irqsave(&info->irq_spinlock,flags);
7263
7264 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7265 usc_TCmd( info, TCmd_SendFrame );
7266 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7267
7268 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7269
7270
7271 /******************************/
7272 /* WAIT FOR TRANSMIT COMPLETE */
7273 /******************************/
7274
7275 /* Wait 100ms */
7276 EndTime = jiffies + msecs_to_jiffies(100);
7277
7278 /* While timer not expired wait for transmit complete */
7279
7280 spin_lock_irqsave(&info->irq_spinlock,flags);
7281 status = usc_InReg( info, TCSR );
7282 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7283
7284 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7285 if (time_after(jiffies, EndTime)) {
7286 rc = false;
7287 break;
7288 }
7289
7290 spin_lock_irqsave(&info->irq_spinlock,flags);
7291 status = usc_InReg( info, TCSR );
7292 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7293 }
7294 }
7295
7296
7297 if ( rc ){
7298 /* CHECK FOR TRANSMIT ERRORS */
7299 if ( status & (BIT5 + BIT1) )
7300 rc = false;
7301 }
7302
7303 if ( rc ) {
7304 /* WAIT FOR RECEIVE COMPLETE */
7305
7306 /* Wait 100ms */
7307 EndTime = jiffies + msecs_to_jiffies(100);
7308
7309 /* Wait for 16C32 to write receive status to buffer entry. */
7310 status=info->rx_buffer_list[0].status;
7311 while ( status == 0 ) {
7312 if (time_after(jiffies, EndTime)) {
7313 rc = false;
7314 break;
7315 }
7316 status=info->rx_buffer_list[0].status;
7317 }
7318 }
7319
7320
7321 if ( rc ) {
7322 /* CHECK FOR RECEIVE ERRORS */
7323 status = info->rx_buffer_list[0].status;
7324
7325 if ( status & (BIT8 + BIT3 + BIT1) ) {
7326 /* receive error has occurred */
7327 rc = false;
7328 } else {
7329 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7330 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7331 rc = false;
7332 }
7333 }
7334 }
7335
7336 spin_lock_irqsave(&info->irq_spinlock,flags);
7337 usc_reset( info );
7338 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7339
7340 /* restore current port options */
7341 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7342
7343 return rc;
7344
7345 } /* end of mgsl_dma_test() */
7346
7347 /* mgsl_adapter_test()
7348 *
7349 * Perform the register, IRQ, and DMA tests for the 16C32.
7350 *
7351 * Arguments: info pointer to device instance data
7352 * Return Value: 0 if success, otherwise -ENODEV
7353 */
7354 static int mgsl_adapter_test( struct mgsl_struct *info )
7355 {
7356 if ( debug_level >= DEBUG_LEVEL_INFO )
7357 printk( "%s(%d):Testing device %s\n",
7358 __FILE__,__LINE__,info->device_name );
7359
7360 if ( !mgsl_register_test( info ) ) {
7361 info->init_error = DiagStatus_AddressFailure;
7362 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7363 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7364 return -ENODEV;
7365 }
7366
7367 if ( !mgsl_irq_test( info ) ) {
7368 info->init_error = DiagStatus_IrqFailure;
7369 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7370 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7371 return -ENODEV;
7372 }
7373
7374 if ( !mgsl_dma_test( info ) ) {
7375 info->init_error = DiagStatus_DmaFailure;
7376 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7377 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7378 return -ENODEV;
7379 }
7380
7381 if ( debug_level >= DEBUG_LEVEL_INFO )
7382 printk( "%s(%d):device %s passed diagnostics\n",
7383 __FILE__,__LINE__,info->device_name );
7384
7385 return 0;
7386
7387 } /* end of mgsl_adapter_test() */
7388
7389 /* mgsl_memory_test()
7390 *
7391 * Test the shared memory on a PCI adapter.
7392 *
7393 * Arguments: info pointer to device instance data
7394 * Return Value: true if test passed, otherwise false
7395 */
7396 static bool mgsl_memory_test( struct mgsl_struct *info )
7397 {
7398 static unsigned long BitPatterns[] =
7399 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7400 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7401 unsigned long i;
7402 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7403 unsigned long * TestAddr;
7404
7405 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7406 return true;
7407
7408 TestAddr = (unsigned long *)info->memory_base;
7409
7410 /* Test data lines with test pattern at one location. */
7411
7412 for ( i = 0 ; i < Patterncount ; i++ ) {
7413 *TestAddr = BitPatterns[i];
7414 if ( *TestAddr != BitPatterns[i] )
7415 return false;
7416 }
7417
7418 /* Test address lines with incrementing pattern over */
7419 /* entire address range. */
7420
7421 for ( i = 0 ; i < TestLimit ; i++ ) {
7422 *TestAddr = i * 4;
7423 TestAddr++;
7424 }
7425
7426 TestAddr = (unsigned long *)info->memory_base;
7427
7428 for ( i = 0 ; i < TestLimit ; i++ ) {
7429 if ( *TestAddr != i * 4 )
7430 return false;
7431 TestAddr++;
7432 }
7433
7434 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7435
7436 return true;
7437
7438 } /* End Of mgsl_memory_test() */
7439
7440
7441 /* mgsl_load_pci_memory()
7442 *
7443 * Load a large block of data into the PCI shared memory.
7444 * Use this instead of memcpy() or memmove() to move data
7445 * into the PCI shared memory.
7446 *
7447 * Notes:
7448 *
7449 * This function prevents the PCI9050 interface chip from hogging
7450 * the adapter local bus, which can starve the 16C32 by preventing
7451 * 16C32 bus master cycles.
7452 *
7453 * The PCI9050 documentation says that the 9050 will always release
7454 * control of the local bus after completing the current read
7455 * or write operation.
7456 *
7457 * It appears that as long as the PCI9050 write FIFO is full, the
7458 * PCI9050 treats all of the writes as a single burst transaction
7459 * and will not release the bus. This causes DMA latency problems
7460 * at high speeds when copying large data blocks to the shared
7461 * memory.
7462 *
7463 * This function in effect, breaks the a large shared memory write
7464 * into multiple transations by interleaving a shared memory read
7465 * which will flush the write FIFO and 'complete' the write
7466 * transation. This allows any pending DMA request to gain control
7467 * of the local bus in a timely fasion.
7468 *
7469 * Arguments:
7470 *
7471 * TargetPtr pointer to target address in PCI shared memory
7472 * SourcePtr pointer to source buffer for data
7473 * count count in bytes of data to copy
7474 *
7475 * Return Value: None
7476 */
7477 static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7478 unsigned short count )
7479 {
7480 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7481 #define PCI_LOAD_INTERVAL 64
7482
7483 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7484 unsigned short Index;
7485 unsigned long Dummy;
7486
7487 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7488 {
7489 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7490 Dummy = *((volatile unsigned long *)TargetPtr);
7491 TargetPtr += PCI_LOAD_INTERVAL;
7492 SourcePtr += PCI_LOAD_INTERVAL;
7493 }
7494
7495 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7496
7497 } /* End Of mgsl_load_pci_memory() */
7498
7499 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7500 {
7501 int i;
7502 int linecount;
7503 if (xmit)
7504 printk("%s tx data:\n",info->device_name);
7505 else
7506 printk("%s rx data:\n",info->device_name);
7507
7508 while(count) {
7509 if (count > 16)
7510 linecount = 16;
7511 else
7512 linecount = count;
7513
7514 for(i=0;i<linecount;i++)
7515 printk("%02X ",(unsigned char)data[i]);
7516 for(;i<17;i++)
7517 printk(" ");
7518 for(i=0;i<linecount;i++) {
7519 if (data[i]>=040 && data[i]<=0176)
7520 printk("%c",data[i]);
7521 else
7522 printk(".");
7523 }
7524 printk("\n");
7525
7526 data += linecount;
7527 count -= linecount;
7528 }
7529 } /* end of mgsl_trace_block() */
7530
7531 /* mgsl_tx_timeout()
7532 *
7533 * called when HDLC frame times out
7534 * update stats and do tx completion processing
7535 *
7536 * Arguments: context pointer to device instance data
7537 * Return Value: None
7538 */
7539 static void mgsl_tx_timeout(unsigned long context)
7540 {
7541 struct mgsl_struct *info = (struct mgsl_struct*)context;
7542 unsigned long flags;
7543
7544 if ( debug_level >= DEBUG_LEVEL_INFO )
7545 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7546 __FILE__,__LINE__,info->device_name);
7547 if(info->tx_active &&
7548 (info->params.mode == MGSL_MODE_HDLC ||
7549 info->params.mode == MGSL_MODE_RAW) ) {
7550 info->icount.txtimeout++;
7551 }
7552 spin_lock_irqsave(&info->irq_spinlock,flags);
7553 info->tx_active = false;
7554 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7555
7556 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7557 usc_loopmode_cancel_transmit( info );
7558
7559 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7560
7561 #if SYNCLINK_GENERIC_HDLC
7562 if (info->netcount)
7563 hdlcdev_tx_done(info);
7564 else
7565 #endif
7566 mgsl_bh_transmit(info);
7567
7568 } /* end of mgsl_tx_timeout() */
7569
7570 /* signal that there are no more frames to send, so that
7571 * line is 'released' by echoing RxD to TxD when current
7572 * transmission is complete (or immediately if no tx in progress).
7573 */
7574 static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7575 {
7576 unsigned long flags;
7577
7578 spin_lock_irqsave(&info->irq_spinlock,flags);
7579 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7580 if (info->tx_active)
7581 info->loopmode_send_done_requested = true;
7582 else
7583 usc_loopmode_send_done(info);
7584 }
7585 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7586
7587 return 0;
7588 }
7589
7590 /* release the line by echoing RxD to TxD
7591 * upon completion of a transmit frame
7592 */
7593 static void usc_loopmode_send_done( struct mgsl_struct * info )
7594 {
7595 info->loopmode_send_done_requested = false;
7596 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7597 info->cmr_value &= ~BIT13;
7598 usc_OutReg(info, CMR, info->cmr_value);
7599 }
7600
7601 /* abort a transmit in progress while in HDLC LoopMode
7602 */
7603 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7604 {
7605 /* reset tx dma channel and purge TxFifo */
7606 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7607 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7608 usc_loopmode_send_done( info );
7609 }
7610
7611 /* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7612 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7613 * we must clear CMR:13 to begin repeating TxData to RxData
7614 */
7615 static void usc_loopmode_insert_request( struct mgsl_struct * info )
7616 {
7617 info->loopmode_insert_requested = true;
7618
7619 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7620 * begin repeating TxData on RxData (complete insertion)
7621 */
7622 usc_OutReg( info, RICR,
7623 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7624
7625 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7626 info->cmr_value |= BIT13;
7627 usc_OutReg(info, CMR, info->cmr_value);
7628 }
7629
7630 /* return 1 if station is inserted into the loop, otherwise 0
7631 */
7632 static int usc_loopmode_active( struct mgsl_struct * info)
7633 {
7634 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7635 }
7636
7637 #if SYNCLINK_GENERIC_HDLC
7638
7639 /**
7640 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7641 * set encoding and frame check sequence (FCS) options
7642 *
7643 * dev pointer to network device structure
7644 * encoding serial encoding setting
7645 * parity FCS setting
7646 *
7647 * returns 0 if success, otherwise error code
7648 */
7649 static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7650 unsigned short parity)
7651 {
7652 struct mgsl_struct *info = dev_to_port(dev);
7653 unsigned char new_encoding;
7654 unsigned short new_crctype;
7655
7656 /* return error if TTY interface open */
7657 if (info->port.count)
7658 return -EBUSY;
7659
7660 switch (encoding)
7661 {
7662 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7663 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7664 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7665 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7666 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7667 default: return -EINVAL;
7668 }
7669
7670 switch (parity)
7671 {
7672 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7673 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7674 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7675 default: return -EINVAL;
7676 }
7677
7678 info->params.encoding = new_encoding;
7679 info->params.crc_type = new_crctype;
7680
7681 /* if network interface up, reprogram hardware */
7682 if (info->netcount)
7683 mgsl_program_hw(info);
7684
7685 return 0;
7686 }
7687
7688 /**
7689 * called by generic HDLC layer to send frame
7690 *
7691 * skb socket buffer containing HDLC frame
7692 * dev pointer to network device structure
7693 */
7694 static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7695 struct net_device *dev)
7696 {
7697 struct mgsl_struct *info = dev_to_port(dev);
7698 unsigned long flags;
7699
7700 if (debug_level >= DEBUG_LEVEL_INFO)
7701 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7702
7703 /* stop sending until this frame completes */
7704 netif_stop_queue(dev);
7705
7706 /* copy data to device buffers */
7707 info->xmit_cnt = skb->len;
7708 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7709
7710 /* update network statistics */
7711 dev->stats.tx_packets++;
7712 dev->stats.tx_bytes += skb->len;
7713
7714 /* done with socket buffer, so free it */
7715 dev_kfree_skb(skb);
7716
7717 /* save start time for transmit timeout detection */
7718 dev->trans_start = jiffies;
7719
7720 /* start hardware transmitter if necessary */
7721 spin_lock_irqsave(&info->irq_spinlock,flags);
7722 if (!info->tx_active)
7723 usc_start_transmitter(info);
7724 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7725
7726 return NETDEV_TX_OK;
7727 }
7728
7729 /**
7730 * called by network layer when interface enabled
7731 * claim resources and initialize hardware
7732 *
7733 * dev pointer to network device structure
7734 *
7735 * returns 0 if success, otherwise error code
7736 */
7737 static int hdlcdev_open(struct net_device *dev)
7738 {
7739 struct mgsl_struct *info = dev_to_port(dev);
7740 int rc;
7741 unsigned long flags;
7742
7743 if (debug_level >= DEBUG_LEVEL_INFO)
7744 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7745
7746 /* generic HDLC layer open processing */
7747 if ((rc = hdlc_open(dev)))
7748 return rc;
7749
7750 /* arbitrate between network and tty opens */
7751 spin_lock_irqsave(&info->netlock, flags);
7752 if (info->port.count != 0 || info->netcount != 0) {
7753 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7754 spin_unlock_irqrestore(&info->netlock, flags);
7755 return -EBUSY;
7756 }
7757 info->netcount=1;
7758 spin_unlock_irqrestore(&info->netlock, flags);
7759
7760 /* claim resources and init adapter */
7761 if ((rc = startup(info)) != 0) {
7762 spin_lock_irqsave(&info->netlock, flags);
7763 info->netcount=0;
7764 spin_unlock_irqrestore(&info->netlock, flags);
7765 return rc;
7766 }
7767
7768 /* assert DTR and RTS, apply hardware settings */
7769 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7770 mgsl_program_hw(info);
7771
7772 /* enable network layer transmit */
7773 dev->trans_start = jiffies;
7774 netif_start_queue(dev);
7775
7776 /* inform generic HDLC layer of current DCD status */
7777 spin_lock_irqsave(&info->irq_spinlock, flags);
7778 usc_get_serial_signals(info);
7779 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7780 if (info->serial_signals & SerialSignal_DCD)
7781 netif_carrier_on(dev);
7782 else
7783 netif_carrier_off(dev);
7784 return 0;
7785 }
7786
7787 /**
7788 * called by network layer when interface is disabled
7789 * shutdown hardware and release resources
7790 *
7791 * dev pointer to network device structure
7792 *
7793 * returns 0 if success, otherwise error code
7794 */
7795 static int hdlcdev_close(struct net_device *dev)
7796 {
7797 struct mgsl_struct *info = dev_to_port(dev);
7798 unsigned long flags;
7799
7800 if (debug_level >= DEBUG_LEVEL_INFO)
7801 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7802
7803 netif_stop_queue(dev);
7804
7805 /* shutdown adapter and release resources */
7806 shutdown(info);
7807
7808 hdlc_close(dev);
7809
7810 spin_lock_irqsave(&info->netlock, flags);
7811 info->netcount=0;
7812 spin_unlock_irqrestore(&info->netlock, flags);
7813
7814 return 0;
7815 }
7816
7817 /**
7818 * called by network layer to process IOCTL call to network device
7819 *
7820 * dev pointer to network device structure
7821 * ifr pointer to network interface request structure
7822 * cmd IOCTL command code
7823 *
7824 * returns 0 if success, otherwise error code
7825 */
7826 static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7827 {
7828 const size_t size = sizeof(sync_serial_settings);
7829 sync_serial_settings new_line;
7830 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7831 struct mgsl_struct *info = dev_to_port(dev);
7832 unsigned int flags;
7833
7834 if (debug_level >= DEBUG_LEVEL_INFO)
7835 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7836
7837 /* return error if TTY interface open */
7838 if (info->port.count)
7839 return -EBUSY;
7840
7841 if (cmd != SIOCWANDEV)
7842 return hdlc_ioctl(dev, ifr, cmd);
7843
7844 switch(ifr->ifr_settings.type) {
7845 case IF_GET_IFACE: /* return current sync_serial_settings */
7846
7847 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7848 if (ifr->ifr_settings.size < size) {
7849 ifr->ifr_settings.size = size; /* data size wanted */
7850 return -ENOBUFS;
7851 }
7852
7853 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7854 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7855 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7856 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7857
7858 switch (flags){
7859 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7860 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7861 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7862 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7863 default: new_line.clock_type = CLOCK_DEFAULT;
7864 }
7865
7866 new_line.clock_rate = info->params.clock_speed;
7867 new_line.loopback = info->params.loopback ? 1:0;
7868
7869 if (copy_to_user(line, &new_line, size))
7870 return -EFAULT;
7871 return 0;
7872
7873 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7874
7875 if(!capable(CAP_NET_ADMIN))
7876 return -EPERM;
7877 if (copy_from_user(&new_line, line, size))
7878 return -EFAULT;
7879
7880 switch (new_line.clock_type)
7881 {
7882 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7883 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7884 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7885 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7886 case CLOCK_DEFAULT: flags = info->params.flags &
7887 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7888 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7889 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7890 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7891 default: return -EINVAL;
7892 }
7893
7894 if (new_line.loopback != 0 && new_line.loopback != 1)
7895 return -EINVAL;
7896
7897 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7898 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7899 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7900 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7901 info->params.flags |= flags;
7902
7903 info->params.loopback = new_line.loopback;
7904
7905 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7906 info->params.clock_speed = new_line.clock_rate;
7907 else
7908 info->params.clock_speed = 0;
7909
7910 /* if network interface up, reprogram hardware */
7911 if (info->netcount)
7912 mgsl_program_hw(info);
7913 return 0;
7914
7915 default:
7916 return hdlc_ioctl(dev, ifr, cmd);
7917 }
7918 }
7919
7920 /**
7921 * called by network layer when transmit timeout is detected
7922 *
7923 * dev pointer to network device structure
7924 */
7925 static void hdlcdev_tx_timeout(struct net_device *dev)
7926 {
7927 struct mgsl_struct *info = dev_to_port(dev);
7928 unsigned long flags;
7929
7930 if (debug_level >= DEBUG_LEVEL_INFO)
7931 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7932
7933 dev->stats.tx_errors++;
7934 dev->stats.tx_aborted_errors++;
7935
7936 spin_lock_irqsave(&info->irq_spinlock,flags);
7937 usc_stop_transmitter(info);
7938 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7939
7940 netif_wake_queue(dev);
7941 }
7942
7943 /**
7944 * called by device driver when transmit completes
7945 * reenable network layer transmit if stopped
7946 *
7947 * info pointer to device instance information
7948 */
7949 static void hdlcdev_tx_done(struct mgsl_struct *info)
7950 {
7951 if (netif_queue_stopped(info->netdev))
7952 netif_wake_queue(info->netdev);
7953 }
7954
7955 /**
7956 * called by device driver when frame received
7957 * pass frame to network layer
7958 *
7959 * info pointer to device instance information
7960 * buf pointer to buffer contianing frame data
7961 * size count of data bytes in buf
7962 */
7963 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7964 {
7965 struct sk_buff *skb = dev_alloc_skb(size);
7966 struct net_device *dev = info->netdev;
7967
7968 if (debug_level >= DEBUG_LEVEL_INFO)
7969 printk("hdlcdev_rx(%s)\n", dev->name);
7970
7971 if (skb == NULL) {
7972 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7973 dev->name);
7974 dev->stats.rx_dropped++;
7975 return;
7976 }
7977
7978 memcpy(skb_put(skb, size), buf, size);
7979
7980 skb->protocol = hdlc_type_trans(skb, dev);
7981
7982 dev->stats.rx_packets++;
7983 dev->stats.rx_bytes += size;
7984
7985 netif_rx(skb);
7986 }
7987
7988 static const struct net_device_ops hdlcdev_ops = {
7989 .ndo_open = hdlcdev_open,
7990 .ndo_stop = hdlcdev_close,
7991 .ndo_change_mtu = hdlc_change_mtu,
7992 .ndo_start_xmit = hdlc_start_xmit,
7993 .ndo_do_ioctl = hdlcdev_ioctl,
7994 .ndo_tx_timeout = hdlcdev_tx_timeout,
7995 };
7996
7997 /**
7998 * called by device driver when adding device instance
7999 * do generic HDLC initialization
8000 *
8001 * info pointer to device instance information
8002 *
8003 * returns 0 if success, otherwise error code
8004 */
8005 static int hdlcdev_init(struct mgsl_struct *info)
8006 {
8007 int rc;
8008 struct net_device *dev;
8009 hdlc_device *hdlc;
8010
8011 /* allocate and initialize network and HDLC layer objects */
8012
8013 if (!(dev = alloc_hdlcdev(info))) {
8014 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8015 return -ENOMEM;
8016 }
8017
8018 /* for network layer reporting purposes only */
8019 dev->base_addr = info->io_base;
8020 dev->irq = info->irq_level;
8021 dev->dma = info->dma_level;
8022
8023 /* network layer callbacks and settings */
8024 dev->netdev_ops = &hdlcdev_ops;
8025 dev->watchdog_timeo = 10 * HZ;
8026 dev->tx_queue_len = 50;
8027
8028 /* generic HDLC layer callbacks and settings */
8029 hdlc = dev_to_hdlc(dev);
8030 hdlc->attach = hdlcdev_attach;
8031 hdlc->xmit = hdlcdev_xmit;
8032
8033 /* register objects with HDLC layer */
8034 if ((rc = register_hdlc_device(dev))) {
8035 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8036 free_netdev(dev);
8037 return rc;
8038 }
8039
8040 info->netdev = dev;
8041 return 0;
8042 }
8043
8044 /**
8045 * called by device driver when removing device instance
8046 * do generic HDLC cleanup
8047 *
8048 * info pointer to device instance information
8049 */
8050 static void hdlcdev_exit(struct mgsl_struct *info)
8051 {
8052 unregister_hdlc_device(info->netdev);
8053 free_netdev(info->netdev);
8054 info->netdev = NULL;
8055 }
8056
8057 #endif /* CONFIG_HDLC */
8058
8059
8060 static int __devinit synclink_init_one (struct pci_dev *dev,
8061 const struct pci_device_id *ent)
8062 {
8063 struct mgsl_struct *info;
8064
8065 if (pci_enable_device(dev)) {
8066 printk("error enabling pci device %p\n", dev);
8067 return -EIO;
8068 }
8069
8070 if (!(info = mgsl_allocate_device())) {
8071 printk("can't allocate device instance data.\n");
8072 return -EIO;
8073 }
8074
8075 /* Copy user configuration info to device instance data */
8076
8077 info->io_base = pci_resource_start(dev, 2);
8078 info->irq_level = dev->irq;
8079 info->phys_memory_base = pci_resource_start(dev, 3);
8080
8081 /* Because veremap only works on page boundaries we must map
8082 * a larger area than is actually implemented for the LCR
8083 * memory range. We map a full page starting at the page boundary.
8084 */
8085 info->phys_lcr_base = pci_resource_start(dev, 0);
8086 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8087 info->phys_lcr_base &= ~(PAGE_SIZE-1);
8088
8089 info->bus_type = MGSL_BUS_TYPE_PCI;
8090 info->io_addr_size = 8;
8091 info->irq_flags = IRQF_SHARED;
8092
8093 if (dev->device == 0x0210) {
8094 /* Version 1 PCI9030 based universal PCI adapter */
8095 info->misc_ctrl_value = 0x007c4080;
8096 info->hw_version = 1;
8097 } else {
8098 /* Version 0 PCI9050 based 5V PCI adapter
8099 * A PCI9050 bug prevents reading LCR registers if
8100 * LCR base address bit 7 is set. Maintain shadow
8101 * value so we can write to LCR misc control reg.
8102 */
8103 info->misc_ctrl_value = 0x087e4546;
8104 info->hw_version = 0;
8105 }
8106
8107 mgsl_add_device(info);
8108
8109 return 0;
8110 }
8111
8112 static void __devexit synclink_remove_one (struct pci_dev *dev)
8113 {
8114 }
8115