/*
 * drivers/tty/synclink.c — from the mirror_ubuntu-bionic-kernel tree,
 * commit f2c34d65614462f395627dc981c19928ce77fb7f.
 * (gitweb export header converted to a comment so the file is valid C)
 */
1 // SPDX-License-Identifier: GPL-1.0+
2 /*
3 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
4 *
5 * Device driver for Microgate SyncLink ISA and PCI
6 * high speed multiprotocol serial adapters.
7 *
8 * written by Paul Fulghum for Microgate Corporation
9 * paulkf@microgate.com
10 *
11 * Microgate and SyncLink are trademarks of Microgate Corporation
12 *
13 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
14 *
15 * Original release 01/11/99
16 *
17 * This driver is primarily intended for use in synchronous
18 * HDLC mode. Asynchronous mode is also provided.
19 *
20 * When operating in synchronous mode, each call to mgsl_write()
21 * contains exactly one complete HDLC frame. Calling mgsl_put_char
22 * will start assembling an HDLC frame that will not be sent until
23 * mgsl_flush_chars or mgsl_write is called.
24 *
25 * Synchronous receive data is reported as complete frames. To accomplish
26 * this, the TTY flip buffer is bypassed (too small to hold largest
27 * frame and may fragment frames) and the line discipline
28 * receive entry point is called directly.
29 *
30 * This driver has been tested with a slightly modified ppp.c driver
31 * for synchronous PPP.
32 *
33 * 2000/02/16
34 * Added interface for syncppp.c driver (an alternate synchronous PPP
35 * implementation that also supports Cisco HDLC). Each device instance
36 * registers as a tty device AND a network device (if dosyncppp option
37 * is set for the device). The functionality is determined by which
38 * device interface is opened.
39 *
40 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
41 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
42 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
43 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
44 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
45 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
46 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
48 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
49 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
50 * OF THE POSSIBILITY OF SUCH DAMAGE.
51 */
52
/*
 * Debug helper: trigger a software breakpoint (int 3) on x86; a no-op
 * elsewhere. The do { } while (0) form makes the macro safe to use as a
 * single statement, e.g. in "if (x) BREAKPOINT(); else ...". The original
 * "{ }" expansion produced a stray ";" there and failed to compile; the
 * asm variant also carried a trailing ";" inside the macro (doubling the
 * statement terminator at every call site).
 */
#if defined(__i386__)
# define BREAKPOINT() asm(" int $3")
#else
# define BREAKPOINT() do { } while (0)
#endif
58
59 #define MAX_ISA_DEVICES 10
60 #define MAX_PCI_DEVICES 10
61 #define MAX_TOTAL_DEVICES 20
62
63 #include <linux/module.h>
64 #include <linux/errno.h>
65 #include <linux/signal.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/pci.h>
70 #include <linux/tty.h>
71 #include <linux/tty_flip.h>
72 #include <linux/serial.h>
73 #include <linux/major.h>
74 #include <linux/string.h>
75 #include <linux/fcntl.h>
76 #include <linux/ptrace.h>
77 #include <linux/ioport.h>
78 #include <linux/mm.h>
79 #include <linux/seq_file.h>
80 #include <linux/slab.h>
81 #include <linux/delay.h>
82 #include <linux/netdevice.h>
83 #include <linux/vmalloc.h>
84 #include <linux/init.h>
85 #include <linux/ioctl.h>
86 #include <linux/synclink.h>
87
88 #include <asm/io.h>
89 #include <asm/irq.h>
90 #include <asm/dma.h>
91 #include <linux/bitops.h>
92 #include <asm/types.h>
93 #include <linux/termios.h>
94 #include <linux/workqueue.h>
95 #include <linux/hdlc.h>
96 #include <linux/dma-mapping.h>
97
98 #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
99 #define SYNCLINK_GENERIC_HDLC 1
100 #else
101 #define SYNCLINK_GENERIC_HDLC 0
102 #endif
103
/*
 * Legacy wrappers around the uaccess helpers: each performs the user-space
 * access and stores the result (0 on success, -EFAULT on fault) into the
 * caller-supplied 'error' lvalue.
 * NOTE(review): modern kernel code calls get_user()/copy_from_user() etc.
 * directly; these remain for historical call sites in this driver.
 */
#define GET_USER(error,value,addr) error = get_user(value,addr)
#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
108
109 #include <linux/uaccess.h>
110
111 #define RCLRVALUE 0xffff
112
113 static MGSL_PARAMS default_params = {
114 MGSL_MODE_HDLC, /* unsigned long mode */
115 0, /* unsigned char loopback; */
116 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
117 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
118 0, /* unsigned long clock_speed; */
119 0xff, /* unsigned char addr_filter; */
120 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
121 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
122 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
123 9600, /* unsigned long data_rate; */
124 8, /* unsigned char data_bits; */
125 1, /* unsigned char stop_bits; */
126 ASYNC_PARITY_NONE /* unsigned char parity; */
127 };
128
129 #define SHARED_MEM_ADDRESS_SIZE 0x40000
130 #define BUFFERLISTSIZE 4096
131 #define DMABUFFERSIZE 4096
132 #define MAXRXFRAMES 7
133
/*
 * One entry of the Rx/Tx DMA buffer descriptor list shared with the
 * 16C32 DMA controller. The volatile fields are rewritten by the
 * adapter while the list is active, so the CPU must re-read them from
 * memory on every access. Field layout matches the hardware's
 * expectations — do not reorder or repack.
 */
typedef struct _DMABUFFERENTRY
{
	u32 phys_addr;	/* 32-bit flat physical address of data buffer */
	volatile u16 count;	/* buffer size/data count */
	volatile u16 status;	/* Control/status field */
	volatile u16 rcc;	/* character count field */
	u16 reserved;	/* padding required by 16C32 */
	u32 link;	/* 32-bit flat link to next buffer entry */
	char *virt_addr;	/* virtual address of data buffer */
	u32 phys_entry;	/* physical address of this buffer entry */
	dma_addr_t dma_addr;	/* DMA-API handle for the buffer — presumably from the coherent alloc; allocation code not in this excerpt */
} DMABUFFERENTRY, *DMAPBUFFERENTRY;
146
147 /* The queue of BH actions to be performed */
148
149 #define BH_RECEIVE 1
150 #define BH_TRANSMIT 2
151 #define BH_STATUS 4
152
153 #define IO_PIN_SHUTDOWN_LIMIT 100
154
/*
 * Counters of modem-control signal transitions. NOTE(review): assumed
 * to be incremented by the I/O-pin ISR and consumed by the event-wait
 * ioctl logic — both are outside this excerpt; confirm against the
 * ISR and mgsl_wait_event() implementations.
 */
struct	_input_signal_events {
	int	ri_up;		/* ring indicator went active */
	int	ri_down;	/* ring indicator went inactive */
	int	dsr_up;		/* data set ready went active */
	int	dsr_down;	/* data set ready went inactive */
	int	dcd_up;		/* data carrier detect went active */
	int	dcd_down;	/* data carrier detect went inactive */
	int	cts_up;		/* clear to send went active */
	int	cts_down;	/* clear to send went inactive */
};
165
166 /* transmit holding buffer definitions*/
167 #define MAX_TX_HOLDING_BUFFERS 5
/* One queued transmit frame waiting for a free transmit DMA buffer. */
struct tx_holding_buffer {
	int buffer_size;	/* number of valid bytes in 'buffer' */
	unsigned char * buffer;	/* frame data; presumably driver-allocated — alloc/free code not in this excerpt */
};
172
173
174 /*
175 * Device instance data structure
176 */
177
/*
 * Per-adapter device instance state. One of these exists for each
 * detected ISA/PCI adapter; instances are chained via next_device.
 * Fields touched by the ISR are protected by irq_spinlock.
 */
struct mgsl_struct {
	int			magic;		/* validity-check value (MGSL_MAGIC) */
	struct tty_port		port;		/* generic tty port state */
	int			line;		/* tty line (device index) */
	int			hw_version;	/* adapter hardware revision */

	struct mgsl_icount	icount;		/* serial event/error counters */

	int			timeout;	/* transmit drain timeout */
	int			x_char;		/* xon/xoff character */
	u16			read_status_mask;
	u16			ignore_status_mask;
	unsigned char		*xmit_buf;	/* async-mode transmit ring buffer */
	int			xmit_head;
	int			xmit_tail;
	int			xmit_cnt;

	wait_queue_head_t	status_event_wait_q;
	wait_queue_head_t	event_wait_q;
	struct timer_list	tx_timer;	/* HDLC transmit timeout timer */
	struct mgsl_struct	*next_device;	/* device list link */

	spinlock_t irq_spinlock;		/* spinlock for synchronizing with ISR */
	struct work_struct task;		/* task structure for scheduling bh */

	u32 EventMask;			/* event trigger mask */
	u32 RecordedEvents;		/* pending events */

	u32 max_frame_size;		/* as set by device config */

	u32 pending_bh;			/* BH_RECEIVE/BH_TRANSMIT/BH_STATUS work bits */

	bool bh_running;		/* Protection from multiple */
	int isr_overflow;
	bool bh_requested;

	int dcd_chkcount;		/* check counts to prevent */
	int cts_chkcount;		/* too many IRQs if a signal */
	int dsr_chkcount;		/* is floating */
	int ri_chkcount;

	char *buffer_list;		/* virtual address of Rx & Tx buffer lists */
	u32 buffer_list_phys;
	dma_addr_t buffer_list_dma_addr;

	unsigned int rx_buffer_count;	/* count of total allocated Rx buffers */
	DMABUFFERENTRY *rx_buffer_list;	/* list of receive buffer entries */
	unsigned int current_rx_buffer;

	int num_tx_dma_buffers;		/* number of tx dma frames required */
	int tx_dma_buffers_used;
	unsigned int tx_buffer_count;	/* count of total allocated Tx buffers */
	DMABUFFERENTRY *tx_buffer_list;	/* list of transmit buffer entries */
	int start_tx_dma_buffer;	/* tx dma buffer to start tx dma operation */
	int current_tx_buffer;		/* next tx dma buffer to be loaded */

	unsigned char *intermediate_rxbuffer;	/* staging area for assembling a received frame */

	int num_tx_holding_buffers;	/* number of tx holding buffer allocated */
	int get_tx_holding_index;	/* next tx holding buffer for adapter to load */
	int put_tx_holding_index;	/* next tx holding buffer to store user request */
	int tx_holding_count;		/* number of tx holding buffers waiting */
	struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];

	bool rx_enabled;
	bool rx_overflow;
	bool rx_rcc_underrun;

	bool tx_enabled;
	bool tx_active;
	u32 idle_mode;			/* transmit idle pattern (IDLEMODE_*) */

	u16 cmr_value;			/* cached Channel Mode Register value */
	u16 tcsr_value;			/* cached Transmit Command/status Register value */

	char device_name[25];		/* device instance name */

	unsigned int bus_type;		/* expansion bus type (ISA,EISA,PCI) */
	unsigned char bus;		/* expansion bus number (zero based) */
	unsigned char function;		/* PCI device number */

	unsigned int io_base;		/* base I/O address of adapter */
	unsigned int io_addr_size;	/* size of the I/O address range */
	bool io_addr_requested;		/* true if I/O address requested */

	unsigned int irq_level;		/* interrupt level */
	unsigned long irq_flags;
	bool irq_requested;		/* true if IRQ requested */

	unsigned int dma_level;		/* DMA channel */
	bool dma_requested;		/* true if dma channel requested */

	u16 mbre_bit;
	u16 loopback_bits;
	u16 usc_idle_mode;

	MGSL_PARAMS params;		/* communications parameters */

	unsigned char serial_signals;	/* current serial signal states */

	bool irq_occurred;		/* for diagnostics use */
	unsigned int init_error;	/* Initialization startup error (DIAGS) */
	int	fDiagnosticsmode;	/* Driver in Diagnostic mode? (DIAGS) */

	u32 last_mem_alloc;
	unsigned char* memory_base;	/* shared memory address (PCI only) */
	u32 phys_memory_base;
	bool shared_mem_requested;

	unsigned char* lcr_base;	/* local config registers (PCI only) */
	u32 phys_lcr_base;
	u32 lcr_offset;
	bool lcr_mem_requested;

	u32 misc_ctrl_value;
	char *flag_buf;			/* per-character flag buffer for async receive */
	bool drop_rts_on_tx_done;

	bool loopmode_insert_requested;
	bool loopmode_send_done_requested;

	struct	_input_signal_events	input_signal_events;

	/* generic HDLC device parts */
	int netcount;
	spinlock_t netlock;

#if SYNCLINK_GENERIC_HDLC
	struct net_device *netdev;
#endif
};
309
310 #define MGSL_MAGIC 0x5401
311
312 /*
313 * The size of the serial xmit buffer is 1 page, or 4096 bytes
314 */
315 #ifndef SERIAL_XMIT_SIZE
316 #define SERIAL_XMIT_SIZE 4096
317 #endif
318
319 /*
320 * These macros define the offsets used in calculating the
321 * I/O address of the specified USC registers.
322 */
323
324
#define DCPIN 2		/* Bit 1 of I/O address */
#define SDPIN 4		/* Bit 2 of I/O address */

#define DCAR 0		/* DMA command/address register */
#define CCAR SDPIN	/* channel command/address register */
/*
 * Parenthesized: the original "DCPIN + SDPIN" expansion was an
 * unparenthesized macro body, so an expression like "2 * DATAREG"
 * would expand to "2 * 2 + 4" and evaluate wrongly.
 */
#define DATAREG (DCPIN + SDPIN)	/* serial data register */
#define MSBONLY 0x41
#define LSBONLY 0x40
333
334 /*
335 * These macros define the register address (ordinal number)
336 * used for writing address/value pairs to the USC.
337 */
338
339 #define CMR 0x02 /* Channel mode Register */
340 #define CCSR 0x04 /* Channel Command/status Register */
341 #define CCR 0x06 /* Channel Control Register */
342 #define PSR 0x08 /* Port status Register */
343 #define PCR 0x0a /* Port Control Register */
344 #define TMDR 0x0c /* Test mode Data Register */
345 #define TMCR 0x0e /* Test mode Control Register */
346 #define CMCR 0x10 /* Clock mode Control Register */
347 #define HCR 0x12 /* Hardware Configuration Register */
348 #define IVR 0x14 /* Interrupt Vector Register */
349 #define IOCR 0x16 /* Input/Output Control Register */
350 #define ICR 0x18 /* Interrupt Control Register */
351 #define DCCR 0x1a /* Daisy Chain Control Register */
352 #define MISR 0x1c /* Misc Interrupt status Register */
353 #define SICR 0x1e /* status Interrupt Control Register */
354 #define RDR 0x20 /* Receive Data Register */
355 #define RMR 0x22 /* Receive mode Register */
356 #define RCSR 0x24 /* Receive Command/status Register */
357 #define RICR 0x26 /* Receive Interrupt Control Register */
358 #define RSR 0x28 /* Receive Sync Register */
359 #define RCLR 0x2a /* Receive count Limit Register */
360 #define RCCR 0x2c /* Receive Character count Register */
361 #define TC0R 0x2e /* Time Constant 0 Register */
362 #define TDR 0x30 /* Transmit Data Register */
363 #define TMR 0x32 /* Transmit mode Register */
364 #define TCSR 0x34 /* Transmit Command/status Register */
365 #define TICR 0x36 /* Transmit Interrupt Control Register */
366 #define TSR 0x38 /* Transmit Sync Register */
367 #define TCLR 0x3a /* Transmit count Limit Register */
368 #define TCCR 0x3c /* Transmit Character count Register */
369 #define TC1R 0x3e /* Time Constant 1 Register */
370
371
372 /*
373 * MACRO DEFINITIONS FOR DMA REGISTERS
374 */
375
376 #define DCR 0x06 /* DMA Control Register (shared) */
377 #define DACR 0x08 /* DMA Array count Register (shared) */
378 #define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
379 #define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
380 #define DICR 0x18 /* DMA Interrupt Control Register (shared) */
381 #define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
382 #define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
383
384 #define TDMR 0x02 /* Transmit DMA mode Register */
385 #define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
386 #define TBCR 0x2a /* Transmit Byte count Register */
387 #define TARL 0x2c /* Transmit Address Register (low) */
388 #define TARU 0x2e /* Transmit Address Register (high) */
389 #define NTBCR 0x3a /* Next Transmit Byte count Register */
390 #define NTARL 0x3c /* Next Transmit Address Register (low) */
391 #define NTARU 0x3e /* Next Transmit Address Register (high) */
392
393 #define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
394 #define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
395 #define RBCR 0xaa /* Receive Byte count Register */
396 #define RARL 0xac /* Receive Address Register (low) */
397 #define RARU 0xae /* Receive Address Register (high) */
398 #define NRBCR 0xba /* Next Receive Byte count Register */
399 #define NRARL 0xbc /* Next Receive Address Register (low) */
400 #define NRARU 0xbe /* Next Receive Address Register (high) */
401
402
403 /*
404 * MACRO DEFINITIONS FOR MODEM STATUS BITS
405 */
406
407 #define MODEMSTATUS_DTR 0x80
408 #define MODEMSTATUS_DSR 0x40
409 #define MODEMSTATUS_RTS 0x20
410 #define MODEMSTATUS_CTS 0x10
411 #define MODEMSTATUS_RI 0x04
412 #define MODEMSTATUS_DCD 0x01
413
414
415 /*
416 * Channel Command/Address Register (CCAR) Command Codes
417 */
418
419 #define RTCmd_Null 0x0000
420 #define RTCmd_ResetHighestIus 0x1000
421 #define RTCmd_TriggerChannelLoadDma 0x2000
422 #define RTCmd_TriggerRxDma 0x2800
423 #define RTCmd_TriggerTxDma 0x3000
424 #define RTCmd_TriggerRxAndTxDma 0x3800
425 #define RTCmd_PurgeRxFifo 0x4800
426 #define RTCmd_PurgeTxFifo 0x5000
427 #define RTCmd_PurgeRxAndTxFifo 0x5800
428 #define RTCmd_LoadRcc 0x6800
429 #define RTCmd_LoadTcc 0x7000
430 #define RTCmd_LoadRccAndTcc 0x7800
431 #define RTCmd_LoadTC0 0x8800
432 #define RTCmd_LoadTC1 0x9000
433 #define RTCmd_LoadTC0AndTC1 0x9800
434 #define RTCmd_SerialDataLSBFirst 0xa000
435 #define RTCmd_SerialDataMSBFirst 0xa800
436 #define RTCmd_SelectBigEndian 0xb000
437 #define RTCmd_SelectLittleEndian 0xb800
438
439
440 /*
441 * DMA Command/Address Register (DCAR) Command Codes
442 */
443
444 #define DmaCmd_Null 0x0000
445 #define DmaCmd_ResetTxChannel 0x1000
446 #define DmaCmd_ResetRxChannel 0x1200
447 #define DmaCmd_StartTxChannel 0x2000
448 #define DmaCmd_StartRxChannel 0x2200
449 #define DmaCmd_ContinueTxChannel 0x3000
450 #define DmaCmd_ContinueRxChannel 0x3200
451 #define DmaCmd_PauseTxChannel 0x4000
452 #define DmaCmd_PauseRxChannel 0x4200
453 #define DmaCmd_AbortTxChannel 0x5000
454 #define DmaCmd_AbortRxChannel 0x5200
455 #define DmaCmd_InitTxChannel 0x7000
456 #define DmaCmd_InitRxChannel 0x7200
457 #define DmaCmd_ResetHighestDmaIus 0x8000
458 #define DmaCmd_ResetAllChannels 0x9000
459 #define DmaCmd_StartAllChannels 0xa000
460 #define DmaCmd_ContinueAllChannels 0xb000
461 #define DmaCmd_PauseAllChannels 0xc000
462 #define DmaCmd_AbortAllChannels 0xd000
463 #define DmaCmd_InitAllChannels 0xf000
464
465 #define TCmd_Null 0x0000
466 #define TCmd_ClearTxCRC 0x2000
467 #define TCmd_SelectTicrTtsaData 0x4000
468 #define TCmd_SelectTicrTxFifostatus 0x5000
469 #define TCmd_SelectTicrIntLevel 0x6000
470 #define TCmd_SelectTicrdma_level 0x7000
471 #define TCmd_SendFrame 0x8000
472 #define TCmd_SendAbort 0x9000
473 #define TCmd_EnableDleInsertion 0xc000
474 #define TCmd_DisableDleInsertion 0xd000
475 #define TCmd_ClearEofEom 0xe000
476 #define TCmd_SetEofEom 0xf000
477
478 #define RCmd_Null 0x0000
479 #define RCmd_ClearRxCRC 0x2000
480 #define RCmd_EnterHuntmode 0x3000
481 #define RCmd_SelectRicrRtsaData 0x4000
482 #define RCmd_SelectRicrRxFifostatus 0x5000
483 #define RCmd_SelectRicrIntLevel 0x6000
484 #define RCmd_SelectRicrdma_level 0x7000
485
486 /*
487 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
488 */
489
490 #define RECEIVE_STATUS BIT5
491 #define RECEIVE_DATA BIT4
492 #define TRANSMIT_STATUS BIT3
493 #define TRANSMIT_DATA BIT2
494 #define IO_PIN BIT1
495 #define MISC BIT0
496
497
498 /*
499 * Receive status Bits in Receive Command/status Register RCSR
500 */
501
502 #define RXSTATUS_SHORT_FRAME BIT8
503 #define RXSTATUS_CODE_VIOLATION BIT8
504 #define RXSTATUS_EXITED_HUNT BIT7
505 #define RXSTATUS_IDLE_RECEIVED BIT6
506 #define RXSTATUS_BREAK_RECEIVED BIT5
507 #define RXSTATUS_ABORT_RECEIVED BIT5
508 #define RXSTATUS_RXBOUND BIT4
509 #define RXSTATUS_CRC_ERROR BIT3
510 #define RXSTATUS_FRAMING_ERROR BIT3
511 #define RXSTATUS_ABORT BIT2
512 #define RXSTATUS_PARITY_ERROR BIT2
513 #define RXSTATUS_OVERRUN BIT1
514 #define RXSTATUS_DATA_AVAILABLE BIT0
515 #define RXSTATUS_ALL 0x01f6
516 #define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
517
518 /*
519 * Values for setting transmit idle mode in
520 * Transmit Control/status Register (TCSR)
521 */
522 #define IDLEMODE_FLAGS 0x0000
523 #define IDLEMODE_ALT_ONE_ZERO 0x0100
524 #define IDLEMODE_ZERO 0x0200
525 #define IDLEMODE_ONE 0x0300
526 #define IDLEMODE_ALT_MARK_SPACE 0x0500
527 #define IDLEMODE_SPACE 0x0600
528 #define IDLEMODE_MARK 0x0700
529 #define IDLEMODE_MASK 0x0700
530
531 /*
532 * IUSC revision identifiers
533 */
534 #define IUSC_SL1660 0x4d44
535 #define IUSC_PRE_SL1660 0x4553
536
537 /*
538 * Transmit status Bits in Transmit Command/status Register (TCSR)
539 */
540
541 #define TCSR_PRESERVE 0x0F00
542
543 #define TCSR_UNDERWAIT BIT11
544 #define TXSTATUS_PREAMBLE_SENT BIT7
545 #define TXSTATUS_IDLE_SENT BIT6
546 #define TXSTATUS_ABORT_SENT BIT5
547 #define TXSTATUS_EOF_SENT BIT4
548 #define TXSTATUS_EOM_SENT BIT4
549 #define TXSTATUS_CRC_SENT BIT3
550 #define TXSTATUS_ALL_SENT BIT2
551 #define TXSTATUS_UNDERRUN BIT1
552 #define TXSTATUS_FIFO_EMPTY BIT0
553 #define TXSTATUS_ALL 0x00fa
554 #define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
555
556
557 #define MISCSTATUS_RXC_LATCHED BIT15
558 #define MISCSTATUS_RXC BIT14
559 #define MISCSTATUS_TXC_LATCHED BIT13
560 #define MISCSTATUS_TXC BIT12
561 #define MISCSTATUS_RI_LATCHED BIT11
562 #define MISCSTATUS_RI BIT10
563 #define MISCSTATUS_DSR_LATCHED BIT9
564 #define MISCSTATUS_DSR BIT8
565 #define MISCSTATUS_DCD_LATCHED BIT7
566 #define MISCSTATUS_DCD BIT6
567 #define MISCSTATUS_CTS_LATCHED BIT5
568 #define MISCSTATUS_CTS BIT4
569 #define MISCSTATUS_RCC_UNDERRUN BIT3
570 #define MISCSTATUS_DPLL_NO_SYNC BIT2
571 #define MISCSTATUS_BRG1_ZERO BIT1
572 #define MISCSTATUS_BRG0_ZERO BIT0
573
574 #define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
575 #define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
576
577 #define SICR_RXC_ACTIVE BIT15
578 #define SICR_RXC_INACTIVE BIT14
579 #define SICR_RXC (BIT15|BIT14)
580 #define SICR_TXC_ACTIVE BIT13
581 #define SICR_TXC_INACTIVE BIT12
582 #define SICR_TXC (BIT13|BIT12)
583 #define SICR_RI_ACTIVE BIT11
584 #define SICR_RI_INACTIVE BIT10
585 #define SICR_RI (BIT11|BIT10)
586 #define SICR_DSR_ACTIVE BIT9
587 #define SICR_DSR_INACTIVE BIT8
588 #define SICR_DSR (BIT9|BIT8)
589 #define SICR_DCD_ACTIVE BIT7
590 #define SICR_DCD_INACTIVE BIT6
591 #define SICR_DCD (BIT7|BIT6)
592 #define SICR_CTS_ACTIVE BIT5
593 #define SICR_CTS_INACTIVE BIT4
594 #define SICR_CTS (BIT5|BIT4)
595 #define SICR_RCC_UNDERFLOW BIT3
596 #define SICR_DPLL_NO_SYNC BIT2
597 #define SICR_BRG1_ZERO BIT1
598 #define SICR_BRG0_ZERO BIT0
599
600 void usc_DisableMasterIrqBit( struct mgsl_struct *info );
601 void usc_EnableMasterIrqBit( struct mgsl_struct *info );
602 void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
603 void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
604 void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
605
606 #define usc_EnableInterrupts( a, b ) \
607 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
608
609 #define usc_DisableInterrupts( a, b ) \
610 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
611
612 #define usc_EnableMasterIrqBit(a) \
613 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
614
615 #define usc_DisableMasterIrqBit(a) \
616 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
617
618 #define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
619
620 /*
621 * Transmit status Bits in Transmit Control status Register (TCSR)
622 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
623 */
624
625 #define TXSTATUS_PREAMBLE_SENT BIT7
626 #define TXSTATUS_IDLE_SENT BIT6
627 #define TXSTATUS_ABORT_SENT BIT5
628 #define TXSTATUS_EOF BIT4
629 #define TXSTATUS_CRC_SENT BIT3
630 #define TXSTATUS_ALL_SENT BIT2
631 #define TXSTATUS_UNDERRUN BIT1
632 #define TXSTATUS_FIFO_EMPTY BIT0
633
634 #define DICR_MASTER BIT15
635 #define DICR_TRANSMIT BIT0
636 #define DICR_RECEIVE BIT1
637
638 #define usc_EnableDmaInterrupts(a,b) \
639 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
640
641 #define usc_DisableDmaInterrupts(a,b) \
642 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
643
644 #define usc_EnableStatusIrqs(a,b) \
645 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
646
647 #define usc_DisablestatusIrqs(a,b) \
648 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
649
650 /* Transmit status Bits in Transmit Control status Register (TCSR) */
651 /* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
652
653
654 #define DISABLE_UNCONDITIONAL 0
655 #define DISABLE_END_OF_FRAME 1
656 #define ENABLE_UNCONDITIONAL 2
657 #define ENABLE_AUTO_CTS 3
658 #define ENABLE_AUTO_DCD 3
659 #define usc_EnableTransmitter(a,b) \
660 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
661 #define usc_EnableReceiver(a,b) \
662 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
663
664 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
665 static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
666 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
667
668 static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
669 static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
670 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
671 void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
672 void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
673
674 #define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
675 #define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
676
677 #define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
678
679 static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
680 static void usc_start_receiver( struct mgsl_struct *info );
681 static void usc_stop_receiver( struct mgsl_struct *info );
682
683 static void usc_start_transmitter( struct mgsl_struct *info );
684 static void usc_stop_transmitter( struct mgsl_struct *info );
685 static void usc_set_txidle( struct mgsl_struct *info );
686 static void usc_load_txfifo( struct mgsl_struct *info );
687
688 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
689 static void usc_enable_loopback( struct mgsl_struct *info, int enable );
690
691 static void usc_get_serial_signals( struct mgsl_struct *info );
692 static void usc_set_serial_signals( struct mgsl_struct *info );
693
694 static void usc_reset( struct mgsl_struct *info );
695
696 static void usc_set_sync_mode( struct mgsl_struct *info );
697 static void usc_set_sdlc_mode( struct mgsl_struct *info );
698 static void usc_set_async_mode( struct mgsl_struct *info );
699 static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
700
701 static void usc_loopback_frame( struct mgsl_struct *info );
702
703 static void mgsl_tx_timeout(unsigned long context);
704
705
706 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
707 static void usc_loopmode_insert_request( struct mgsl_struct * info );
708 static int usc_loopmode_active( struct mgsl_struct * info);
709 static void usc_loopmode_send_done( struct mgsl_struct * info );
710
711 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
712
713 #if SYNCLINK_GENERIC_HDLC
714 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
715 static void hdlcdev_tx_done(struct mgsl_struct *info);
716 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
717 static int hdlcdev_init(struct mgsl_struct *info);
718 static void hdlcdev_exit(struct mgsl_struct *info);
719 #endif
720
721 /*
722 * Defines a BUS descriptor value for the PCI adapter
723 * local bus address ranges.
724 */
725
726 #define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
727 (0x00400020 + \
728 ((WrHold) << 30) + \
729 ((WrDly) << 28) + \
730 ((RdDly) << 26) + \
731 ((Nwdd) << 20) + \
732 ((Nwad) << 15) + \
733 ((Nxda) << 13) + \
734 ((Nrdd) << 11) + \
735 ((Nrad) << 6) )
736
737 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
738
739 /*
740 * Adapter diagnostic routines
741 */
742 static bool mgsl_register_test( struct mgsl_struct *info );
743 static bool mgsl_irq_test( struct mgsl_struct *info );
744 static bool mgsl_dma_test( struct mgsl_struct *info );
745 static bool mgsl_memory_test( struct mgsl_struct *info );
746 static int mgsl_adapter_test( struct mgsl_struct *info );
747
748 /*
749 * device and resource management routines
750 */
751 static int mgsl_claim_resources(struct mgsl_struct *info);
752 static void mgsl_release_resources(struct mgsl_struct *info);
753 static void mgsl_add_device(struct mgsl_struct *info);
754 static struct mgsl_struct* mgsl_allocate_device(void);
755
756 /*
757 * DMA buffer manupulation functions.
758 */
759 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
760 static bool mgsl_get_rx_frame( struct mgsl_struct *info );
761 static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
762 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
763 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
764 static int num_free_tx_dma_buffers(struct mgsl_struct *info);
765 static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
766 static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
767
768 /*
769 * DMA and Shared Memory buffer allocation and formatting
770 */
771 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
772 static void mgsl_free_dma_buffers(struct mgsl_struct *info);
773 static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
774 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
775 static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
776 static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
777 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
778 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
779 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
780 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
781 static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
782 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
783
784 /*
785 * Bottom half interrupt handlers
786 */
787 static void mgsl_bh_handler(struct work_struct *work);
788 static void mgsl_bh_receive(struct mgsl_struct *info);
789 static void mgsl_bh_transmit(struct mgsl_struct *info);
790 static void mgsl_bh_status(struct mgsl_struct *info);
791
792 /*
793 * Interrupt handler routines and dispatch table.
794 */
795 static void mgsl_isr_null( struct mgsl_struct *info );
796 static void mgsl_isr_transmit_data( struct mgsl_struct *info );
797 static void mgsl_isr_receive_data( struct mgsl_struct *info );
798 static void mgsl_isr_receive_status( struct mgsl_struct *info );
799 static void mgsl_isr_transmit_status( struct mgsl_struct *info );
800 static void mgsl_isr_io_pin( struct mgsl_struct *info );
801 static void mgsl_isr_misc( struct mgsl_struct *info );
802 static void mgsl_isr_receive_dma( struct mgsl_struct *info );
803 static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
804
/* Signature shared by all second-level interrupt service routines. */
typedef void (*isr_dispatch_func)(struct mgsl_struct *);

/*
 * Second-level ISR dispatch table; entry 0 is the "no interrupt"
 * handler. NOTE(review): the index is presumably the interrupt type
 * code read from the USC — the dispatching code is outside this
 * excerpt, so do not reorder entries without checking it.
 */
static isr_dispatch_func UscIsrTable[7] =
{
	mgsl_isr_null,
	mgsl_isr_misc,
	mgsl_isr_io_pin,
	mgsl_isr_transmit_data,
	mgsl_isr_transmit_status,
	mgsl_isr_receive_data,
	mgsl_isr_receive_status
};
817
818 /*
819 * ioctl call handlers
820 */
821 static int tiocmget(struct tty_struct *tty);
822 static int tiocmset(struct tty_struct *tty,
823 unsigned int set, unsigned int clear);
824 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
825 __user *user_icount);
826 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
827 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
828 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
829 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
830 static int mgsl_txenable(struct mgsl_struct * info, int enable);
831 static int mgsl_txabort(struct mgsl_struct * info);
832 static int mgsl_rxenable(struct mgsl_struct * info, int enable);
833 static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
834 static int mgsl_loopmode_send_done( struct mgsl_struct * info );
835
/* set non-zero on successful registration with PCI subsystem */
static bool pci_registered;

/*
 * Global linked list of SyncLink devices
 */
static struct mgsl_struct *mgsl_device_list;
static int mgsl_device_count;

/*
 * Set this param to non-zero to load eax with the
 * .text section address and breakpoint on module load.
 * This is useful for use with gdb and add-symbol-file command.
 */
static bool break_on_load;

/*
 * Driver major number, defaults to zero to get auto
 * assigned major number. May be forced as module parameter.
 */
static int ttymajor;

/*
 * Array of user specified options for ISA adapters.
 */
static int io[MAX_ISA_DEVICES];
static int irq[MAX_ISA_DEVICES];
static int dma[MAX_ISA_DEVICES];
static int debug_level;			/* 0 = off; compared against DEBUG_LEVEL_* */
static int maxframe[MAX_TOTAL_DEVICES];		/* per-device max frame size */
static int txdmabufs[MAX_TOTAL_DEVICES];	/* per-device tx DMA buffer count */
static int txholdbufs[MAX_TOTAL_DEVICES];	/* per-device tx holding buffer count */

module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
/* _hw_ variants mark these as hardware parameters (ISA resources) */
module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param_hw_array(dma, int, dma, NULL, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
module_param_array(txdmabufs, int, NULL, 0);
module_param_array(txholdbufs, int, NULL, 0);

static char *driver_name = "SyncLink serial driver";
static char *driver_version = "$Revision: 4.38 $";

static int synclink_init_one (struct pci_dev *dev,
				     const struct pci_device_id *ent);
static void synclink_remove_one (struct pci_dev *dev);

/* PCI IDs this driver binds to (Microgate USC adapters) */
static const struct pci_device_id synclink_pci_tbl[] = {
	{ PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);

MODULE_LICENSE("GPL");

static struct pci_driver synclink_pci_driver = {
	.name		= "synclink",
	.id_table	= synclink_pci_tbl,
	.probe		= synclink_init_one,
	.remove		= synclink_remove_one,
};

static struct tty_driver *serial_driver;

/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256


static void mgsl_change_params(struct mgsl_struct *info);
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
911 /*
912 * 1st function defined in .text section. Calling this function in
913 * init_module() followed by a breakpoint allows a remote debugger
914 * (gdb) to get the .text address for the add-symbol-file command.
915 * This allows remote debugging of dynamically loadable modules.
916 */
/*
 * First function defined in the .text section.  Returning its own
 * address lets a remote debugger (gdb) learn where the module's .text
 * section was loaded, for use with the add-symbol-file command
 * (see the break_on_load module parameter).
 */
static void* mgsl_get_text_ptr(void)
{
	void *text_section_addr = mgsl_get_text_ptr;

	return text_section_addr;
}
921
/*
 * Sanity-check a device instance pointer before use.
 *
 * Returns 1 (and logs a warning when MGSL_PARANOIA_CHECK is enabled)
 * if the pointer is NULL or its magic field is corrupted, 0 otherwise.
 */
static inline int mgsl_paranoia_check(struct mgsl_struct *info,
					char *name, const char *routine)
{
#ifdef MGSL_PARANOIA_CHECK
	if (!info) {
		printk("Warning: null mgsl_struct for (%s) in %s\n",
		       name, routine);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk("Warning: bad magic number for mgsl struct (%s) in %s\n",
		       name, routine);
		return 1;
	}
	return 0;
#else
	return info ? 0 : 1;
#endif
}
945
946 /**
947 * line discipline callback wrappers
948 *
949 * The wrappers maintain line discipline references
950 * while calling into the line discipline.
951 *
952 * ldisc_receive_buf - pass receive data to line discipline
953 */
954
955 static void ldisc_receive_buf(struct tty_struct *tty,
956 const __u8 *data, char *flags, int count)
957 {
958 struct tty_ldisc *ld;
959 if (!tty)
960 return;
961 ld = tty_ldisc_ref(tty);
962 if (ld) {
963 if (ld->ops->receive_buf)
964 ld->ops->receive_buf(tty, data, flags, count);
965 tty_ldisc_deref(ld);
966 }
967 }
968
969 /* mgsl_stop() throttle (stop) transmitter
970 *
971 * Arguments: tty pointer to tty info structure
972 * Return Value: None
973 */
974 static void mgsl_stop(struct tty_struct *tty)
975 {
976 struct mgsl_struct *info = tty->driver_data;
977 unsigned long flags;
978
979 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
980 return;
981
982 if ( debug_level >= DEBUG_LEVEL_INFO )
983 printk("mgsl_stop(%s)\n",info->device_name);
984
985 spin_lock_irqsave(&info->irq_spinlock,flags);
986 if (info->tx_enabled)
987 usc_stop_transmitter(info);
988 spin_unlock_irqrestore(&info->irq_spinlock,flags);
989
990 } /* end of mgsl_stop() */
991
992 /* mgsl_start() release (start) transmitter
993 *
994 * Arguments: tty pointer to tty info structure
995 * Return Value: None
996 */
997 static void mgsl_start(struct tty_struct *tty)
998 {
999 struct mgsl_struct *info = tty->driver_data;
1000 unsigned long flags;
1001
1002 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1003 return;
1004
1005 if ( debug_level >= DEBUG_LEVEL_INFO )
1006 printk("mgsl_start(%s)\n",info->device_name);
1007
1008 spin_lock_irqsave(&info->irq_spinlock,flags);
1009 if (!info->tx_enabled)
1010 usc_start_transmitter(info);
1011 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1012
1013 } /* end of mgsl_start() */
1014
1015 /*
1016 * Bottom half work queue access functions
1017 */
1018
1019 /* mgsl_bh_action() Return next bottom half action to perform.
1020 * Return Value: BH action code or 0 if nothing to do.
1021 */
1022 static int mgsl_bh_action(struct mgsl_struct *info)
1023 {
1024 unsigned long flags;
1025 int rc = 0;
1026
1027 spin_lock_irqsave(&info->irq_spinlock,flags);
1028
1029 if (info->pending_bh & BH_RECEIVE) {
1030 info->pending_bh &= ~BH_RECEIVE;
1031 rc = BH_RECEIVE;
1032 } else if (info->pending_bh & BH_TRANSMIT) {
1033 info->pending_bh &= ~BH_TRANSMIT;
1034 rc = BH_TRANSMIT;
1035 } else if (info->pending_bh & BH_STATUS) {
1036 info->pending_bh &= ~BH_STATUS;
1037 rc = BH_STATUS;
1038 }
1039
1040 if (!rc) {
1041 /* Mark BH routine as complete */
1042 info->bh_running = false;
1043 info->bh_requested = false;
1044 }
1045
1046 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1047
1048 return rc;
1049 }
1050
1051 /*
1052 * Perform bottom half processing of work items queued by ISR.
1053 */
1054 static void mgsl_bh_handler(struct work_struct *work)
1055 {
1056 struct mgsl_struct *info =
1057 container_of(work, struct mgsl_struct, task);
1058 int action;
1059
1060 if ( debug_level >= DEBUG_LEVEL_BH )
1061 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1062 __FILE__,__LINE__,info->device_name);
1063
1064 info->bh_running = true;
1065
1066 while((action = mgsl_bh_action(info)) != 0) {
1067
1068 /* Process work item */
1069 if ( debug_level >= DEBUG_LEVEL_BH )
1070 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1071 __FILE__,__LINE__,action);
1072
1073 switch (action) {
1074
1075 case BH_RECEIVE:
1076 mgsl_bh_receive(info);
1077 break;
1078 case BH_TRANSMIT:
1079 mgsl_bh_transmit(info);
1080 break;
1081 case BH_STATUS:
1082 mgsl_bh_status(info);
1083 break;
1084 default:
1085 /* unknown work item ID */
1086 printk("Unknown work item ID=%08X!\n", action);
1087 break;
1088 }
1089 }
1090
1091 if ( debug_level >= DEBUG_LEVEL_BH )
1092 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1093 __FILE__,__LINE__,info->device_name);
1094 }
1095
1096 static void mgsl_bh_receive(struct mgsl_struct *info)
1097 {
1098 bool (*get_rx_frame)(struct mgsl_struct *info) =
1099 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1100
1101 if ( debug_level >= DEBUG_LEVEL_BH )
1102 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1103 __FILE__,__LINE__,info->device_name);
1104
1105 do
1106 {
1107 if (info->rx_rcc_underrun) {
1108 unsigned long flags;
1109 spin_lock_irqsave(&info->irq_spinlock,flags);
1110 usc_start_receiver(info);
1111 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1112 return;
1113 }
1114 } while(get_rx_frame(info));
1115 }
1116
1117 static void mgsl_bh_transmit(struct mgsl_struct *info)
1118 {
1119 struct tty_struct *tty = info->port.tty;
1120 unsigned long flags;
1121
1122 if ( debug_level >= DEBUG_LEVEL_BH )
1123 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1124 __FILE__,__LINE__,info->device_name);
1125
1126 if (tty)
1127 tty_wakeup(tty);
1128
1129 /* if transmitter idle and loopmode_send_done_requested
1130 * then start echoing RxD to TxD
1131 */
1132 spin_lock_irqsave(&info->irq_spinlock,flags);
1133 if ( !info->tx_active && info->loopmode_send_done_requested )
1134 usc_loopmode_send_done( info );
1135 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1136 }
1137
1138 static void mgsl_bh_status(struct mgsl_struct *info)
1139 {
1140 if ( debug_level >= DEBUG_LEVEL_BH )
1141 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1142 __FILE__,__LINE__,info->device_name);
1143
1144 info->ri_chkcount = 0;
1145 info->dsr_chkcount = 0;
1146 info->dcd_chkcount = 0;
1147 info->cts_chkcount = 0;
1148 }
1149
1150 /* mgsl_isr_receive_status()
1151 *
1152 * Service a receive status interrupt. The type of status
1153 * interrupt is indicated by the state of the RCSR.
1154 * This is only used for HDLC mode.
1155 *
1156 * Arguments: info pointer to device instance data
1157 * Return Value: None
1158 */
/* mgsl_isr_receive_status()
 *
 * 	Service a receive status interrupt. The type of status
 * 	interrupt is indicated by the state of the RCSR.
 * 	This is only used for HDLC mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, RCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
			__FILE__,__LINE__,status);

	/* received an HDLC abort while a loopmode insert was pending:
	 * this is the trigger to take over the loop */
	if ( (status & RXSTATUS_ABORT_RECEIVED) &&
		info->loopmode_insert_requested &&
		usc_loopmode_active(info) )
	{
		++info->icount.rxabort;
		info->loopmode_insert_requested = false;

		/* clear CMR:13 to start echoing RxD to TxD */
		info->cmr_value &= ~BIT13;
		usc_OutReg(info, CMR, info->cmr_value);

		/* disable received abort irq (no longer required) */
		usc_OutReg(info, RICR,
			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
	}

	/* hunt-exit / idle-received events wake mgsl_wait_event() sleepers */
	if (status & (RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)) {
		if (status & RXSTATUS_EXITED_HUNT)
			info->icount.exithunt++;
		if (status & RXSTATUS_IDLE_RECEIVED)
			info->icount.rxidle++;
		wake_up_interruptible(&info->event_wait_q);
	}

	if (status & RXSTATUS_OVERRUN){
		info->icount.rxover++;
		usc_process_rxoverrun_sync( info );
	}

	/* ack the interrupt and clear the latched status bits we saw */
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
	usc_UnlatchRxstatusBits( info, status );

} /* end of mgsl_isr_receive_status() */
1200
1201 /* mgsl_isr_transmit_status()
1202 *
1203 * Service a transmit status interrupt
1204 * HDLC mode :end of transmit frame
1205 * Async mode:all data is sent
1206 * transmit status is indicated by bits in the TCSR.
1207 *
1208 * Arguments: info pointer to device instance data
1209 * Return Value: None
1210 */
1211 static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1212 {
1213 u16 status = usc_InReg( info, TCSR );
1214
1215 if ( debug_level >= DEBUG_LEVEL_ISR )
1216 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1217 __FILE__,__LINE__,status);
1218
1219 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1220 usc_UnlatchTxstatusBits( info, status );
1221
1222 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1223 {
1224 /* finished sending HDLC abort. This may leave */
1225 /* the TxFifo with data from the aborted frame */
1226 /* so purge the TxFifo. Also shutdown the DMA */
1227 /* channel in case there is data remaining in */
1228 /* the DMA buffer */
1229 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1230 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1231 }
1232
1233 if ( status & TXSTATUS_EOF_SENT )
1234 info->icount.txok++;
1235 else if ( status & TXSTATUS_UNDERRUN )
1236 info->icount.txunder++;
1237 else if ( status & TXSTATUS_ABORT_SENT )
1238 info->icount.txabort++;
1239 else
1240 info->icount.txunder++;
1241
1242 info->tx_active = false;
1243 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1244 del_timer(&info->tx_timer);
1245
1246 if ( info->drop_rts_on_tx_done ) {
1247 usc_get_serial_signals( info );
1248 if ( info->serial_signals & SerialSignal_RTS ) {
1249 info->serial_signals &= ~SerialSignal_RTS;
1250 usc_set_serial_signals( info );
1251 }
1252 info->drop_rts_on_tx_done = false;
1253 }
1254
1255 #if SYNCLINK_GENERIC_HDLC
1256 if (info->netcount)
1257 hdlcdev_tx_done(info);
1258 else
1259 #endif
1260 {
1261 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1262 usc_stop_transmitter(info);
1263 return;
1264 }
1265 info->pending_bh |= BH_TRANSMIT;
1266 }
1267
1268 } /* end of mgsl_isr_transmit_status() */
1269
1270 /* mgsl_isr_io_pin()
1271 *
1272 * Service an Input/Output pin interrupt. The type of
1273 * interrupt is indicated by bits in the MISR
1274 *
1275 * Arguments: info pointer to device instance data
1276 * Return Value: None
1277 */
/* mgsl_isr_io_pin()
 *
 * 	Service an Input/Output pin interrupt. The type of
 * 	interrupt is indicated by bits in the MISR
 *
 * Arguments:		info	       pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_io_pin( struct mgsl_struct *info )
{
 	struct mgsl_icount *icount;
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
			__FILE__,__LINE__,status);

	/* ack the interrupt and clear the latched status bits we saw */
	usc_ClearIrqPendingBits( info, IO_PIN );
	usc_UnlatchIostatusBits( info, status );

	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
		icount = &info->icount;
		/* update input line counters; each signal's interrupt is
		 * disabled once it has changed IO_PIN_SHUTDOWN_LIMIT times
		 * (rate limit against a bouncing line; counters are reset
		 * by mgsl_bh_status()) */
		if (status & MISCSTATUS_RI_LATCHED) {
			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_RI);
			icount->rng++;
			if ( status & MISCSTATUS_RI )
				info->input_signal_events.ri_up++;
			else
				info->input_signal_events.ri_down++;
		}
		if (status & MISCSTATUS_DSR_LATCHED) {
			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DSR);
			icount->dsr++;
			if ( status & MISCSTATUS_DSR )
				info->input_signal_events.dsr_up++;
			else
				info->input_signal_events.dsr_down++;
		}
		if (status & MISCSTATUS_DCD_LATCHED) {
			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DCD);
			icount->dcd++;
			if (status & MISCSTATUS_DCD) {
				info->input_signal_events.dcd_up++;
			} else
				info->input_signal_events.dcd_down++;
#if SYNCLINK_GENERIC_HDLC
			/* mirror carrier state to the generic HDLC netdev */
			if (info->netcount) {
				if (status & MISCSTATUS_DCD)
					netif_carrier_on(info->netdev);
				else
					netif_carrier_off(info->netdev);
			}
#endif
		}
		if (status & MISCSTATUS_CTS_LATCHED)
		{
			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_CTS);
			icount->cts++;
			if ( status & MISCSTATUS_CTS )
				info->input_signal_events.cts_up++;
			else
				info->input_signal_events.cts_down++;
		}
		wake_up_interruptible(&info->status_event_wait_q);
		wake_up_interruptible(&info->event_wait_q);

		/* carrier tracking: wake blocked opens on carrier-up,
		 * hang up the tty on carrier-down */
		if (tty_port_check_carrier(&info->port) &&
		     (status & MISCSTATUS_DCD_LATCHED) ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s CD now %s...", info->device_name,
				       (status & MISCSTATUS_DCD) ? "on" : "off");
			if (status & MISCSTATUS_DCD)
				wake_up_interruptible(&info->port.open_wait);
			else {
				if ( debug_level >= DEBUG_LEVEL_ISR )
					printk("doing serial hangup...");
				if (info->port.tty)
					tty_hangup(info->port.tty);
			}
		}

		/* hardware (CTS) flow control: start/stop the transmitter
		 * on CTS transitions.
		 * NOTE(review): info->port.tty is dereferenced here without
		 * a NULL check (unlike the tty_hangup path above) — looks
		 * unsafe if CTS changes while no tty is attached; confirm. */
		if (tty_port_cts_enabled(&info->port) &&
		     (status & MISCSTATUS_CTS_LATCHED) ) {
			if (info->port.tty->hw_stopped) {
				if (status & MISCSTATUS_CTS) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx start...");
					info->port.tty->hw_stopped = 0;
					usc_start_transmitter(info);
					info->pending_bh |= BH_TRANSMIT;
					return;
				}
			} else {
				if (!(status & MISCSTATUS_CTS)) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx stop...");
					if (info->port.tty)
						info->port.tty->hw_stopped = 1;
					usc_stop_transmitter(info);
				}
			}
		}
	}

	/* schedule the bottom half to reset the rate-limit counters */
	info->pending_bh |= BH_STATUS;

	/* for diagnostics set IRQ flag */
	if ( status & MISCSTATUS_TXC_LATCHED ){
		usc_OutReg( info, SICR,
			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
		info->irq_occurred = true;
	}

} /* end of mgsl_isr_io_pin() */
1391
1392 /* mgsl_isr_transmit_data()
1393 *
1394 * Service a transmit data interrupt (async mode only).
1395 *
1396 * Arguments: info pointer to device instance data
1397 * Return Value: None
1398 */
1399 static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1400 {
1401 if ( debug_level >= DEBUG_LEVEL_ISR )
1402 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1403 __FILE__,__LINE__,info->xmit_cnt);
1404
1405 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1406
1407 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1408 usc_stop_transmitter(info);
1409 return;
1410 }
1411
1412 if ( info->xmit_cnt )
1413 usc_load_txfifo( info );
1414 else
1415 info->tx_active = false;
1416
1417 if (info->xmit_cnt < WAKEUP_CHARS)
1418 info->pending_bh |= BH_TRANSMIT;
1419
1420 } /* end of mgsl_isr_transmit_data() */
1421
1422 /* mgsl_isr_receive_data()
1423 *
1424 * Service a receive data interrupt. This occurs
1425 * when operating in asynchronous interrupt transfer mode.
1426 * The receive data FIFO is flushed to the receive data buffers.
1427 *
1428 * Arguments: info pointer to device instance data
1429 * Return Value: None
1430 */
/* mgsl_isr_receive_data()
 *
 * 	Service a receive data interrupt. This occurs
 * 	when operating in asynchronous interrupt transfer mode.
 *	The receive data FIFO is flushed to the receive data buffers.
 *
 * Arguments:		info		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_data( struct mgsl_struct *info )
{
	int Fifocount;
	u16 status;
	int work = 0;
	unsigned char DataByte;
 	struct mgsl_icount *icount = &info->icount;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_data\n",
			__FILE__,__LINE__);

	usc_ClearIrqPendingBits( info, RECEIVE_DATA );

	/* select FIFO status for RICR readback */
	usc_RCmd( info, RCmd_SelectRicrRxFifostatus );

	/* clear the Wordstatus bit so that status readback */
	/* only reflects the status of this byte */
	usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));

	/* flush the receive FIFO */

	/* upper byte of RICR readback holds the FIFO fill count */
	while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
		int flag;

		/* read one byte from RxFIFO */
		outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
			info->io_base + CCAR );
		DataByte = inb( info->io_base + CCAR );

		/* get the status of the received byte */
		status = usc_InReg(info, RCSR);
		if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
			RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) )
			usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);

		icount->rx++;

		flag = 0;	/* tty flag for this char; TTY_NORMAL unless error */
		if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
			RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) ) {
			printk("rxerr=%04X\n",status);
			/* update error statistics */
			if ( status & RXSTATUS_BREAK_RECEIVED ) {
				/* break implies framing/parity noise; count break only */
				status &= ~(RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR);
				icount->brk++;
			} else if (status & RXSTATUS_PARITY_ERROR)
				icount->parity++;
			else if (status & RXSTATUS_FRAMING_ERROR)
				icount->frame++;
			else if (status & RXSTATUS_OVERRUN) {
				/* must issue purge fifo cmd before */
				/* 16C32 accepts more receive chars */
				usc_RTCmd(info,RTCmd_PurgeRxFifo);
				icount->overrun++;
			}

			/* discard char if tty control flags say so */
			if (status & info->ignore_status_mask)
				continue;

			status &= info->read_status_mask;

			if (status & RXSTATUS_BREAK_RECEIVED) {
				flag = TTY_BREAK;
				if (info->port.flags & ASYNC_SAK)
					do_SAK(info->port.tty);
			} else if (status & RXSTATUS_PARITY_ERROR)
				flag = TTY_PARITY;
			else if (status & RXSTATUS_FRAMING_ERROR)
				flag = TTY_FRAME;
		} /* end of if (error) */
		tty_insert_flip_char(&info->port, DataByte, flag);
		if (status & RXSTATUS_OVERRUN) {
			/* Overrun is special, since it's
			 * reported immediately, and doesn't
			 * affect the current character
			 */
			work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
		}
	}

	if ( debug_level >= DEBUG_LEVEL_ISR ) {
		printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
			__FILE__,__LINE__,icount->rx,icount->brk,
			icount->parity,icount->frame,icount->overrun);
	}

	/* NOTE(review): 'work' only counts overrun markers, so the flip
	 * buffer is pushed here only after an overrun; presumably normal
	 * data is pushed elsewhere — confirm against the flush path. */
	if(work)
		tty_flip_buffer_push(&info->port);
}
1523
1524 /* mgsl_isr_misc()
1525 *
1526 * Service a miscellaneous interrupt source.
1527 *
1528 * Arguments: info pointer to device extension (instance data)
1529 * Return Value: None
1530 */
/* mgsl_isr_misc()
 *
 * 	Service a miscellaneous interrupt source.
 *
 * Arguments:		info		pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_misc( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_misc status=%04X\n",
			__FILE__,__LINE__,status);

	/* RCC (receive character counter) underrun in HDLC mode:
	 * shut the receiver down here and let the bottom half
	 * (mgsl_bh_receive) restart it outside interrupt context */
	if ((status & MISCSTATUS_RCC_UNDERRUN) &&
	    (info->params.mode == MGSL_MODE_HDLC)) {

		/* turn off receiver and rx DMA */
		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
		usc_DmaCmd(info, DmaCmd_ResetRxChannel);
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
		usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);

		/* schedule BH handler to restart receiver */
		info->pending_bh |= BH_RECEIVE;
		info->rx_rcc_underrun = true;
	}

	/* ack the interrupt and clear the latched status bits we saw */
	usc_ClearIrqPendingBits( info, MISC );
	usc_UnlatchMiscstatusBits( info, status );

} /* end of mgsl_isr_misc() */
1558
1559 /* mgsl_isr_null()
1560 *
1561 * Services undefined interrupt vectors from the
1562 * USC. (hence this function SHOULD never be called)
1563 *
1564 * Arguments: info pointer to device extension (instance data)
1565 * Return Value: None
1566 */
static void mgsl_isr_null( struct mgsl_struct *info )
{
	/* intentionally empty: entry 0 of UscIsrTable, catches
	 * undefined interrupt vectors which require no action */
} /* end of mgsl_isr_null() */
1571
1572 /* mgsl_isr_receive_dma()
1573 *
1574 * Service a receive DMA channel interrupt.
1575 * For this driver there are two sources of receive DMA interrupts
1576 * as identified in the Receive DMA mode Register (RDMR):
1577 *
1578 * BIT3 EOA/EOL End of List, all receive buffers in receive
1579 * buffer list have been filled (no more free buffers
1580 * available). The DMA controller has shut down.
1581 *
1582 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1583 * DMA buffer is terminated in response to completion
1584 * of a good frame or a frame with errors. The status
1585 * of the frame is stored in the buffer entry in the
1586 * list of receive buffer entries.
1587 *
1588 * Arguments: info pointer to device instance data
1589 * Return Value: None
1590 */
static void mgsl_isr_receive_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Rx DMA IRQ */
	usc_OutDmaReg( info, CDIR, BIT9 | BIT1 );

	/* Read the receive DMA status to identify interrupt type. */
	/* This also clears the status bits. */
	status = usc_InDmaReg( info, RDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* always let the bottom half process whatever buffers completed */
	info->pending_bh |= BH_RECEIVE;

	/* BIT3 = EOA/EOL: all receive buffers are full and the DMA
	 * controller has shut down (see function header comment) */
	if ( status & BIT3 ) {
		info->rx_overflow = true;
		info->icount.buf_overrun++;
	}

} /* end of mgsl_isr_receive_dma() */
1614
1615 /* mgsl_isr_transmit_dma()
1616 *
1617 * This function services a transmit DMA channel interrupt.
1618 *
1619 * For this driver there is one source of transmit DMA interrupts
1620 * as identified in the Transmit DMA Mode Register (TDMR):
1621 *
1622 * BIT2 EOB End of Buffer. This interrupt occurs when a
1623 * transmit DMA buffer has been emptied.
1624 *
1625 * The driver maintains enough transmit DMA buffers to hold at least
1626 * one max frame size transmit frame. When operating in a buffered
1627 * transmit mode, there may be enough transmit DMA buffers to hold at
1628 * least two or more max frame size frames. On an EOB condition,
1629 * determine if there are any queued transmit buffers and copy into
1630 * transmit DMA buffers if we have room.
1631 *
1632 * Arguments: info pointer to device instance data
1633 * Return Value: None
1634 */
static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Tx DMA IRQ */
	usc_OutDmaReg(info, CDIR, BIT8 | BIT0 );

	/* Read the transmit DMA status to identify interrupt type. */
	/* This also clears the status bits. */

	status = usc_InDmaReg( info, TDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* BIT2 = EOB: a transmit DMA buffer has been emptied
	 * (see function header comment) */
	if ( status & BIT2 ) {
		--info->tx_dma_buffers_used;

		/* if there are transmit frames queued,
		 * try to load the next one
		 */
		if ( load_next_tx_holding_buffer(info) ) {
			/* if call returns non-zero value, we have
			 * at least one free tx holding buffer
			 */
			info->pending_bh |= BH_TRANSMIT;
		}
	}

} /* end of mgsl_isr_transmit_dma() */
1666
1667 /* mgsl_interrupt()
1668 *
1669 * Interrupt service routine entry point.
1670 *
1671 * Arguments:
1672 *
1673 * irq interrupt number that caused interrupt
1674 * dev_id device ID supplied during interrupt registration
1675 *
1676 * Return Value: None
1677 */
/* mgsl_interrupt()
 *
 * 	Interrupt service routine entry point.
 *
 * Arguments:
 * 	irq	interrupt number that caused interrupt
 * 	dev_id	device ID supplied during interrupt registration
 *
 * Return Value: IRQ_HANDLED
 */
static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
{
	struct mgsl_struct *info = dev_id;
	u16 UscVector;
	u16 DmaVector;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
			__FILE__, __LINE__, info->irq_level);

	spin_lock(&info->irq_spinlock);

	/* loop until no serial or DMA interrupt source remains pending */
	for(;;) {
		/* Read the interrupt vectors from hardware. */
		UscVector = usc_InReg(info, IVR) >> 9;
		DmaVector = usc_InDmaReg(info, DIVR);

		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);

		if ( !UscVector && !DmaVector )
			break;

		/* Dispatch interrupt vector */
		/* USC vector indexes the 7-entry UscIsrTable; presumably
		 * IVR>>9 is always 0..6 by hardware design — see the
		 * 16C32 data sheet for the IVR layout */
		if ( UscVector )
			(*UscIsrTable[UscVector])(info);
		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
			mgsl_isr_transmit_dma(info);
		else
			mgsl_isr_receive_dma(info);

		/* safety valve: if interrupts flood, mask everything off */
		if ( info->isr_overflow ) {
			printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
				__FILE__, __LINE__, info->device_name, info->irq_level);
			usc_DisableMasterIrqBit(info);
			usc_DisableDmaInterrupts(info,DICR_MASTER);
			break;
		}
	}

	/* Request bottom half processing if there's something
	 * for it to do and the bh is not already running
	 */

	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s queueing bh task.\n",
				__FILE__,__LINE__,info->device_name);
		schedule_work(&info->task);
		info->bh_requested = true;
	}

	spin_unlock(&info->irq_spinlock);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
			__FILE__, __LINE__, info->irq_level);

	return IRQ_HANDLED;
} /* end of mgsl_interrupt() */
1739
1740 /* startup()
1741 *
1742 * Initialize and start device.
1743 *
1744 * Arguments: info pointer to device instance data
1745 * Return Value: 0 if success, otherwise error code
1746 */
/* startup()
 *
 * 	Initialize and start device.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error code
 */
static int startup(struct mgsl_struct * info)
{
	int retval = 0;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);

	/* already started: nothing to do */
	if (tty_port_initialized(&info->port))
		return 0;

	if (!info->xmit_buf) {
		/* allocate a page of memory for a transmit buffer */
		info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
		if (!info->xmit_buf) {
			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
				__FILE__,__LINE__,info->device_name);
			return -ENOMEM;
		}
	}

	info->pending_bh = 0;

	/* reset the error/event counters for the new session */
	memset(&info->icount, 0, sizeof(info->icount));

	/* transmit watchdog timer; mgsl_tx_timeout gets info as data */
	setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);

	/* Allocate and claim adapter resources */
	retval = mgsl_claim_resources(info);

	/* perform existence check and diagnostics */
	if ( !retval )
		retval = mgsl_adapter_test(info);

	if ( retval ) {
		/* startup failed: flag the tty as unusable and back out */
  		if (capable(CAP_SYS_ADMIN) && info->port.tty)
			set_bit(TTY_IO_ERROR, &info->port.tty->flags);
		mgsl_release_resources(info);
  		return retval;
  	}

	/* program hardware for current parameters */
	mgsl_change_params(info);

	if (info->port.tty)
		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);

	tty_port_set_initialized(&info->port, 1);

	return 0;
} /* end of startup() */
1797
1798 /* shutdown()
1799 *
1800 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1801 *
1802 * Arguments: info pointer to device instance data
1803 * Return Value: None
1804 */
/* shutdown()
 *
 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void shutdown(struct mgsl_struct * info)
{
	unsigned long flags;

	/* nothing to do if never started */
	if (!tty_port_initialized(&info->port))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_shutdown(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* clear status wait queue because status changes */
	/* can't happen after shutting down the hardware */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	del_timer_sync(&info->tx_timer);

	/* release the transmit buffer page allocated in startup() */
	if (info->xmit_buf) {
		free_page((unsigned long) info->xmit_buf);
		info->xmit_buf = NULL;
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_DisableMasterIrqBit(info);
	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	usc_DisableInterrupts(info,RECEIVE_DATA | RECEIVE_STATUS |
		TRANSMIT_DATA | TRANSMIT_STATUS | IO_PIN | MISC );
	usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);

	/* Disable DMAEN (Port 7, Bit 14) */
	/* This disconnects the DMA request signal from the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));

	/* Disable INTEN (Port 6, Bit12) */
	/* This disconnects the IRQ request signal to the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));

	/* drop modem control lines if hangup-on-close is configured */
 	if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
		usc_set_serial_signals(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	mgsl_release_resources(info);

	/* port is now closed: mark tty unusable until next startup() */
	if (info->port.tty)
		set_bit(TTY_IO_ERROR, &info->port.tty->flags);

	tty_port_set_initialized(&info->port, 0);
} /* end of shutdown() */
1860
/*
 * Program the hardware for the current operating parameters:
 * stop tx/rx, select sync or async mode, refresh modem signals and
 * status interrupts, and restart the receiver if data is wanted.
 * Called with parameters already validated (see mgsl_change_params).
 */
static void mgsl_program_hw(struct mgsl_struct *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* quiesce the channel and discard buffered tx data */
	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	/* sync mode for HDLC/RAW or when the generic HDLC netdev is open */
	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ||
	    info->netcount)
		usc_set_sync_mode(info);
	else
		usc_set_async_mode(info);

	usc_set_serial_signals(info);

	/* reset the modem-signal interrupt rate-limit counters */
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
	usc_EnableInterrupts(info, IO_PIN);
	usc_get_serial_signals(info);

	/* start receiving if netdev is up or the tty has CREAD set;
	 * netcount is checked first, short-circuiting the tty deref */
	if (info->netcount || info->port.tty->termios.c_cflag & CREAD)
		usc_start_receiver(info);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
1894
/* mgsl_change_params()
 *
 * Reconfigure the adapter based on new parameters derived from the
 * associated tty's termios settings, then reprogram the hardware.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_change_params(struct mgsl_struct *info)
{
	unsigned cflag;
	int bits_per_char;

	/* no tty means no termios to derive settings from */
	if (!info->port.tty)
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_change_params(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	cflag = info->port.tty->termios.c_cflag;

	/* if B0 rate (hangup) specified then negate RTS and DTR */
	/* otherwise assert RTS and DTR */
 	if (cflag & CBAUD)
		info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
	else
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);

	/* byte size and parity */

	switch (cflag & CSIZE) {
	case CS5: info->params.data_bits = 5; break;
	case CS6: info->params.data_bits = 6; break;
	case CS7: info->params.data_bits = 7; break;
	case CS8: info->params.data_bits = 8; break;
	/* Never happens, but GCC is too dumb to figure it out */
	default:  info->params.data_bits = 7; break;
	}

	if (cflag & CSTOPB)
		info->params.stop_bits = 2;
	else
		info->params.stop_bits = 1;

	info->params.parity = ASYNC_PARITY_NONE;
	if (cflag & PARENB) {
		if (cflag & PARODD)
			info->params.parity = ASYNC_PARITY_ODD;
		else
			info->params.parity = ASYNC_PARITY_EVEN;
#ifdef CMSPAR
		if (cflag & CMSPAR)
			info->params.parity = ASYNC_PARITY_SPACE;
#endif
	}

	/* calculate number of jiffies to transmit a full
	 * FIFO (32 bytes) at specified data rate
	 */
	bits_per_char = info->params.data_bits +
			info->params.stop_bits + 1;

	/* if port data rate is set to 460800 or less then
	 * allow tty settings to override, otherwise keep the
	 * current data rate.
	 */
	if (info->params.data_rate <= 460800)
		info->params.data_rate = tty_get_baud_rate(info->port.tty);

	if ( info->params.data_rate ) {
		info->timeout = (32*HZ*bits_per_char) /
				info->params.data_rate;
	}
	info->timeout += HZ/50;		/* Add .02 seconds of slop */

	/* hardware flow control and carrier detect per termios flags */
	tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
	tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);

	/* process tty input control flags */

	info->read_status_mask = RXSTATUS_OVERRUN;
	if (I_INPCK(info->port.tty))
		info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
 	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
 		info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;

	if (I_IGNPAR(info->port.tty))
		info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
	if (I_IGNBRK(info->port.tty)) {
		info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
		/* If ignoring parity and break indicators, ignore
		 * overruns too.  (For real raw support).
		 */
		if (I_IGNPAR(info->port.tty))
			info->ignore_status_mask |= RXSTATUS_OVERRUN;
	}

	/* push the new settings into the hardware */
	mgsl_program_hw(info);

} /* end of mgsl_change_params() */
1990
1991 /* mgsl_put_char()
1992 *
1993 * Add a character to the transmit buffer.
1994 *
1995 * Arguments: tty pointer to tty information structure
1996 * ch character to add to transmit buffer
1997 *
1998 * Return Value: None
1999 */
2000 static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2001 {
2002 struct mgsl_struct *info = tty->driver_data;
2003 unsigned long flags;
2004 int ret = 0;
2005
2006 if (debug_level >= DEBUG_LEVEL_INFO) {
2007 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2008 __FILE__, __LINE__, ch, info->device_name);
2009 }
2010
2011 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2012 return 0;
2013
2014 if (!info->xmit_buf)
2015 return 0;
2016
2017 spin_lock_irqsave(&info->irq_spinlock, flags);
2018
2019 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2020 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2021 info->xmit_buf[info->xmit_head++] = ch;
2022 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2023 info->xmit_cnt++;
2024 ret = 1;
2025 }
2026 }
2027 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2028 return ret;
2029
2030 } /* end of mgsl_put_char() */
2031
/* mgsl_flush_chars()
 *
 * Enable the transmitter so remaining characters in the transmit
 * buffer are sent.  In synchronous modes the accumulated characters
 * are first copied into the transmit DMA buffer as one frame.
 *
 * Arguments:		tty	pointer to tty information structure
 * Return Value:	None
 */
static void mgsl_flush_chars(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
			__FILE__,__LINE__,info->device_name,info->xmit_cnt);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
		return;

	/* nothing to do if no data queued, flow stopped, or buffer gone */
	if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
	    !info->xmit_buf)
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
			__FILE__,__LINE__,info->device_name );

	spin_lock_irqsave(&info->irq_spinlock,flags);

	if (!info->tx_active) {
		if ( (info->params.mode == MGSL_MODE_HDLC ||
			info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
			/* operating in synchronous (frame oriented) mode */
			/* copy data from circular xmit_buf to */
			/* transmit DMA buffer. */
			mgsl_load_tx_dma_buffer(info,
				 info->xmit_buf,info->xmit_cnt);
		}
		usc_start_transmitter(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

} /* end of mgsl_flush_chars() */
2077
/* mgsl_write()
 *
 * Send a block of data.  In HDLC/RAW (synchronous) modes each call
 * represents one complete frame, loaded directly into the transmit
 * DMA buffer (or queued in a holding buffer if the transmitter is
 * busy in RAW mode).  In async mode data is copied into the circular
 * transmit buffer.
 *
 * Arguments:
 *
 * 	tty		pointer to tty information structure
 * 	buf		pointer to buffer containing send data
 * 	count		size of send data in bytes
 *
 * Return Value:	number of characters written (0 = frame not accepted)
 */
static int mgsl_write(struct tty_struct * tty,
		    const unsigned char *buf, int count)
{
	int	c, ret = 0;
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_write(%s) count=%d\n",
			__FILE__,__LINE__,info->device_name,count);

	if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
		goto cleanup;

	if (!info->xmit_buf)
		goto cleanup;

	if ( info->params.mode == MGSL_MODE_HDLC ||
			info->params.mode == MGSL_MODE_RAW ) {
		/* operating in synchronous (frame oriented) mode */
		if (info->tx_active) {

			/* HDLC mode never queues: one frame at a time */
			if ( info->params.mode == MGSL_MODE_HDLC ) {
				ret = 0;
				goto cleanup;
			}
			/* transmitter is actively sending data -
			 * if we have multiple transmit dma and
			 * holding buffers, attempt to queue this
			 * frame for transmission at a later time.
			 */
			if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
				/* no tx holding buffers available */
				ret = 0;
				goto cleanup;
			}

			/* queue transmit frame request */
			ret = count;
			save_tx_buffer_request(info,buf,count);

			/* if we have sufficient tx dma buffers,
			 * load the next buffered tx request
			 */
			spin_lock_irqsave(&info->irq_spinlock,flags);
			load_next_tx_holding_buffer(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			goto cleanup;
		}

		/* if operating in HDLC LoopMode and the adapter  */
		/* has yet to be inserted into the loop, we can't */
		/* transmit					  */

		if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
			!usc_loopmode_active(info) )
		{
			ret = 0;
			goto cleanup;
		}

		if ( info->xmit_cnt ) {
			/* Send accumulated data from send_char() calls */
			/* as a frame and wait before accepting more data. */
			ret = 0;

			/* copy data from circular xmit_buf to */
			/* transmit DMA buffer. */
			mgsl_load_tx_dma_buffer(info,
				info->xmit_buf,info->xmit_cnt);
			if ( debug_level >= DEBUG_LEVEL_INFO )
				printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
					__FILE__,__LINE__,info->device_name);
		} else {
			if ( debug_level >= DEBUG_LEVEL_INFO )
				printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
					__FILE__,__LINE__,info->device_name);
			ret = count;
			info->xmit_cnt = count;
			mgsl_load_tx_dma_buffer(info,buf,count);
		}
	} else {
		/* async mode: fill the circular buffer, possibly in two
		 * chunks when the copy wraps around the buffer end */
		while (1) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			c = min_t(int, count,
				min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
				    SERIAL_XMIT_SIZE - info->xmit_head));
			if (c <= 0) {
				spin_unlock_irqrestore(&info->irq_spinlock,flags);
				break;
			}
			memcpy(info->xmit_buf + info->xmit_head, buf, c);
			info->xmit_head = ((info->xmit_head + c) &
					   (SERIAL_XMIT_SIZE-1));
			info->xmit_cnt += c;
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			buf += c;
			count -= c;
			ret += c;
		}
	}

	/* kick the transmitter if data is pending and flow allows it */
	if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!info->tx_active)
		 	usc_start_transmitter(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
 	}
cleanup:
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_write(%s) returning=%d\n",
			__FILE__,__LINE__,info->device_name,ret);

	return ret;

} /* end of mgsl_write() */
2206
2207 /* mgsl_write_room()
2208 *
2209 * Return the count of free bytes in transmit buffer
2210 *
2211 * Arguments: tty pointer to tty info structure
2212 * Return Value: None
2213 */
2214 static int mgsl_write_room(struct tty_struct *tty)
2215 {
2216 struct mgsl_struct *info = tty->driver_data;
2217 int ret;
2218
2219 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2220 return 0;
2221 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2222 if (ret < 0)
2223 ret = 0;
2224
2225 if (debug_level >= DEBUG_LEVEL_INFO)
2226 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2227 __FILE__,__LINE__, info->device_name,ret );
2228
2229 if ( info->params.mode == MGSL_MODE_HDLC ||
2230 info->params.mode == MGSL_MODE_RAW ) {
2231 /* operating in synchronous (frame oriented) mode */
2232 if ( info->tx_active )
2233 return 0;
2234 else
2235 return HDLC_MAX_FRAME_SIZE;
2236 }
2237
2238 return ret;
2239
2240 } /* end of mgsl_write_room() */
2241
2242 /* mgsl_chars_in_buffer()
2243 *
2244 * Return the count of bytes in transmit buffer
2245 *
2246 * Arguments: tty pointer to tty info structure
2247 * Return Value: None
2248 */
2249 static int mgsl_chars_in_buffer(struct tty_struct *tty)
2250 {
2251 struct mgsl_struct *info = tty->driver_data;
2252
2253 if (debug_level >= DEBUG_LEVEL_INFO)
2254 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2255 __FILE__,__LINE__, info->device_name );
2256
2257 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2258 return 0;
2259
2260 if (debug_level >= DEBUG_LEVEL_INFO)
2261 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2262 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2263
2264 if ( info->params.mode == MGSL_MODE_HDLC ||
2265 info->params.mode == MGSL_MODE_RAW ) {
2266 /* operating in synchronous (frame oriented) mode */
2267 if ( info->tx_active )
2268 return info->max_frame_size;
2269 else
2270 return 0;
2271 }
2272
2273 return info->xmit_cnt;
2274 } /* end of mgsl_chars_in_buffer() */
2275
2276 /* mgsl_flush_buffer()
2277 *
2278 * Discard all data in the send buffer
2279 *
2280 * Arguments: tty pointer to tty info structure
2281 * Return Value: None
2282 */
2283 static void mgsl_flush_buffer(struct tty_struct *tty)
2284 {
2285 struct mgsl_struct *info = tty->driver_data;
2286 unsigned long flags;
2287
2288 if (debug_level >= DEBUG_LEVEL_INFO)
2289 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2290 __FILE__,__LINE__, info->device_name );
2291
2292 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2293 return;
2294
2295 spin_lock_irqsave(&info->irq_spinlock,flags);
2296 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2297 del_timer(&info->tx_timer);
2298 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2299
2300 tty_wakeup(tty);
2301 }
2302
2303 /* mgsl_send_xchar()
2304 *
2305 * Send a high-priority XON/XOFF character
2306 *
2307 * Arguments: tty pointer to tty info structure
2308 * ch character to send
2309 * Return Value: None
2310 */
2311 static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2312 {
2313 struct mgsl_struct *info = tty->driver_data;
2314 unsigned long flags;
2315
2316 if (debug_level >= DEBUG_LEVEL_INFO)
2317 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2318 __FILE__,__LINE__, info->device_name, ch );
2319
2320 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2321 return;
2322
2323 info->x_char = ch;
2324 if (ch) {
2325 /* Make sure transmit interrupts are on */
2326 spin_lock_irqsave(&info->irq_spinlock,flags);
2327 if (!info->tx_enabled)
2328 usc_start_transmitter(info);
2329 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2330 }
2331 } /* end of mgsl_send_xchar() */
2332
2333 /* mgsl_throttle()
2334 *
2335 * Signal remote device to throttle send data (our receive data)
2336 *
2337 * Arguments: tty pointer to tty info structure
2338 * Return Value: None
2339 */
2340 static void mgsl_throttle(struct tty_struct * tty)
2341 {
2342 struct mgsl_struct *info = tty->driver_data;
2343 unsigned long flags;
2344
2345 if (debug_level >= DEBUG_LEVEL_INFO)
2346 printk("%s(%d):mgsl_throttle(%s) entry\n",
2347 __FILE__,__LINE__, info->device_name );
2348
2349 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2350 return;
2351
2352 if (I_IXOFF(tty))
2353 mgsl_send_xchar(tty, STOP_CHAR(tty));
2354
2355 if (C_CRTSCTS(tty)) {
2356 spin_lock_irqsave(&info->irq_spinlock,flags);
2357 info->serial_signals &= ~SerialSignal_RTS;
2358 usc_set_serial_signals(info);
2359 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2360 }
2361 } /* end of mgsl_throttle() */
2362
2363 /* mgsl_unthrottle()
2364 *
2365 * Signal remote device to stop throttling send data (our receive data)
2366 *
2367 * Arguments: tty pointer to tty info structure
2368 * Return Value: None
2369 */
2370 static void mgsl_unthrottle(struct tty_struct * tty)
2371 {
2372 struct mgsl_struct *info = tty->driver_data;
2373 unsigned long flags;
2374
2375 if (debug_level >= DEBUG_LEVEL_INFO)
2376 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2377 __FILE__,__LINE__, info->device_name );
2378
2379 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2380 return;
2381
2382 if (I_IXOFF(tty)) {
2383 if (info->x_char)
2384 info->x_char = 0;
2385 else
2386 mgsl_send_xchar(tty, START_CHAR(tty));
2387 }
2388
2389 if (C_CRTSCTS(tty)) {
2390 spin_lock_irqsave(&info->irq_spinlock,flags);
2391 info->serial_signals |= SerialSignal_RTS;
2392 usc_set_serial_signals(info);
2393 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2394 }
2395
2396 } /* end of mgsl_unthrottle() */
2397
2398 /* mgsl_get_stats()
2399 *
2400 * get the current serial parameters information
2401 *
2402 * Arguments: info pointer to device instance data
2403 * user_icount pointer to buffer to hold returned stats
2404 *
2405 * Return Value: 0 if success, otherwise error code
2406 */
2407 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2408 {
2409 int err;
2410
2411 if (debug_level >= DEBUG_LEVEL_INFO)
2412 printk("%s(%d):mgsl_get_params(%s)\n",
2413 __FILE__,__LINE__, info->device_name);
2414
2415 if (!user_icount) {
2416 memset(&info->icount, 0, sizeof(info->icount));
2417 } else {
2418 mutex_lock(&info->port.mutex);
2419 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2420 mutex_unlock(&info->port.mutex);
2421 if (err)
2422 return -EFAULT;
2423 }
2424
2425 return 0;
2426
2427 } /* end of mgsl_get_stats() */
2428
2429 /* mgsl_get_params()
2430 *
2431 * get the current serial parameters information
2432 *
2433 * Arguments: info pointer to device instance data
2434 * user_params pointer to buffer to hold returned params
2435 *
2436 * Return Value: 0 if success, otherwise error code
2437 */
2438 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2439 {
2440 int err;
2441 if (debug_level >= DEBUG_LEVEL_INFO)
2442 printk("%s(%d):mgsl_get_params(%s)\n",
2443 __FILE__,__LINE__, info->device_name);
2444
2445 mutex_lock(&info->port.mutex);
2446 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2447 mutex_unlock(&info->port.mutex);
2448 if (err) {
2449 if ( debug_level >= DEBUG_LEVEL_INFO )
2450 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2451 __FILE__,__LINE__,info->device_name);
2452 return -EFAULT;
2453 }
2454
2455 return 0;
2456
2457 } /* end of mgsl_get_params() */
2458
2459 /* mgsl_set_params()
2460 *
2461 * set the serial parameters
2462 *
2463 * Arguments:
2464 *
2465 * info pointer to device instance data
2466 * new_params user buffer containing new serial params
2467 *
2468 * Return Value: 0 if success, otherwise error code
2469 */
2470 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2471 {
2472 unsigned long flags;
2473 MGSL_PARAMS tmp_params;
2474 int err;
2475
2476 if (debug_level >= DEBUG_LEVEL_INFO)
2477 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2478 info->device_name );
2479 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2480 if (err) {
2481 if ( debug_level >= DEBUG_LEVEL_INFO )
2482 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2483 __FILE__,__LINE__,info->device_name);
2484 return -EFAULT;
2485 }
2486
2487 mutex_lock(&info->port.mutex);
2488 spin_lock_irqsave(&info->irq_spinlock,flags);
2489 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2490 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2491
2492 mgsl_change_params(info);
2493 mutex_unlock(&info->port.mutex);
2494
2495 return 0;
2496
2497 } /* end of mgsl_set_params() */
2498
2499 /* mgsl_get_txidle()
2500 *
2501 * get the current transmit idle mode
2502 *
2503 * Arguments: info pointer to device instance data
2504 * idle_mode pointer to buffer to hold returned idle mode
2505 *
2506 * Return Value: 0 if success, otherwise error code
2507 */
2508 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2509 {
2510 int err;
2511
2512 if (debug_level >= DEBUG_LEVEL_INFO)
2513 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2514 __FILE__,__LINE__, info->device_name, info->idle_mode);
2515
2516 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2517 if (err) {
2518 if ( debug_level >= DEBUG_LEVEL_INFO )
2519 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2520 __FILE__,__LINE__,info->device_name);
2521 return -EFAULT;
2522 }
2523
2524 return 0;
2525
2526 } /* end of mgsl_get_txidle() */
2527
2528 /* mgsl_set_txidle() service ioctl to set transmit idle mode
2529 *
2530 * Arguments: info pointer to device instance data
2531 * idle_mode new idle mode
2532 *
2533 * Return Value: 0 if success, otherwise error code
2534 */
2535 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2536 {
2537 unsigned long flags;
2538
2539 if (debug_level >= DEBUG_LEVEL_INFO)
2540 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2541 info->device_name, idle_mode );
2542
2543 spin_lock_irqsave(&info->irq_spinlock,flags);
2544 info->idle_mode = idle_mode;
2545 usc_set_txidle( info );
2546 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2547 return 0;
2548
2549 } /* end of mgsl_set_txidle() */
2550
2551 /* mgsl_txenable()
2552 *
2553 * enable or disable the transmitter
2554 *
2555 * Arguments:
2556 *
2557 * info pointer to device instance data
2558 * enable 1 = enable, 0 = disable
2559 *
2560 * Return Value: 0 if success, otherwise error code
2561 */
2562 static int mgsl_txenable(struct mgsl_struct * info, int enable)
2563 {
2564 unsigned long flags;
2565
2566 if (debug_level >= DEBUG_LEVEL_INFO)
2567 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2568 info->device_name, enable);
2569
2570 spin_lock_irqsave(&info->irq_spinlock,flags);
2571 if ( enable ) {
2572 if ( !info->tx_enabled ) {
2573
2574 usc_start_transmitter(info);
2575 /*--------------------------------------------------
2576 * if HDLC/SDLC Loop mode, attempt to insert the
2577 * station in the 'loop' by setting CMR:13. Upon
2578 * receipt of the next GoAhead (RxAbort) sequence,
2579 * the OnLoop indicator (CCSR:7) should go active
2580 * to indicate that we are on the loop
2581 *--------------------------------------------------*/
2582 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2583 usc_loopmode_insert_request( info );
2584 }
2585 } else {
2586 if ( info->tx_enabled )
2587 usc_stop_transmitter(info);
2588 }
2589 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2590 return 0;
2591
2592 } /* end of mgsl_txenable() */
2593
2594 /* mgsl_txabort() abort send HDLC frame
2595 *
2596 * Arguments: info pointer to device instance data
2597 * Return Value: 0 if success, otherwise error code
2598 */
2599 static int mgsl_txabort(struct mgsl_struct * info)
2600 {
2601 unsigned long flags;
2602
2603 if (debug_level >= DEBUG_LEVEL_INFO)
2604 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2605 info->device_name);
2606
2607 spin_lock_irqsave(&info->irq_spinlock,flags);
2608 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2609 {
2610 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2611 usc_loopmode_cancel_transmit( info );
2612 else
2613 usc_TCmd(info,TCmd_SendAbort);
2614 }
2615 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2616 return 0;
2617
2618 } /* end of mgsl_txabort() */
2619
2620 /* mgsl_rxenable() enable or disable the receiver
2621 *
2622 * Arguments: info pointer to device instance data
2623 * enable 1 = enable, 0 = disable
2624 * Return Value: 0 if success, otherwise error code
2625 */
2626 static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2627 {
2628 unsigned long flags;
2629
2630 if (debug_level >= DEBUG_LEVEL_INFO)
2631 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2632 info->device_name, enable);
2633
2634 spin_lock_irqsave(&info->irq_spinlock,flags);
2635 if ( enable ) {
2636 if ( !info->rx_enabled )
2637 usc_start_receiver(info);
2638 } else {
2639 if ( info->rx_enabled )
2640 usc_stop_receiver(info);
2641 }
2642 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2643 return 0;
2644
2645 } /* end of mgsl_rxenable() */
2646
/* mgsl_wait_event()
 *
 * Wait for one of the specified serial-signal / receiver events to
 * occur.  Returns immediately if the current signal state already
 * matches a requested event; otherwise sleeps on event_wait_q until
 * an interrupt-driven counter changes.
 *
 * Arguments:	info		pointer to device instance data
 * 		mask_ptr	pointer to bitmask of events to wait for
 * Return Value:	0 if successful and bit mask updated with
 *			the events triggered,
 * 			otherwise error code
 */
static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
{
 	unsigned long flags;
	int s;
	int rc=0;
	struct mgsl_icount cprev, cnow;
	int events;
	int mask;
	struct	_input_signal_events oldsigs, newsigs;
	DECLARE_WAITQUEUE(wait, current);

	COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
	if (rc) {
		return  -EFAULT;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
			info->device_name, mask);

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* return immediately if state matches requested events */
	usc_get_serial_signals(info);
	s = info->serial_signals;
	events = mask &
		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
 		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
	if (events) {
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
		goto exit;
	}

	/* save current irq counts so changes can be detected after sleep */
	cprev = info->icount;
	oldsigs = info->input_signal_events;

	/* enable hunt and idle irqs if needed */
	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		u16 oldreg = usc_InReg(info,RICR);
		u16 newreg = oldreg +
			 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
			 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
		if (oldreg != newreg)
			usc_OutReg(info, RICR, newreg);
	}

	/* queue on the wait queue BEFORE dropping the lock so a wakeup
	 * between unlock and schedule() is not lost */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&info->event_wait_q, &wait);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get current irq counts */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		cnow = info->icount;
		newsigs = info->input_signal_events;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		/* if no change, wait aborted for some reason */
		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
		    newsigs.dsr_down == oldsigs.dsr_down &&
		    newsigs.dcd_up   == oldsigs.dcd_up   &&
		    newsigs.dcd_down == oldsigs.dcd_down &&
		    newsigs.cts_up   == oldsigs.cts_up   &&
		    newsigs.cts_down == oldsigs.cts_down &&
		    newsigs.ri_up    == oldsigs.ri_up    &&
		    newsigs.ri_down  == oldsigs.ri_down  &&
		    cnow.exithunt    == cprev.exithunt   &&
		    cnow.rxidle      == cprev.rxidle) {
			rc = -EIO;
			break;
		}

		/* translate counter deltas into the caller's event bits */
		events = mask &
			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
			  (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
			  (newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
			  (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
			  (newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
			  (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
			  (newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
			  (newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
			  (cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
			  (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
		if (events)
			break;

		cprev = cnow;
		oldsigs = newsigs;
	}

	remove_wait_queue(&info->event_wait_q, &wait);
	set_current_state(TASK_RUNNING);

	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		spin_lock_irqsave(&info->irq_spinlock,flags);
		if (!waitqueue_active(&info->event_wait_q)) {
			/* disable enable exit hunt mode/idle rcvd IRQs */
			usc_OutReg(info, RICR, usc_InReg(info,RICR) &
				~(RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED));
		}
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}
exit:
	if ( rc == 0 )
		PUT_USER(rc, events, mask_ptr);

	return rc;

} /* end of mgsl_wait_event() */
2776
/* modem_input_wait()
 *
 * Block until one of the modem inputs selected by arg
 * (TIOCM_RNG/DSR/CD/CTS) changes state; backs the TIOCMIWAIT ioctl.
 *
 * Arguments:	info	pointer to device instance data
 * 		arg	bitmask of TIOCM_* inputs to wait on
 * Return Value:	0 when a selected input changed,
 * 			-ERESTARTSYS if interrupted by a signal,
 * 			-EIO if woken with no counter change
 */
static int modem_input_wait(struct mgsl_struct *info,int arg)
{
 	unsigned long flags;
	int rc;
	struct mgsl_icount cprev, cnow;
	DECLARE_WAITQUEUE(wait, current);

	/* save current irq counts; queue on the wait queue before
	 * dropping the lock so no wakeup is lost */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	cprev = info->icount;
	add_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get new irq counts */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		cnow = info->icount;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		/* if no change, wait aborted for some reason */
		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
			rc = -EIO;
			break;
		}

		/* check for change in caller specified modem input */
		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
			rc = 0;
			break;
		}

		cprev = cnow;
	}
	remove_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_RUNNING);
	return rc;
}
2826
2827 /* return the state of the serial control and status signals
2828 */
2829 static int tiocmget(struct tty_struct *tty)
2830 {
2831 struct mgsl_struct *info = tty->driver_data;
2832 unsigned int result;
2833 unsigned long flags;
2834
2835 spin_lock_irqsave(&info->irq_spinlock,flags);
2836 usc_get_serial_signals(info);
2837 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2838
2839 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2840 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2841 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2842 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2843 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2844 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2845
2846 if (debug_level >= DEBUG_LEVEL_INFO)
2847 printk("%s(%d):%s tiocmget() value=%08X\n",
2848 __FILE__,__LINE__, info->device_name, result );
2849 return result;
2850 }
2851
2852 /* set modem control signals (DTR/RTS)
2853 */
2854 static int tiocmset(struct tty_struct *tty,
2855 unsigned int set, unsigned int clear)
2856 {
2857 struct mgsl_struct *info = tty->driver_data;
2858 unsigned long flags;
2859
2860 if (debug_level >= DEBUG_LEVEL_INFO)
2861 printk("%s(%d):%s tiocmset(%x,%x)\n",
2862 __FILE__,__LINE__,info->device_name, set, clear);
2863
2864 if (set & TIOCM_RTS)
2865 info->serial_signals |= SerialSignal_RTS;
2866 if (set & TIOCM_DTR)
2867 info->serial_signals |= SerialSignal_DTR;
2868 if (clear & TIOCM_RTS)
2869 info->serial_signals &= ~SerialSignal_RTS;
2870 if (clear & TIOCM_DTR)
2871 info->serial_signals &= ~SerialSignal_DTR;
2872
2873 spin_lock_irqsave(&info->irq_spinlock,flags);
2874 usc_set_serial_signals(info);
2875 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2876
2877 return 0;
2878 }
2879
2880 /* mgsl_break() Set or clear transmit break condition
2881 *
2882 * Arguments: tty pointer to tty instance data
2883 * break_state -1=set break condition, 0=clear
2884 * Return Value: error code
2885 */
2886 static int mgsl_break(struct tty_struct *tty, int break_state)
2887 {
2888 struct mgsl_struct * info = tty->driver_data;
2889 unsigned long flags;
2890
2891 if (debug_level >= DEBUG_LEVEL_INFO)
2892 printk("%s(%d):mgsl_break(%s,%d)\n",
2893 __FILE__,__LINE__, info->device_name, break_state);
2894
2895 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2896 return -EINVAL;
2897
2898 spin_lock_irqsave(&info->irq_spinlock,flags);
2899 if (break_state == -1)
2900 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2901 else
2902 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2903 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2904 return 0;
2905
2906 } /* end of mgsl_break() */
2907
2908 /*
2909 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2910 * Return: write counters to the user passed counter struct
2911 * NB: both 1->0 and 0->1 transitions are counted except for
2912 * RI where only 0->1 is counted.
2913 */
2914 static int msgl_get_icount(struct tty_struct *tty,
2915 struct serial_icounter_struct *icount)
2916
2917 {
2918 struct mgsl_struct * info = tty->driver_data;
2919 struct mgsl_icount cnow; /* kernel counter temps */
2920 unsigned long flags;
2921
2922 spin_lock_irqsave(&info->irq_spinlock,flags);
2923 cnow = info->icount;
2924 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2925
2926 icount->cts = cnow.cts;
2927 icount->dsr = cnow.dsr;
2928 icount->rng = cnow.rng;
2929 icount->dcd = cnow.dcd;
2930 icount->rx = cnow.rx;
2931 icount->tx = cnow.tx;
2932 icount->frame = cnow.frame;
2933 icount->overrun = cnow.overrun;
2934 icount->parity = cnow.parity;
2935 icount->brk = cnow.brk;
2936 icount->buf_overrun = cnow.buf_overrun;
2937 return 0;
2938 }
2939
2940 /* mgsl_ioctl() Service an IOCTL request
2941 *
2942 * Arguments:
2943 *
2944 * tty pointer to tty instance data
2945 * cmd IOCTL command code
2946 * arg command argument/context
2947 *
2948 * Return Value: 0 if success, otherwise error code
2949 */
2950 static int mgsl_ioctl(struct tty_struct *tty,
2951 unsigned int cmd, unsigned long arg)
2952 {
2953 struct mgsl_struct * info = tty->driver_data;
2954
2955 if (debug_level >= DEBUG_LEVEL_INFO)
2956 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2957 info->device_name, cmd );
2958
2959 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2960 return -ENODEV;
2961
2962 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2963 (cmd != TIOCMIWAIT)) {
2964 if (tty_io_error(tty))
2965 return -EIO;
2966 }
2967
2968 return mgsl_ioctl_common(info, cmd, arg);
2969 }
2970
2971 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2972 {
2973 void __user *argp = (void __user *)arg;
2974
2975 switch (cmd) {
2976 case MGSL_IOCGPARAMS:
2977 return mgsl_get_params(info, argp);
2978 case MGSL_IOCSPARAMS:
2979 return mgsl_set_params(info, argp);
2980 case MGSL_IOCGTXIDLE:
2981 return mgsl_get_txidle(info, argp);
2982 case MGSL_IOCSTXIDLE:
2983 return mgsl_set_txidle(info,(int)arg);
2984 case MGSL_IOCTXENABLE:
2985 return mgsl_txenable(info,(int)arg);
2986 case MGSL_IOCRXENABLE:
2987 return mgsl_rxenable(info,(int)arg);
2988 case MGSL_IOCTXABORT:
2989 return mgsl_txabort(info);
2990 case MGSL_IOCGSTATS:
2991 return mgsl_get_stats(info, argp);
2992 case MGSL_IOCWAITEVENT:
2993 return mgsl_wait_event(info, argp);
2994 case MGSL_IOCLOOPTXDONE:
2995 return mgsl_loopmode_send_done(info);
2996 /* Wait for modem input (DCD,RI,DSR,CTS) change
2997 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
2998 */
2999 case TIOCMIWAIT:
3000 return modem_input_wait(info,(int)arg);
3001
3002 default:
3003 return -ENOIOCTLCMD;
3004 }
3005 return 0;
3006 }
3007
/* mgsl_set_termios()
 *
 * 	Set new termios settings
 *
 * Arguments:
 *
 * 	tty		pointer to tty structure
 * 	termios		pointer to buffer to hold returned old termios
 *
 * Return Value:		None
 */
static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
			tty->driver->name );

	/* reprogram the hardware for the new termios settings */
	mgsl_change_params(info);

	/* Handle transition to B0 status: baud rate dropped to zero
	 * requests a hangup, so drop RTS and DTR.
	 * NOTE(review): serial_signals is modified before the spinlock is
	 * taken; only the hardware update is locked — verify intent. */
	if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
		spin_lock_irqsave(&info->irq_spinlock,flags);
	 	usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

	/* Handle transition away from B0 status: raise DTR, and RTS too
	 * unless hardware flow control is on and we are throttled. */
	if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
		info->serial_signals |= SerialSignal_DTR;
 		if (!C_CRTSCTS(tty) || !tty_throttled(tty))
			info->serial_signals |= SerialSignal_RTS;
		spin_lock_irqsave(&info->irq_spinlock,flags);
	 	usc_set_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);
	}

	/* Handle turning off CRTSCTS: a CTS-stopped transmitter must be
	 * restarted since CTS will no longer be honored. */
	if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
		tty->hw_stopped = 0;
		mgsl_start(tty);
	}

} /* end of mgsl_set_termios() */
3055
/* mgsl_close()
 *
 * 	Called when port is closed. Wait for remaining data to be
 * 	sent. Disable port and free resources.
 *
 * Arguments:
 *
 * 	tty	pointer to open tty structure
 * 	filp	pointer to open file object
 *
 * Return Value:	None
 */
static void mgsl_close(struct tty_struct *tty, struct file * filp)
{
	struct mgsl_struct * info = tty->driver_data;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
			 __FILE__,__LINE__, info->device_name, info->port.count);

	/* returns 0 when this is not the final close of the port
	 * (or the close was aborted); nothing to shut down then */
	if (tty_port_close_start(&info->port, tty, filp) == 0)
		goto cleanup;

	/* Final close: drain pending transmit data, flush buffers, then
	 * disable the hardware.  port.mutex serializes against open. */
	mutex_lock(&info->port.mutex);
	if (tty_port_initialized(&info->port))
 		mgsl_wait_until_sent(tty, info->timeout);
	mgsl_flush_buffer(tty);
	tty_ldisc_flush(tty);
	shutdown(info);
	mutex_unlock(&info->port.mutex);

	tty_port_close_end(&info->port, tty);
	info->port.tty = NULL;
cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
			tty->driver->name, info->port.count);

} /* end of mgsl_close() */
3098
/* mgsl_wait_until_sent()
 *
 *	Wait until the transmitter is empty.
 *
 * Arguments:
 *
 *	tty		pointer to tty info structure
 *	timeout		time to wait for send completion (jiffies; 0 = no limit)
 *
 * Return Value:	None
 */
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
{
	struct mgsl_struct * info = tty->driver_data;
	unsigned long orig_jiffies, char_time;

	if (!info )
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
		return;

	/* nothing to drain if the hardware was never started */
	if (!tty_port_initialized(&info->port))
		goto exit;

	orig_jiffies = jiffies;

	/* Set check interval to 1/5 of estimated time to
	 * send a character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 * Note: use tight timings here to satisfy the NIST-PCTS.
	 */

	if ( info->params.data_rate ) {
		/* char_time is in jiffies; derived from info->timeout
		 * (TODO confirm the 32x divisor against where timeout is set) */
		char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;

	/* never poll less often than the caller's deadline */
	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* synchronous modes: poll the driver's tx_active flag
		 * (presumably cleared by the ISR when the frame completes) */
		while (info->tx_active) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	} else {
		/* async mode: poll the USC transmit status register until
		 * ALL_SENT reports the shifter empty, or tx is disabled */
		while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
			info->tx_enabled) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	}

exit:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
			 __FILE__,__LINE__, info->device_name );

} /* end of mgsl_wait_until_sent() */
3172
/* mgsl_hangup()
 *
 *	Called by tty_hangup() when a hangup is signaled.
 *	This is the same as to closing all open files for the port.
 *
 * Arguments:		tty	pointer to associated tty object
 * Return Value:	None
 */
static void mgsl_hangup(struct tty_struct *tty)
{
	struct mgsl_struct * info = tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_hangup(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
		return;

	/* discard pending data and disable the hardware */
	mgsl_flush_buffer(tty);
	shutdown(info);

	/* drop all open references and mark the port inactive */
	info->port.count = 0;
	tty_port_set_active(&info->port, 0);
	info->port.tty = NULL;

	/* release anyone blocked in block_til_ready() */
	wake_up_interruptible(&info->port.open_wait);

} /* end of mgsl_hangup() */
3202
3203 /*
3204 * carrier_raised()
3205 *
3206 * Return true if carrier is raised
3207 */
3208
3209 static int carrier_raised(struct tty_port *port)
3210 {
3211 unsigned long flags;
3212 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3213
3214 spin_lock_irqsave(&info->irq_spinlock, flags);
3215 usc_get_serial_signals(info);
3216 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3217 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3218 }
3219
3220 static void dtr_rts(struct tty_port *port, int on)
3221 {
3222 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3223 unsigned long flags;
3224
3225 spin_lock_irqsave(&info->irq_spinlock,flags);
3226 if (on)
3227 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
3228 else
3229 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3230 usc_set_serial_signals(info);
3231 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3232 }
3233
3234
/* block_til_ready()
 *
 * 	Block the current process until the specified port
 * 	is ready to be opened.
 *
 * Arguments:
 *
 * 	tty		pointer to tty info structure
 * 	filp		pointer to open file object
 * 	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise error code
 */
static int block_til_ready(struct tty_struct *tty, struct file * filp,
			   struct mgsl_struct *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int		retval;
	bool		do_clocal = false;
	unsigned long	flags;
	int		dcd;
	struct tty_port *port = &info->port;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready on %s\n",
			 __FILE__,__LINE__, tty->driver->name );

	if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
		/* nonblock mode is set or port is not enabled */
		tty_port_set_active(port, 1);
		return 0;
	}

	/* CLOCAL means carrier detect is ignored for the open */
	if (C_CLOCAL(tty))
		do_clocal = true;

	/* Wait for carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, port->count is dropped by one, so that
	 * mgsl_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */

	retval = 0;
	add_wait_queue(&port->open_wait, &wait);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready before block on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, port->count );

	/* drop our reference under the lock; restored below on exit */
	spin_lock_irqsave(&info->irq_spinlock, flags);
	port->count--;
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	port->blocked_open++;

	while (1) {
		/* re-raise DTR/RTS each pass in case a hangup dropped them */
		if (C_BAUD(tty) && tty_port_initialized(port))
			tty_port_raise_dtr_rts(port);

		/* must set state BEFORE the checks below to avoid losing
		 * a wakeup between the check and schedule() */
		set_current_state(TASK_INTERRUPTIBLE);

		if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
			retval = (port->flags & ASYNC_HUP_NOTIFY) ?
					-EAGAIN : -ERESTARTSYS;
			break;
		}

		/* open completes when carrier is up (or ignored) */
		dcd = tty_port_carrier_raised(&info->port);
		if (do_clocal || dcd)
			break;

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):block_til_ready blocking on %s count=%d\n",
				 __FILE__,__LINE__, tty->driver->name, port->count );

		/* release the tty lock while sleeping so close/hangup can run */
		tty_unlock(tty);
		schedule();
		tty_lock(tty);
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->open_wait, &wait);

	/* FIXME: Racy on hangup during close wait */
	if (!tty_hung_up_p(filp))
		port->count++;
	port->blocked_open--;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, port->count );

	if (!retval)
		tty_port_set_active(port, 1);

	return retval;

} /* end of block_til_ready() */
3338
3339 static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty)
3340 {
3341 struct mgsl_struct *info;
3342 int line = tty->index;
3343
3344 /* verify range of specified line number */
3345 if (line >= mgsl_device_count) {
3346 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3347 __FILE__, __LINE__, line);
3348 return -ENODEV;
3349 }
3350
3351 /* find the info structure for the specified line */
3352 info = mgsl_device_list;
3353 while (info && info->line != line)
3354 info = info->next_device;
3355 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3356 return -ENODEV;
3357 tty->driver_data = info;
3358
3359 return tty_port_install(&info->port, driver, tty);
3360 }
3361
/* mgsl_open()
 *
 *	Called when a port is opened.  Init and enable port.
 *	Perform serial-specific initialization for the tty structure.
 *
 * Arguments:		tty	pointer to tty info structure
 *			filp	associated file pointer
 *
 * Return Value:	0 if success, otherwise error code
 */
static int mgsl_open(struct tty_struct *tty, struct file * filp)
{
	struct mgsl_struct	*info = tty->driver_data;
	unsigned long flags;
	int			retval;

	info->port.tty = tty;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
			 __FILE__,__LINE__,tty->driver->name, info->port.count);

	info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;

	/* the tty and network interfaces are mutually exclusive;
	 * netlock guards netcount and the open count bump */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->netcount) {
		retval = -EBUSY;
		spin_unlock_irqrestore(&info->netlock, flags);
		goto cleanup;
	}
	info->port.count++;
	spin_unlock_irqrestore(&info->netlock, flags);

	if (info->port.count == 1) {
		/* 1st open on this device, init hardware */
		retval = startup(info);
		if (retval < 0)
			goto cleanup;
	}

	/* may sleep waiting for carrier; see block_til_ready() */
	retval = block_til_ready(tty, filp, info);
	if (retval) {
		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):block_til_ready(%s) returned %d\n",
				 __FILE__,__LINE__, info->device_name, retval);
		goto cleanup;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_open(%s) success\n",
			 __FILE__,__LINE__, info->device_name);
	retval = 0;

cleanup:
	/* on failure, undo the reference taken above */
	if (retval) {
		if (tty->count == 1)
			info->port.tty = NULL; /* tty layer will release tty struct */
		if(info->port.count)
			info->port.count--;
	}

	return retval;

} /* end of mgsl_open() */
3426
3427 /*
3428 * /proc fs routines....
3429 */
3430
/* Emit a one-device /proc report: bus resources, serial signal state,
 * sync or async statistics, bottom-half state, and a USC register dump. */
static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
{
	char	stat_buf[30];	/* holds "|RTS|CTS|DTR|DSR|CD|RI" worst case */
	unsigned long flags;

	if (info->bus_type == MGSL_BUS_TYPE_PCI) {
		seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
			info->device_name, info->io_base, info->irq_level,
			info->phys_memory_base, info->phys_lcr_base);
	} else {
		seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
			info->device_name, info->io_base,
			info->irq_level, info->dma_level);
	}

	/* output current serial signal states */
	spin_lock_irqsave(&info->irq_spinlock,flags);
 	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* build a "|SIG|SIG..." string; printed below without the
	 * leading '|' via stat_buf+1 */
	stat_buf[0] = 0;
	stat_buf[1] = 0;
	if (info->serial_signals & SerialSignal_RTS)
		strcat(stat_buf, "|RTS");
	if (info->serial_signals & SerialSignal_CTS)
		strcat(stat_buf, "|CTS");
	if (info->serial_signals & SerialSignal_DTR)
		strcat(stat_buf, "|DTR");
	if (info->serial_signals & SerialSignal_DSR)
		strcat(stat_buf, "|DSR");
	if (info->serial_signals & SerialSignal_DCD)
		strcat(stat_buf, "|CD");
	if (info->serial_signals & SerialSignal_RI)
		strcat(stat_buf, "|RI");

	/* nonzero error counters are only printed when present */
	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ) {
		seq_printf(m, " HDLC txok:%d rxok:%d",
			      info->icount.txok, info->icount.rxok);
		if (info->icount.txunder)
			seq_printf(m, " txunder:%d", info->icount.txunder);
		if (info->icount.txabort)
			seq_printf(m, " txabort:%d", info->icount.txabort);
		if (info->icount.rxshort)
			seq_printf(m, " rxshort:%d", info->icount.rxshort);
		if (info->icount.rxlong)
			seq_printf(m, " rxlong:%d", info->icount.rxlong);
		if (info->icount.rxover)
			seq_printf(m, " rxover:%d", info->icount.rxover);
		if (info->icount.rxcrc)
			seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
	} else {
		seq_printf(m, " ASYNC tx:%d rx:%d",
			      info->icount.tx, info->icount.rx);
		if (info->icount.frame)
			seq_printf(m, " fe:%d", info->icount.frame);
		if (info->icount.parity)
			seq_printf(m, " pe:%d", info->icount.parity);
		if (info->icount.brk)
			seq_printf(m, " brk:%d", info->icount.brk);
		if (info->icount.overrun)
			seq_printf(m, " oe:%d", info->icount.overrun);
	}

	/* Append serial signal status to end */
	seq_printf(m, " %s\n", stat_buf+1);

	seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
	 info->tx_active,info->bh_requested,info->bh_running,
	 info->pending_bh);

	/* snapshot the USC registers under the irq lock so the dump
	 * is internally consistent */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	{
	u16 Tcsr = usc_InReg( info, TCSR );
	u16 Tdmr = usc_InDmaReg( info, TDMR );
	u16 Ticr = usc_InReg( info, TICR );
	u16 Rscr = usc_InReg( info, RCSR );
	u16 Rdmr = usc_InDmaReg( info, RDMR );
	u16 Ricr = usc_InReg( info, RICR );
	u16 Icr = usc_InReg( info, ICR );
	u16 Dccr = usc_InReg( info, DCCR );
	u16 Tmr = usc_InReg( info, TMR );
	u16 Tccr = usc_InReg( info, TCCR );
	u16 Ccar = inw( info->io_base + CCAR );
	seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
                      "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
	 	Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
3521
3522 /* Called to print information about devices */
3523 static int mgsl_proc_show(struct seq_file *m, void *v)
3524 {
3525 struct mgsl_struct *info;
3526
3527 seq_printf(m, "synclink driver:%s\n", driver_version);
3528
3529 info = mgsl_device_list;
3530 while( info ) {
3531 line_info(m, info);
3532 info = info->next_device;
3533 }
3534 return 0;
3535 }
3536
/* /proc open hook: attach the single-shot seq_file show routine */
static int mgsl_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mgsl_proc_show, NULL);
}
3541
/* file_operations for the driver's /proc entry; standard seq_file
 * plumbing around mgsl_proc_open()/mgsl_proc_show() */
static const struct file_operations mgsl_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mgsl_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3549
/* mgsl_allocate_dma_buffers()
 *
 * 	Allocate and format DMA buffers (ISA adapter)
 * 	or format shared memory buffers (PCI adapter).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
{
	unsigned short BuffersPerFrame;

	/* bump allocator offset into PCI shared memory starts at zero */
	info->last_mem_alloc = 0;

	/* Calculate the number of DMA buffers necessary to hold the */
	/* largest allowable frame size. Note: If the max frame size is */
	/* not an even multiple of the DMA buffer size then we need to */
	/* round the buffer count per frame up one. */

	BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
	if ( info->max_frame_size % DMABUFFERSIZE )
		BuffersPerFrame++;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/*
		 * The PCI adapter has 256KBytes of shared memory to use.
		 * This is 64 PAGE_SIZE buffers.
		 *
		 * The first page is used for padding at this time so the
		 * buffer list does not begin at offset 0 of the PCI
		 * adapter's shared memory.
		 *
		 * The 2nd page is used for the buffer list. A 4K buffer
		 * list can hold 128 DMA_BUFFER structures at 32 bytes
		 * each.
		 *
		 * This leaves 62 4K pages.
		 *
		 * The next N pages are used for transmit frame(s). We
		 * reserve enough 4K page blocks to hold the required
		 * number of transmit dma buffers (num_tx_dma_buffers),
		 * each of MaxFrameSize size.
		 *
		 * Of the remaining pages (62-N), determine how many can
		 * be used to receive full MaxFrameSize inbound frames
		 */
		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
		info->rx_buffer_count = 62 - info->tx_buffer_count;
	} else {
		/* Calculate the number of PAGE_SIZE buffers needed for */
		/* receive and transmit DMA buffers. */


		/* Calculate the number of DMA buffers necessary to */
		/* hold 7 max size receive frames and one max size transmit frame. */
		/* The receive buffer count is padded so we avoid an */
		/* End of List condition if all receive buffers are used when */
		/* using linked list DMA buffers. */
		/* NOTE(review): older comment said "bumped by one" but the */
		/* code adds 6 — presumably intentional headroom; confirm. */

		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
		info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;

		/*
		 * limit total TxBuffers & RxBuffers to 62 4K total
		 * (ala PCI Allocation)
		 */

		if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
			info->rx_buffer_count = 62 - info->tx_buffer_count;

	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
			__FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);

	/* allocate the list memory, the per-buffer frame memory, and the
	 * intermediate staging buffers; any failure aborts the whole setup */
	if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
		  mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
		  mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
		  mgsl_alloc_intermediate_rxbuffer_memory(info) < 0  ||
		  mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
		printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
		return -ENOMEM;
	}

	mgsl_reset_rx_dma_buffers( info );
  	mgsl_reset_tx_dma_buffers( info );

	return 0;

} /* end of mgsl_allocate_dma_buffers() */
3641
/*
 * mgsl_alloc_buffer_list_memory()
 *
 * Allocate a common DMA buffer for use as the
 * receive and transmit buffer lists.
 *
 * A buffer list is a set of buffer entries where each entry contains
 * a pointer to an actual buffer and a pointer to the next buffer entry
 * (plus some other info about the buffer).
 *
 * The buffer entries for a list are built to form a circular list so
 * that when the entire list has been traversed you start back at the
 * beginning.
 *
 * This function allocates memory for just the buffer entries.
 * The links (pointer to next entry) are filled in with the physical
 * address of the next entry so the adapter can navigate the list
 * using bus master DMA. The pointers to the actual buffers are filled
 * out later when the actual buffers are allocated.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
{
	unsigned int i;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* PCI adapter uses shared memory: carve the list from the
		 * on-card memory via the last_mem_alloc bump allocator.
		 * phys values here are offsets into that shared memory. */
		info->buffer_list = info->memory_base + info->last_mem_alloc;
		info->buffer_list_phys = info->last_mem_alloc;
		info->last_mem_alloc += BUFFERLISTSIZE;
	} else {
		/* ISA adapter uses system memory. */
		/* The buffer lists are allocated as a common buffer that both */
		/* the processor and adapter can access. This allows the driver to */
		/* inspect portions of the buffer while other portions are being */
		/* updated by the adapter using Bus Master DMA. */

		info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
		if (info->buffer_list == NULL)
			return -ENOMEM;
		info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
	}

	/* We got the memory for the buffer entry lists. */
	/* Initialize the memory block to all zeros. */
	memset( info->buffer_list, 0, BUFFERLISTSIZE );

	/* Save virtual address pointers to the receive and */
	/* transmit buffer lists. (Receive 1st). These pointers will */
	/* be used by the processor to access the lists. */
	info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list += info->rx_buffer_count;

	/*
	 * Build the links for the buffer entry lists such that
	 * two circular lists are built. (Transmit and Receive).
	 *
	 * Note: the links are physical addresses
	 * which are read by the adapter to determine the next
	 * buffer entry to use.
	 */

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->rx_buffer_list[i].phys_entry =
			info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in cirular list of entries */
		/* (the last entry's default link of buffer_list_phys
		 * closes the circle back to entry 0) */

		info->rx_buffer_list[i].link = info->buffer_list_phys;

		if ( i < info->rx_buffer_count - 1 )
			info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
			((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in cirular list of entries */
		/* (last entry links back to the first tx entry) */

		info->tx_buffer_list[i].link = info->buffer_list_phys +
			info->rx_buffer_count * sizeof(DMABUFFERENTRY);

		if ( i < info->tx_buffer_count - 1 )
			info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	return 0;

} /* end of mgsl_alloc_buffer_list_memory() */
3739
3740 /* Free DMA buffers allocated for use as the
3741 * receive and transmit buffer lists.
3742 * Warning:
3743 *
3744 * The data transfer buffers associated with the buffer list
3745 * MUST be freed before freeing the buffer list itself because
3746 * the buffer list contains the information necessary to free
3747 * the individual buffers!
3748 */
3749 static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3750 {
3751 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3752 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3753
3754 info->buffer_list = NULL;
3755 info->rx_buffer_list = NULL;
3756 info->tx_buffer_list = NULL;
3757
3758 } /* end of mgsl_free_buffer_list_memory() */
3759
3760 /*
3761 * mgsl_alloc_frame_memory()
3762 *
3763 * Allocate the frame DMA buffers used by the specified buffer list.
3764 * Each DMA buffer will be one memory page in size. This is necessary
3765 * because memory can fragment enough that it may be impossible
3766 * contiguous pages.
3767 *
3768 * Arguments:
3769 *
3770 * info pointer to device instance data
3771 * BufferList pointer to list of buffer entries
3772 * Buffercount count of buffer entries in buffer list
3773 *
3774 * Return Value: 0 if success, otherwise -ENOMEM
3775 */
3776 static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3777 {
3778 int i;
3779 u32 phys_addr;
3780
3781 /* Allocate page sized buffers for the receive buffer list */
3782
3783 for ( i = 0; i < Buffercount; i++ ) {
3784 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3785 /* PCI adapter uses shared memory buffers. */
3786 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3787 phys_addr = info->last_mem_alloc;
3788 info->last_mem_alloc += DMABUFFERSIZE;
3789 } else {
3790 /* ISA adapter uses system memory. */
3791 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3792 if (BufferList[i].virt_addr == NULL)
3793 return -ENOMEM;
3794 phys_addr = (u32)(BufferList[i].dma_addr);
3795 }
3796 BufferList[i].phys_addr = phys_addr;
3797 }
3798
3799 return 0;
3800
3801 } /* end of mgsl_alloc_frame_memory() */
3802
3803 /*
3804 * mgsl_free_frame_memory()
3805 *
3806 * Free the buffers associated with
3807 * each buffer entry of a buffer list.
3808 *
3809 * Arguments:
3810 *
3811 * info pointer to device instance data
3812 * BufferList pointer to list of buffer entries
3813 * Buffercount count of buffer entries in buffer list
3814 *
3815 * Return Value: None
3816 */
3817 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3818 {
3819 int i;
3820
3821 if ( BufferList ) {
3822 for ( i = 0 ; i < Buffercount ; i++ ) {
3823 if ( BufferList[i].virt_addr ) {
3824 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3825 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3826 BufferList[i].virt_addr = NULL;
3827 }
3828 }
3829 }
3830
3831 } /* end of mgsl_free_frame_memory() */
3832
/* mgsl_free_dma_buffers()
 *
 * 	Free DMA buffers
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_free_dma_buffers( struct mgsl_struct *info )
{
	/* Order matters: the frame buffers must be released before the
	 * buffer list, because the list entries hold the addresses
	 * needed to free the individual frame buffers. */
	mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
	mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
	mgsl_free_buffer_list_memory( info );

} /* end of mgsl_free_dma_buffers() */
3847
3848
3849 /*
3850 * mgsl_alloc_intermediate_rxbuffer_memory()
3851 *
3852 * Allocate a buffer large enough to hold max_frame_size. This buffer
3853 * is used to pass an assembled frame to the line discipline.
3854 *
3855 * Arguments:
3856 *
3857 * info pointer to device instance data
3858 *
3859 * Return Value: 0 if success, otherwise -ENOMEM
3860 */
3861 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3862 {
3863 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3864 if ( info->intermediate_rxbuffer == NULL )
3865 return -ENOMEM;
3866 /* unused flag buffer to satisfy receive_buf calling interface */
3867 info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
3868 if (!info->flag_buf) {
3869 kfree(info->intermediate_rxbuffer);
3870 info->intermediate_rxbuffer = NULL;
3871 return -ENOMEM;
3872 }
3873 return 0;
3874
3875 } /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3876
3877 /*
3878 * mgsl_free_intermediate_rxbuffer_memory()
3879 *
3880 *
3881 * Arguments:
3882 *
3883 * info pointer to device instance data
3884 *
3885 * Return Value: None
3886 */
3887 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3888 {
3889 kfree(info->intermediate_rxbuffer);
3890 info->intermediate_rxbuffer = NULL;
3891 kfree(info->flag_buf);
3892 info->flag_buf = NULL;
3893
3894 } /* end of mgsl_free_intermediate_rxbuffer_memory() */
3895
3896 /*
3897 * mgsl_alloc_intermediate_txbuffer_memory()
3898 *
3899 * Allocate intermdiate transmit buffer(s) large enough to hold max_frame_size.
3900 * This buffer is used to load transmit frames into the adapter's dma transfer
3901 * buffers when there is sufficient space.
3902 *
3903 * Arguments:
3904 *
3905 * info pointer to device instance data
3906 *
3907 * Return Value: 0 if success, otherwise -ENOMEM
3908 */
3909 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3910 {
3911 int i;
3912
3913 if ( debug_level >= DEBUG_LEVEL_INFO )
3914 printk("%s %s(%d) allocating %d tx holding buffers\n",
3915 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3916
3917 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3918
3919 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3920 info->tx_holding_buffers[i].buffer =
3921 kmalloc(info->max_frame_size, GFP_KERNEL);
3922 if (info->tx_holding_buffers[i].buffer == NULL) {
3923 for (--i; i >= 0; i--) {
3924 kfree(info->tx_holding_buffers[i].buffer);
3925 info->tx_holding_buffers[i].buffer = NULL;
3926 }
3927 return -ENOMEM;
3928 }
3929 }
3930
3931 return 0;
3932
3933 } /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3934
3935 /*
3936 * mgsl_free_intermediate_txbuffer_memory()
3937 *
3938 *
3939 * Arguments:
3940 *
3941 * info pointer to device instance data
3942 *
3943 * Return Value: None
3944 */
3945 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3946 {
3947 int i;
3948
3949 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3950 kfree(info->tx_holding_buffers[i].buffer);
3951 info->tx_holding_buffers[i].buffer = NULL;
3952 }
3953
3954 info->get_tx_holding_index = 0;
3955 info->put_tx_holding_index = 0;
3956 info->tx_holding_count = 0;
3957
3958 } /* end of mgsl_free_intermediate_txbuffer_memory() */
3959
3960
3961 /*
3962 * load_next_tx_holding_buffer()
3963 *
3964 * attempts to load the next buffered tx request into the
3965 * tx dma buffers
3966 *
3967 * Arguments:
3968 *
3969 * info pointer to device instance data
3970 *
3971 * Return Value: true if next buffered tx request loaded
3972 * into adapter's tx dma buffer,
3973 * false otherwise
3974 */
static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
{
	bool ret = false;

	if ( info->tx_holding_count ) {
		/* determine if we have enough tx dma buffers
		 * to accommodate the next tx frame
		 */
		struct tx_holding_buffer *ptx =
			&info->tx_holding_buffers[info->get_tx_holding_index];
		int num_free = num_free_tx_dma_buffers(info);
		/* ceiling division: partial last DMA buffer still needs a slot */
		int num_needed = ptx->buffer_size / DMABUFFERSIZE;
		if ( ptx->buffer_size % DMABUFFERSIZE )
			++num_needed;

		if (num_needed <= num_free) {
			/* hand the queued frame to the adapter's DMA buffers */
			info->xmit_cnt = ptx->buffer_size;
			mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);

			/* consume this slot and advance the circular get index */
			--info->tx_holding_count;
			if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
				info->get_tx_holding_index=0;

			/* restart transmit timer */
			mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));

			ret = true;
		}
	}

	return ret;
}
4007
4008 /*
4009 * save_tx_buffer_request()
4010 *
4011 * attempt to store transmit frame request for later transmission
4012 *
4013 * Arguments:
4014 *
4015 * info pointer to device instance data
4016 * Buffer pointer to buffer containing frame to load
4017 * BufferSize size in bytes of frame in Buffer
4018 *
4019 * Return Value: 1 if able to store, 0 otherwise
4020 */
4021 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4022 {
4023 struct tx_holding_buffer *ptx;
4024
4025 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4026 return 0; /* all buffers in use */
4027 }
4028
4029 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4030 ptx->buffer_size = BufferSize;
4031 memcpy( ptx->buffer, Buffer, BufferSize);
4032
4033 ++info->tx_holding_count;
4034 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4035 info->put_tx_holding_index=0;
4036
4037 return 1;
4038 }
4039
/*
 * mgsl_claim_resources()
 *
 * Claim all hardware resources needed by an adapter instance:
 * I/O port range, IRQ, then (PCI) shared-memory and LCR windows or
 * (ISA) a cascade-mode DMA channel, and finally the DMA buffers.
 * Each acquisition sets a *_requested flag so that a failure at any
 * point can release exactly what was obtained via the errout path.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise -ENODEV
 */
static int mgsl_claim_resources(struct mgsl_struct *info)
{
	if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
		printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
			__FILE__,__LINE__,info->device_name, info->io_base);
		return -ENODEV;
	}
	info->io_addr_requested = true;

	if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
		info->device_name, info ) < 0 ) {
		printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, info->irq_level );
		goto errout;
	}
	info->irq_requested = true;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* PCI: reserve then map the 256KB shared memory window */
		if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
			printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base);
			goto errout;
		}
		info->shared_mem_requested = true;
		if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
			printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
			goto errout;
		}
		info->lcr_mem_requested = true;

		info->memory_base = ioremap_nocache(info->phys_memory_base,
								0x40000);
		if (!info->memory_base) {
			printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
			goto errout;
		}

		/* verify the mapped shared memory actually works before use */
		if ( !mgsl_memory_test(info) ) {
			printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
			goto errout;
		}

		info->lcr_base = ioremap_nocache(info->phys_lcr_base,
								PAGE_SIZE);
		if (!info->lcr_base) {
			printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
				__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
			goto errout;
		}
		/* lcr_base keeps lcr_offset added in; mgsl_release_resources()
		 * subtracts it again before iounmap */
		info->lcr_base += info->lcr_offset;

	} else {
		/* claim DMA channel */

		if (request_dma(info->dma_level,info->device_name) < 0){
			printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
				__FILE__,__LINE__,info->device_name, info->dma_level );
			goto errout;
		}
		info->dma_requested = true;

		/* ISA adapter uses bus master DMA */
		set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
		enable_dma(info->dma_level);
	}

	if ( mgsl_allocate_dma_buffers(info) < 0 ) {
		printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
			__FILE__,__LINE__,info->device_name, info->dma_level );
		goto errout;
	}

	return 0;
errout:
	/* the *_requested flags tell release exactly what to undo */
	mgsl_release_resources(info);
	return -ENODEV;

} /* end of mgsl_claim_resources() */
4121
/*
 * mgsl_release_resources()
 *
 * Release every resource acquired by mgsl_claim_resources(), guarded
 * by the corresponding *_requested flags / non-NULL pointers so it is
 * safe to call after a partial claim. The IRQ and DMA channel are
 * released first so no interrupt or DMA activity can touch the
 * buffers and mappings freed afterwards.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_release_resources(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_release_resources(%s) entry\n",
			__FILE__,__LINE__,info->device_name );

	if ( info->irq_requested ) {
		free_irq(info->irq_level, info);
		info->irq_requested = false;
	}
	if ( info->dma_requested ) {
		disable_dma(info->dma_level);
		free_dma(info->dma_level);
		info->dma_requested = false;
	}
	mgsl_free_dma_buffers(info);
	mgsl_free_intermediate_rxbuffer_memory(info);
	mgsl_free_intermediate_txbuffer_memory(info);

	if ( info->io_addr_requested ) {
		release_region(info->io_base,info->io_addr_size);
		info->io_addr_requested = false;
	}
	if ( info->shared_mem_requested ) {
		release_mem_region(info->phys_memory_base,0x40000);
		info->shared_mem_requested = false;
	}
	if ( info->lcr_mem_requested ) {
		release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
		info->lcr_mem_requested = false;
	}
	if (info->memory_base){
		iounmap(info->memory_base);
		info->memory_base = NULL;
	}
	if (info->lcr_base){
		/* undo the lcr_offset added by mgsl_claim_resources() so the
		 * original ioremap cookie is passed to iounmap */
		iounmap(info->lcr_base - info->lcr_offset);
		info->lcr_base = NULL;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_release_resources(%s) exit\n",
			__FILE__,__LINE__,info->device_name );

} /* end of mgsl_release_resources() */
4167
4168 /* mgsl_add_device()
4169 *
4170 * Add the specified device instance data structure to the
4171 * global linked list of devices and increment the device count.
4172 *
4173 * Arguments: info pointer to device instance data
4174 * Return Value: None
4175 */
static void mgsl_add_device( struct mgsl_struct *info )
{
	/* device line number is the current device count; the name is
	 * derived from it (ttySL0, ttySL1, ...) */
	info->next_device = NULL;
	info->line = mgsl_device_count;
	sprintf(info->device_name,"ttySL%d",info->line);

	if (info->line < MAX_TOTAL_DEVICES) {
		/* apply per-device module parameters, clamped to sane ranges */
		if (maxframe[info->line])
			info->max_frame_size = maxframe[info->line];

		if (txdmabufs[info->line]) {
			info->num_tx_dma_buffers = txdmabufs[info->line];
			if (info->num_tx_dma_buffers < 1)
				info->num_tx_dma_buffers = 1;
		}

		if (txholdbufs[info->line]) {
			info->num_tx_holding_buffers = txholdbufs[info->line];
			if (info->num_tx_holding_buffers < 1)
				info->num_tx_holding_buffers = 1;
			else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
				info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
		}
	}

	mgsl_device_count++;

	/* append to the tail of the singly linked global device list */
	if ( !mgsl_device_list )
		mgsl_device_list = info;
	else {
		struct mgsl_struct *current_dev = mgsl_device_list;
		while( current_dev->next_device )
			current_dev = current_dev->next_device;
		current_dev->next_device = info;
	}

	/* clamp max_frame_size to the supported range [4096, 65535] */
	if ( info->max_frame_size < 4096 )
		info->max_frame_size = 4096;
	else if ( info->max_frame_size > 65535 )
		info->max_frame_size = 65535;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
			info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
			info->phys_memory_base, info->phys_lcr_base,
		     	info->max_frame_size );
	} else {
		printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
			info->device_name, info->io_base, info->irq_level, info->dma_level,
		     	info->max_frame_size );
	}

#if SYNCLINK_GENERIC_HDLC
	hdlcdev_init(info);
#endif

} /* end of mgsl_add_device() */
4233
/* tty_port callbacks used for modem-control handling (carrier detect
 * and DTR/RTS manipulation) on open/close and blocking waits */
static const struct tty_port_operations mgsl_port_ops = {
	.carrier_raised = carrier_raised,
	.dtr_rts = dtr_rts,
};
4238
4239
4240 /* mgsl_allocate_device()
4241 *
4242 * Allocate and initialize a device instance structure
4243 *
4244 * Arguments: none
4245 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4246 */
4247 static struct mgsl_struct* mgsl_allocate_device(void)
4248 {
4249 struct mgsl_struct *info;
4250
4251 info = kzalloc(sizeof(struct mgsl_struct),
4252 GFP_KERNEL);
4253
4254 if (!info) {
4255 printk("Error can't allocate device instance data\n");
4256 } else {
4257 tty_port_init(&info->port);
4258 info->port.ops = &mgsl_port_ops;
4259 info->magic = MGSL_MAGIC;
4260 INIT_WORK(&info->task, mgsl_bh_handler);
4261 info->max_frame_size = 4096;
4262 info->port.close_delay = 5*HZ/10;
4263 info->port.closing_wait = 30*HZ;
4264 init_waitqueue_head(&info->status_event_wait_q);
4265 init_waitqueue_head(&info->event_wait_q);
4266 spin_lock_init(&info->irq_spinlock);
4267 spin_lock_init(&info->netlock);
4268 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4269 info->idle_mode = HDLC_TXIDLE_FLAGS;
4270 info->num_tx_dma_buffers = 1;
4271 info->num_tx_holding_buffers = 0;
4272 }
4273
4274 return info;
4275
4276 } /* end of mgsl_allocate_device()*/
4277
/* tty driver entry points for the SyncLink serial devices
 * (note: "msgl_get_icount" is the actual symbol name in this file) */
static const struct tty_operations mgsl_ops = {
	.install = mgsl_install,
	.open = mgsl_open,
	.close = mgsl_close,
	.write = mgsl_write,
	.put_char = mgsl_put_char,
	.flush_chars = mgsl_flush_chars,
	.write_room = mgsl_write_room,
	.chars_in_buffer = mgsl_chars_in_buffer,
	.flush_buffer = mgsl_flush_buffer,
	.ioctl = mgsl_ioctl,
	.throttle = mgsl_throttle,
	.unthrottle = mgsl_unthrottle,
	.send_xchar = mgsl_send_xchar,
	.break_ctl = mgsl_break,
	.wait_until_sent = mgsl_wait_until_sent,
	.set_termios = mgsl_set_termios,
	.stop = mgsl_stop,
	.start = mgsl_start,
	.hangup = mgsl_hangup,
	.tiocmget = tiocmget,
	.tiocmset = tiocmset,
	.get_icount = msgl_get_icount,
	.proc_fops = &mgsl_proc_fops,
};
4303
4304 /*
4305 * perform tty device initialization
4306 */
4307 static int mgsl_init_tty(void)
4308 {
4309 int rc;
4310
4311 serial_driver = alloc_tty_driver(128);
4312 if (!serial_driver)
4313 return -ENOMEM;
4314
4315 serial_driver->driver_name = "synclink";
4316 serial_driver->name = "ttySL";
4317 serial_driver->major = ttymajor;
4318 serial_driver->minor_start = 64;
4319 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4320 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4321 serial_driver->init_termios = tty_std_termios;
4322 serial_driver->init_termios.c_cflag =
4323 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4324 serial_driver->init_termios.c_ispeed = 9600;
4325 serial_driver->init_termios.c_ospeed = 9600;
4326 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4327 tty_set_operations(serial_driver, &mgsl_ops);
4328 if ((rc = tty_register_driver(serial_driver)) < 0) {
4329 printk("%s(%d):Couldn't register serial driver\n",
4330 __FILE__,__LINE__);
4331 put_tty_driver(serial_driver);
4332 serial_driver = NULL;
4333 return rc;
4334 }
4335
4336 printk("%s %s, tty major#%d\n",
4337 driver_name, driver_version,
4338 serial_driver->major);
4339 return 0;
4340 }
4341
4342 /* enumerate user specified ISA adapters
4343 */
static void mgsl_enum_isa_devices(void)
{
	struct mgsl_struct *info;
	int i;

	/* Check for user specified ISA devices */

	/* walk the io[]/irq[]/dma[] module parameter arrays; a zero io or
	 * irq entry terminates the list */
	for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
		if ( debug_level >= DEBUG_LEVEL_INFO )
			printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
				io[i], irq[i], dma[i] );

		info = mgsl_allocate_device();
		if ( !info ) {
			/* error allocating device instance data */
			if ( debug_level >= DEBUG_LEVEL_ERROR )
				printk( "can't allocate device instance data.\n");
			continue;
		}

		/* Copy user configuration info to device instance data */
		info->io_base = (unsigned int)io[i];
		info->irq_level = (unsigned int)irq[i];
		info->irq_level = irq_canonicalize(info->irq_level);
		info->dma_level = (unsigned int)dma[i];
		info->bus_type = MGSL_BUS_TYPE_ISA;
		info->io_addr_size = 16;
		info->irq_flags = 0;

		mgsl_add_device( info );
	}
}
4376
/*
 * synclink_cleanup()
 *
 * Module teardown: unregister the tty driver, release the resources
 * of every device on the global list and free the instance data,
 * then unregister the PCI driver if it was registered. Also used as
 * the error path of synclink_init(), so every step is conditional.
 */
static void synclink_cleanup(void)
{
	int rc;
	struct mgsl_struct *info;
	struct mgsl_struct *tmp;

	printk("Unloading %s: %s\n", driver_name, driver_version);

	if (serial_driver) {
		rc = tty_unregister_driver(serial_driver);
		if (rc)
			printk("%s(%d) failed to unregister tty driver err=%d\n",
			       __FILE__,__LINE__,rc);
		put_tty_driver(serial_driver);
	}

	/* free each device after caching the next pointer, since the
	 * instance data is released inside the loop */
	info = mgsl_device_list;
	while(info) {
#if SYNCLINK_GENERIC_HDLC
		hdlcdev_exit(info);
#endif
		mgsl_release_resources(info);
		tmp = info;
		info = info->next_device;
		tty_port_destroy(&tmp->port);
		kfree(tmp);
	}

	if (pci_registered)
		pci_unregister_driver(&synclink_pci_driver);
}
4408
/*
 * synclink_init()
 *
 * Module entry point: enumerate ISA devices from module parameters,
 * register the PCI driver, then register the tty driver. PCI
 * registration failure is only logged (ISA devices may still work);
 * tty registration failure is fatal and triggers full cleanup.
 */
static int __init synclink_init(void)
{
	int rc;

	if (break_on_load) {
		/* debugging aid: trap into a debugger on module load */
	 	mgsl_get_text_ptr();
  		BREAKPOINT();
	}

 	printk("%s %s\n", driver_name, driver_version);

	mgsl_enum_isa_devices();
	if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
		printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
	else
		pci_registered = true;

	if ((rc = mgsl_init_tty()) < 0)
		goto error;

	return 0;

error:
	synclink_cleanup();
	return rc;
}
4435
static void __exit synclink_exit(void)
{
	/* module unload: full teardown of tty, devices, and PCI driver */
	synclink_cleanup();
}

module_init(synclink_init);
module_exit(synclink_exit);
4443
4444 /*
4445 * usc_RTCmd()
4446 *
4447 * Issue a USC Receive/Transmit command to the
4448 * Channel Command/Address Register (CCAR).
4449 *
4450 * Notes:
4451 *
4452 * The command is encoded in the most significant 5 bits <15..11>
4453 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4454 * and Bits <6..0> must be written as zeros.
4455 *
4456 * Arguments:
4457 *
4458 * info pointer to device information structure
4459 * Cmd command mask (use symbolic macros)
4460 *
4461 * Return Value:
4462 *
4463 * None
4464 */
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
{
	/* output command to CCAR in bits <15..11> */
	/* preserve bits <10..7>, bits <6..0> must be zero */

	outw( Cmd + info->loopback_bits, info->io_base + CCAR );

	/* Read to flush write to CCAR */
	/* (only needed on PCI, where I/O writes may be posted) */
	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
		inw( info->io_base + CCAR );

} /* end of usc_RTCmd() */
4477
4478 /*
4479 * usc_DmaCmd()
4480 *
4481 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4482 *
4483 * Arguments:
4484 *
4485 * info pointer to device information structure
4486 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4487 *
4488 * Return Value:
4489 *
4490 * None
4491 */
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
{
	/* write command mask to DCAR */
	/* mbre_bit preserves the Master Bus Request Enable state (BIT8) */
	outw( Cmd + info->mbre_bit, info->io_base );

	/* Read to flush write to DCAR */
	/* (only needed on PCI, where I/O writes may be posted) */
	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
		inw( info->io_base );

} /* end of usc_DmaCmd() */
4502
4503 /*
4504 * usc_OutDmaReg()
4505 *
4506 * Write a 16-bit value to a USC DMA register
4507 *
4508 * Arguments:
4509 *
4510 * info pointer to device info structure
4511 * RegAddr register address (number) for write
4512 * RegValue 16-bit value to write to register
4513 *
4514 * Return Value:
4515 *
4516 * None
4517 *
4518 */
static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
{
	/* Note: The DCAR is located at the adapter base address */
	/* Note: must preserve state of BIT8 in DCAR */

	/* two-step access: select the register via DCAR, then write data */
	outw( RegAddr + info->mbre_bit, info->io_base );
	outw( RegValue, info->io_base );

	/* Read to flush write to DCAR */
	/* (only needed on PCI, where I/O writes may be posted) */
	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
		inw( info->io_base );

} /* end of usc_OutDmaReg() */
4532
4533 /*
4534 * usc_InDmaReg()
4535 *
4536 * Read a 16-bit value from a DMA register
4537 *
4538 * Arguments:
4539 *
4540 * info pointer to device info structure
4541 * RegAddr register address (number) to read from
4542 *
4543 * Return Value:
4544 *
4545 * The 16-bit value read from register
4546 *
4547 */
static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
{
	/* Note: The DCAR is located at the adapter base address */
	/* Note: must preserve state of BIT8 in DCAR */

	/* two-step access: select the register via DCAR, then read data */
	outw( RegAddr + info->mbre_bit, info->io_base );
	return inw( info->io_base );

} /* end of usc_InDmaReg() */
4557
4558 /*
4559 *
4560 * usc_OutReg()
4561 *
4562 * Write a 16-bit value to a USC serial channel register
4563 *
4564 * Arguments:
4565 *
4566 * info pointer to device info structure
4567 * RegAddr register address (number) to write to
4568 * RegValue 16-bit value to write to register
4569 *
4570 * Return Value:
4571 *
4572 * None
4573 *
4574 */
static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
{
	/* two-step access: select the register via CCAR, then write data;
	 * loopback_bits must be preserved in every CCAR write */
	outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
	outw( RegValue, info->io_base + CCAR );

	/* Read to flush write to CCAR */
	/* (only needed on PCI, where I/O writes may be posted) */
	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
		inw( info->io_base + CCAR );

} /* end of usc_OutReg() */
4585
4586 /*
4587 * usc_InReg()
4588 *
4589 * Reads a 16-bit value from a USC serial channel register
4590 *
4591 * Arguments:
4592 *
4593 * info pointer to device extension
4594 * RegAddr register address (number) to read from
4595 *
4596 * Return Value:
4597 *
4598 * 16-bit value read from register
4599 */
static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
{
	/* two-step access: select the register via CCAR, then read data;
	 * loopback_bits must be preserved in every CCAR write */
	outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
	return inw( info->io_base + CCAR );

} /* end of usc_InReg() */
4606
4607 /* usc_set_sdlc_mode()
4608 *
4609 * Set up the adapter for SDLC DMA communications.
4610 *
4611 * Arguments: info pointer to device instance data
4612 * Return Value: NONE
4613 */
4614 static void usc_set_sdlc_mode( struct mgsl_struct *info )
4615 {
4616 u16 RegValue;
4617 bool PreSL1660;
4618
4619 /*
4620 * determine if the IUSC on the adapter is pre-SL1660. If
4621 * not, take advantage of the UnderWait feature of more
4622 * modern chips. If an underrun occurs and this bit is set,
4623 * the transmitter will idle the programmed idle pattern
4624 * until the driver has time to service the underrun. Otherwise,
4625 * the dma controller may get the cycles previously requested
4626 * and begin transmitting queued tx data.
4627 */
4628 usc_OutReg(info,TMCR,0x1f);
4629 RegValue=usc_InReg(info,TMDR);
4630 PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4631
4632 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4633 {
4634 /*
4635 ** Channel Mode Register (CMR)
4636 **
4637 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4638 ** <13> 0 0 = Transmit Disabled (initially)
4639 ** <12> 0 1 = Consecutive Idles share common 0
4640 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4641 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4642 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4643 **
4644 ** 1000 1110 0000 0110 = 0x8e06
4645 */
4646 RegValue = 0x8e06;
4647
4648 /*--------------------------------------------------
4649 * ignore user options for UnderRun Actions and
4650 * preambles
4651 *--------------------------------------------------*/
4652 }
4653 else
4654 {
4655 /* Channel mode Register (CMR)
4656 *
4657 * <15..14> 00 Tx Sub modes, Underrun Action
4658 * <13> 0 1 = Send Preamble before opening flag
4659 * <12> 0 1 = Consecutive Idles share common 0
4660 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4661 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4662 * <3..0> 0110 Receiver mode = HDLC/SDLC
4663 *
4664 * 0000 0110 0000 0110 = 0x0606
4665 */
4666 if (info->params.mode == MGSL_MODE_RAW) {
4667 RegValue = 0x0001; /* Set Receive mode = external sync */
4668
4669 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4670 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4671
4672 /*
4673 * TxSubMode:
4674 * CMR <15> 0 Don't send CRC on Tx Underrun
4675 * CMR <14> x undefined
	 *	CMR <13>	0	Send preamble before opening sync
4677 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4678 *
4679 * TxMode:
4680 * CMR <11-8) 0100 MonoSync
4681 *
4682 * 0x00 0100 xxxx xxxx 04xx
4683 */
4684 RegValue |= 0x0400;
4685 }
4686 else {
4687
4688 RegValue = 0x0606;
4689
4690 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4691 RegValue |= BIT14;
4692 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4693 RegValue |= BIT15;
4694 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4695 RegValue |= BIT15 | BIT14;
4696 }
4697
4698 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4699 RegValue |= BIT13;
4700 }
4701
4702 if ( info->params.mode == MGSL_MODE_HDLC &&
4703 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4704 RegValue |= BIT12;
4705
4706 if ( info->params.addr_filter != 0xff )
4707 {
4708 /* set up receive address filtering */
4709 usc_OutReg( info, RSR, info->params.addr_filter );
4710 RegValue |= BIT4;
4711 }
4712
4713 usc_OutReg( info, CMR, RegValue );
4714 info->cmr_value = RegValue;
4715
4716 /* Receiver mode Register (RMR)
4717 *
4718 * <15..13> 000 encoding
4719 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4720 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4721 * <9> 0 1 = Include Receive chars in CRC
4722 * <8> 1 1 = Use Abort/PE bit as abort indicator
4723 * <7..6> 00 Even parity
4724 * <5> 0 parity disabled
4725 * <4..2> 000 Receive Char Length = 8 bits
4726 * <1..0> 00 Disable Receiver
4727 *
4728 * 0000 0101 0000 0000 = 0x0500
4729 */
4730
4731 RegValue = 0x0500;
4732
4733 switch ( info->params.encoding ) {
4734 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4735 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4736 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4737 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4738 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4739 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4740 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4741 }
4742
4743 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4744 RegValue |= BIT9;
4745 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4746 RegValue |= ( BIT12 | BIT10 | BIT9 );
4747
4748 usc_OutReg( info, RMR, RegValue );
4749
4750 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4751 /* When an opening flag of an SDLC frame is recognized the */
4752 /* Receive Character count (RCC) is loaded with the value in */
4753 /* RCLR. The RCC is decremented for each received byte. The */
4754 /* value of RCC is stored after the closing flag of the frame */
4755 /* allowing the frame size to be computed. */
4756
4757 usc_OutReg( info, RCLR, RCLRVALUE );
4758
4759 usc_RCmd( info, RCmd_SelectRicrdma_level );
4760
4761 /* Receive Interrupt Control Register (RICR)
4762 *
4763 * <15..8> ? RxFIFO DMA Request Level
4764 * <7> 0 Exited Hunt IA (Interrupt Arm)
4765 * <6> 0 Idle Received IA
4766 * <5> 0 Break/Abort IA
4767 * <4> 0 Rx Bound IA
4768 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4769 * <2> 0 Abort/PE IA
4770 * <1> 1 Rx Overrun IA
4771 * <0> 0 Select TC0 value for readback
4772 *
	 *	0000 0000 0000 1010 = 0x000a
4774 */
4775
4776 /* Carry over the Exit Hunt and Idle Received bits */
4777 /* in case they have been armed by usc_ArmEvents. */
4778
4779 RegValue = usc_InReg( info, RICR ) & 0xc0;
4780
4781 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4782 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4783 else
4784 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4785
4786 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4787
4788 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4789 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4790
4791 /* Transmit mode Register (TMR)
4792 *
4793 * <15..13> 000 encoding
4794 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4795 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4796 * <9> 0 1 = Tx CRC Enabled
4797 * <8> 0 1 = Append CRC to end of transmit frame
4798 * <7..6> 00 Transmit parity Even
4799 * <5> 0 Transmit parity Disabled
4800 * <4..2> 000 Tx Char Length = 8 bits
4801 * <1..0> 00 Disable Transmitter
4802 *
4803 * 0000 0100 0000 0000 = 0x0400
4804 */
4805
4806 RegValue = 0x0400;
4807
4808 switch ( info->params.encoding ) {
4809 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4810 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4811 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4812 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4813 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4814 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4815 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4816 }
4817
4818 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4819 RegValue |= BIT9 | BIT8;
4820 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4821 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4822
4823 usc_OutReg( info, TMR, RegValue );
4824
4825 usc_set_txidle( info );
4826
4827
4828 usc_TCmd( info, TCmd_SelectTicrdma_level );
4829
4830 /* Transmit Interrupt Control Register (TICR)
4831 *
4832 * <15..8> ? Transmit FIFO DMA Level
4833 * <7> 0 Present IA (Interrupt Arm)
4834 * <6> 0 Idle Sent IA
4835 * <5> 1 Abort Sent IA
4836 * <4> 1 EOF/EOM Sent IA
4837 * <3> 0 CRC Sent IA
4838 * <2> 1 1 = Wait for SW Trigger to Start Frame
4839 * <1> 1 Tx Underrun IA
4840 * <0> 0 TC0 constant on read back
4841 *
4842 * 0000 0000 0011 0110 = 0x0036
4843 */
4844
4845 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4846 usc_OutReg( info, TICR, 0x0736 );
4847 else
4848 usc_OutReg( info, TICR, 0x1436 );
4849
4850 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4851 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4852
4853 /*
4854 ** Transmit Command/Status Register (TCSR)
4855 **
4856 ** <15..12> 0000 TCmd
4857 ** <11> 0/1 UnderWait
4858 ** <10..08> 000 TxIdle
4859 ** <7> x PreSent
4860 ** <6> x IdleSent
4861 ** <5> x AbortSent
4862 ** <4> x EOF/EOM Sent
4863 ** <3> x CRC Sent
4864 ** <2> x All Sent
4865 ** <1> x TxUnder
4866 ** <0> x TxEmpty
4867 **
4868 ** 0000 0000 0000 0000 = 0x0000
4869 */
4870 info->tcsr_value = 0;
4871
4872 if ( !PreSL1660 )
4873 info->tcsr_value |= TCSR_UNDERWAIT;
4874
4875 usc_OutReg( info, TCSR, info->tcsr_value );
4876
4877 /* Clock mode Control Register (CMCR)
4878 *
4879 * <15..14> 00 counter 1 Source = Disabled
4880 * <13..12> 00 counter 0 Source = Disabled
4881 * <11..10> 11 BRG1 Input is TxC Pin
4882 * <9..8> 11 BRG0 Input is TxC Pin
4883 * <7..6> 01 DPLL Input is BRG1 Output
4884 * <5..3> XXX TxCLK comes from Port 0
4885 * <2..0> XXX RxCLK comes from Port 1
4886 *
4887 * 0000 1111 0111 0111 = 0x0f77
4888 */
4889
4890 RegValue = 0x0f40;
4891
4892 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4893 RegValue |= 0x0003; /* RxCLK from DPLL */
4894 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4895 RegValue |= 0x0004; /* RxCLK from BRG0 */
4896 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4897 RegValue |= 0x0006; /* RxCLK from TXC Input */
4898 else
4899 RegValue |= 0x0007; /* RxCLK from Port1 */
4900
4901 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4902 RegValue |= 0x0018; /* TxCLK from DPLL */
4903 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4904 RegValue |= 0x0020; /* TxCLK from BRG0 */
4905 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4906 RegValue |= 0x0038; /* RxCLK from TXC Input */
4907 else
4908 RegValue |= 0x0030; /* TxCLK from Port0 */
4909
4910 usc_OutReg( info, CMCR, RegValue );
4911
4912
4913 /* Hardware Configuration Register (HCR)
4914 *
4915 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4916 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
4917 * <12> 0 CVOK:0=report code violation in biphase
4918 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4919 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4920 * <7..6> 00 reserved
4921 * <5> 0 BRG1 mode:0=continuous,1=single cycle
4922 * <4> X BRG1 Enable
4923 * <3..2> 00 reserved
4924 * <1> 0 BRG0 mode:0=continuous,1=single cycle
4925 * <0> 0 BRG0 Enable
4926 */
4927
4928 RegValue = 0x0000;
4929
4930 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL | HDLC_FLAG_TXC_DPLL) ) {
4931 u32 XtalSpeed;
4932 u32 DpllDivisor;
4933 u16 Tc;
4934
4935 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
4936 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4937
4938 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4939 XtalSpeed = 11059200;
4940 else
4941 XtalSpeed = 14745600;
4942
4943 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4944 DpllDivisor = 16;
4945 RegValue |= BIT10;
4946 }
4947 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4948 DpllDivisor = 8;
4949 RegValue |= BIT11;
4950 }
4951 else
4952 DpllDivisor = 32;
4953
4954 /* Tc = (Xtal/Speed) - 1 */
4955 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
4956 /* then rounding up gives a more precise time constant. Instead */
4957 /* of rounding up and then subtracting 1 we just don't subtract */
4958 /* the one in this case. */
4959
4960 /*--------------------------------------------------
4961 * ejz: for DPLL mode, application should use the
4962 * same clock speed as the partner system, even
4963 * though clocking is derived from the input RxData.
4964 * In case the user uses a 0 for the clock speed,
4965 * default to 0xffffffff and don't try to divide by
4966 * zero
4967 *--------------------------------------------------*/
4968 if ( info->params.clock_speed )
4969 {
4970 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4971 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4972 / info->params.clock_speed) )
4973 Tc--;
4974 }
4975 else
4976 Tc = -1;
4977
4978
4979 /* Write 16-bit Time Constant for BRG1 */
4980 usc_OutReg( info, TC1R, Tc );
4981
4982 RegValue |= BIT4; /* enable BRG1 */
4983
4984 switch ( info->params.encoding ) {
4985 case HDLC_ENCODING_NRZ:
4986 case HDLC_ENCODING_NRZB:
4987 case HDLC_ENCODING_NRZI_MARK:
4988 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
4989 case HDLC_ENCODING_BIPHASE_MARK:
4990 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
4991 case HDLC_ENCODING_BIPHASE_LEVEL:
4992 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 | BIT8; break;
4993 }
4994 }
4995
4996 usc_OutReg( info, HCR, RegValue );
4997
4998
4999 /* Channel Control/status Register (CCSR)
5000 *
5001 * <15> X RCC FIFO Overflow status (RO)
5002 * <14> X RCC FIFO Not Empty status (RO)
5003 * <13> 0 1 = Clear RCC FIFO (WO)
5004 * <12> X DPLL Sync (RW)
5005 * <11> X DPLL 2 Missed Clocks status (RO)
5006 * <10> X DPLL 1 Missed Clock status (RO)
5007 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5008 * <7> X SDLC Loop On status (RO)
5009 * <6> X SDLC Loop Send status (RO)
5010 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5011 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5012 * <1..0> 00 reserved
5013 *
5014 * 0000 0000 0010 0000 = 0x0020
5015 */
5016
5017 usc_OutReg( info, CCSR, 0x1020 );
5018
5019
5020 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5021 usc_OutReg( info, SICR,
5022 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5023 }
5024
5025
5026 /* enable Master Interrupt Enable bit (MIE) */
5027 usc_EnableMasterIrqBit( info );
5028
5029 usc_ClearIrqPendingBits( info, RECEIVE_STATUS | RECEIVE_DATA |
5030 TRANSMIT_STATUS | TRANSMIT_DATA | MISC);
5031
5032 /* arm RCC underflow interrupt */
5033 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5034 usc_EnableInterrupts(info, MISC);
5035
5036 info->mbre_bit = 0;
5037 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5038 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5039 info->mbre_bit = BIT8;
5040 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5041
5042 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5043 /* Enable DMAEN (Port 7, Bit 14) */
5044 /* This connects the DMA request signal to the ISA bus */
5045 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5046 }
5047
5048 /* DMA Control Register (DCR)
5049 *
5050 * <15..14> 10 Priority mode = Alternating Tx/Rx
5051 * 01 Rx has priority
5052 * 00 Tx has priority
5053 *
5054 * <13> 1 Enable Priority Preempt per DCR<15..14>
5055 * (WARNING DCR<11..10> must be 00 when this is 1)
5056 * 0 Choose activate channel per DCR<11..10>
5057 *
5058 * <12> 0 Little Endian for Array/List
5059 * <11..10> 00 Both Channels can use each bus grant
5060 * <9..6> 0000 reserved
5061 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5062 * <4> 0 1 = drive D/C and S/D pins
5063 * <3> 1 1 = Add one wait state to all DMA cycles.
5064 * <2> 0 1 = Strobe /UAS on every transfer.
5065 * <1..0> 11 Addr incrementing only affects LS24 bits
5066 *
5067 * 0110 0000 0000 1011 = 0x600b
5068 */
5069
5070 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5071 /* PCI adapter does not need DMA wait state */
5072 usc_OutDmaReg( info, DCR, 0xa00b );
5073 }
5074 else
5075 usc_OutDmaReg( info, DCR, 0x800b );
5076
5077
5078 /* Receive DMA mode Register (RDMR)
5079 *
5080 * <15..14> 11 DMA mode = Linked List Buffer mode
5081 * <13> 1 RSBinA/L = store Rx status Block in Arrary/List entry
5082 * <12> 1 Clear count of List Entry after fetching
5083 * <11..10> 00 Address mode = Increment
5084 * <9> 1 Terminate Buffer on RxBound
5085 * <8> 0 Bus Width = 16bits
5086 * <7..0> ? status Bits (write as 0s)
5087 *
5088 * 1111 0010 0000 0000 = 0xf200
5089 */
5090
5091 usc_OutDmaReg( info, RDMR, 0xf200 );
5092
5093
5094 /* Transmit DMA mode Register (TDMR)
5095 *
5096 * <15..14> 11 DMA mode = Linked List Buffer mode
5097 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5098 * <12> 1 Clear count of List Entry after fetching
5099 * <11..10> 00 Address mode = Increment
5100 * <9> 1 Terminate Buffer on end of frame
5101 * <8> 0 Bus Width = 16bits
5102 * <7..0> ? status Bits (Read Only so write as 0)
5103 *
5104 * 1111 0010 0000 0000 = 0xf200
5105 */
5106
5107 usc_OutDmaReg( info, TDMR, 0xf200 );
5108
5109
5110 /* DMA Interrupt Control Register (DICR)
5111 *
5112 * <15> 1 DMA Interrupt Enable
5113 * <14> 0 1 = Disable IEO from USC
5114 * <13> 0 1 = Don't provide vector during IntAck
5115 * <12> 1 1 = Include status in Vector
5116 * <10..2> 0 reserved, Must be 0s
5117 * <1> 0 1 = Rx DMA Interrupt Enabled
5118 * <0> 0 1 = Tx DMA Interrupt Enabled
5119 *
5120 * 1001 0000 0000 0000 = 0x9000
5121 */
5122
5123 usc_OutDmaReg( info, DICR, 0x9000 );
5124
5125 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5126 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5127 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5128
5129 /* Channel Control Register (CCR)
5130 *
5131 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5132 * <13> 0 Trigger Tx on SW Command Disabled
5133 * <12> 0 Flag Preamble Disabled
5134 * <11..10> 00 Preamble Length
5135 * <9..8> 00 Preamble Pattern
5136 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5137 * <5> 0 Trigger Rx on SW Command Disabled
5138 * <4..0> 0 reserved
5139 *
5140 * 1000 0000 1000 0000 = 0x8080
5141 */
5142
5143 RegValue = 0x8080;
5144
5145 switch ( info->params.preamble_length ) {
5146 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5147 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5148 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 | BIT10; break;
5149 }
5150
5151 switch ( info->params.preamble ) {
5152 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 | BIT12; break;
5153 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5154 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5155 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 | BIT8; break;
5156 }
5157
5158 usc_OutReg( info, CCR, RegValue );
5159
5160
5161 /*
5162 * Burst/Dwell Control Register
5163 *
5164 * <15..8> 0x20 Maximum number of transfers per bus grant
5165 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5166 */
5167
5168 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5169 /* don't limit bus occupancy on PCI adapter */
5170 usc_OutDmaReg( info, BDCR, 0x0000 );
5171 }
5172 else
5173 usc_OutDmaReg( info, BDCR, 0x2000 );
5174
5175 usc_stop_transmitter(info);
5176 usc_stop_receiver(info);
5177
5178 } /* end of usc_set_sdlc_mode() */
5179
5180 /* usc_enable_loopback()
5181 *
5182 * Set the 16C32 for internal loopback mode.
5183 * The TxCLK and RxCLK signals are generated from the BRG0 and
5184 * the TxD is looped back to the RxD internally.
5185 *
5186 * Arguments: info pointer to device instance data
5187 * enable 1 = enable loopback, 0 = disable
5188 * Return Value: None
5189 */
5190 static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5191 {
5192 if (enable) {
5193 /* blank external TXD output */
5194 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7 | BIT6));
5195
5196 /* Clock mode Control Register (CMCR)
5197 *
5198 * <15..14> 00 counter 1 Disabled
5199 * <13..12> 00 counter 0 Disabled
5200 * <11..10> 11 BRG1 Input is TxC Pin
5201 * <9..8> 11 BRG0 Input is TxC Pin
5202 * <7..6> 01 DPLL Input is BRG1 Output
5203 * <5..3> 100 TxCLK comes from BRG0
5204 * <2..0> 100 RxCLK comes from BRG0
5205 *
5206 * 0000 1111 0110 0100 = 0x0f64
5207 */
5208
5209 usc_OutReg( info, CMCR, 0x0f64 );
5210
5211 /* Write 16-bit Time Constant for BRG0 */
5212 /* use clock speed if available, otherwise use 8 for diagnostics */
5213 if (info->params.clock_speed) {
5214 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5215 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5216 else
5217 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5218 } else
5219 usc_OutReg(info, TC0R, (u16)8);
5220
5221 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5222 mode = Continuous Set Bit 0 to enable BRG0. */
5223 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5224
5225 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5226 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5227
5228 /* set Internal Data loopback mode */
5229 info->loopback_bits = 0x300;
5230 outw( 0x0300, info->io_base + CCAR );
5231 } else {
5232 /* enable external TXD output */
5233 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7 | BIT6));
5234
5235 /* clear Internal Data loopback mode */
5236 info->loopback_bits = 0;
5237 outw( 0,info->io_base + CCAR );
5238 }
5239
5240 } /* end of usc_enable_loopback() */
5241
5242 /* usc_enable_aux_clock()
5243 *
5244 * Enabled the AUX clock output at the specified frequency.
5245 *
5246 * Arguments:
5247 *
5248 * info pointer to device extension
5249 * data_rate data rate of clock in bits per second
5250 * A data rate of 0 disables the AUX clock.
5251 *
5252 * Return Value: None
5253 */
5254 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5255 {
5256 u32 XtalSpeed;
5257 u16 Tc;
5258
5259 if ( data_rate ) {
5260 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5261 XtalSpeed = 11059200;
5262 else
5263 XtalSpeed = 14745600;
5264
5265
5266 /* Tc = (Xtal/Speed) - 1 */
5267 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5268 /* then rounding up gives a more precise time constant. Instead */
5269 /* of rounding up and then subtracting 1 we just don't subtract */
5270 /* the one in this case. */
5271
5272
5273 Tc = (u16)(XtalSpeed/data_rate);
5274 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5275 Tc--;
5276
5277 /* Write 16-bit Time Constant for BRG0 */
5278 usc_OutReg( info, TC0R, Tc );
5279
5280 /*
5281 * Hardware Configuration Register (HCR)
5282 * Clear Bit 1, BRG0 mode = Continuous
5283 * Set Bit 0 to enable BRG0.
5284 */
5285
5286 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5287
5288 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5289 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5290 } else {
5291 /* data rate == 0 so turn off BRG0 */
5292 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5293 }
5294
5295 } /* end of usc_enable_aux_clock() */
5296
/*
 * usc_process_rxoverrun_sync()
 *
 * Process a receive overrun by resetting the receive DMA buffers
 * and issuing a Purge Rx FIFO command so the receiver can continue
 * receiving. Any complete frames already in the DMA buffer ring are
 * left intact; only the unfinished frame interrupted by the overrun
 * (if any) has its buffers recycled.
 *
 * Arguments:
 *
 *	info	pointer to device extension
 *
 * Return Value:	None
 */
static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
{
	int start_index;
	int end_index;
	int frame_start_index;
	bool start_of_frame_found = false;
	bool end_of_frame_found = false;
	bool reprogram_dma = false;

	DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
	u32 phys_addr;

	/* halt receive DMA and resynchronize the receiver */
	usc_DmaCmd( info, DmaCmd_PauseRxChannel );
	usc_RCmd( info, RCmd_EnterHuntmode );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* CurrentRxBuffer points to the 1st buffer of the next */
	/* possibly available receive frame. */

	frame_start_index = start_index = end_index = info->current_rx_buffer;

	/* Search for an unfinished string of buffers. This means */
	/* that a receive frame started (at least one buffer with */
	/* count set to zero) but there is no terminating buffer */
	/* (status set to non-zero). */

	while( !buffer_list[end_index].count )
	{
		/* Count field has been reset to zero by 16C32. */
		/* This buffer is currently in use. */

		if ( !start_of_frame_found )
		{
			start_of_frame_found = true;
			frame_start_index = end_index;
			end_of_frame_found = false;
		}

		if ( buffer_list[end_index].status )
		{
			/* Status field has been set by 16C32. */
			/* This is the last buffer of a received frame. */

			/* We want to leave the buffers for this frame intact. */
			/* Move on to next possible frame. */

			start_of_frame_found = false;
			end_of_frame_found = true;
		}

		/* advance to next buffer entry in linked list (circular) */
		end_index++;
		if ( end_index == info->rx_buffer_count )
			end_index = 0;

		if ( start_index == end_index )
		{
			/* The entire list has been searched with all Counts == 0 and */
			/* all Status == 0. The receive buffers are */
			/* completely screwed, reset all receive buffers! */
			mgsl_reset_rx_dma_buffers( info );
			frame_start_index = 0;
			start_of_frame_found = false;
			reprogram_dma = true;
			break;
		}
	}

	if ( start_of_frame_found && !end_of_frame_found )
	{
		/* There is an unfinished string of receive DMA buffers */
		/* as a result of the receiver overrun. */

		/* Reset the buffers for the unfinished frame */
		/* and reprogram the receive DMA controller to start */
		/* at the 1st buffer of unfinished frame. */

		start_index = frame_start_index;

		do
		{
			/* single write reloads the entry's count/status pair;
			 * NOTE(review): assumes count and status are adjacent
			 * 16-bit fields covered by one unsigned long store --
			 * confirm against the DMABUFFERENTRY layout */
			*((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;

			/* Adjust index for wrap around. */
			if ( start_index == info->rx_buffer_count )
				start_index = 0;

		} while( start_index != end_index );

		reprogram_dma = true;
	}

	if ( reprogram_dma )
	{
		usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
		usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);

		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

		/* This empties the receive FIFO and loads the RCC with RCLR */
		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}
	else
	{
		/* no reprogramming needed: just flush the receive FIFO */
		/* This empties the receive FIFO and loads the RCC with RCLR */
		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
		usc_RTCmd( info, RTCmd_PurgeRxFifo );
	}

} /* end of usc_process_rxoverrun_sync() */
5442
/* usc_stop_receiver()
 *
 * Disable the USC receiver: shut down receive DMA, mask and clear
 * receive interrupts, disable the receiver, and flush the receive
 * FIFO. Clears the driver's receive state flags.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_stop_receiver( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* Disable receive DMA channel. */
	/* This also disables receive DMA channel interrupts */
	usc_DmaCmd( info, DmaCmd_ResetRxChannel );

	/* clear any latched/pending receive status before masking */
	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
	usc_DisableInterrupts( info, RECEIVE_DATA | RECEIVE_STATUS );

	usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

	/* This empties the receive FIFO and loads the RCC with RCLR */
	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* reset driver-side receive state */
	info->rx_enabled = false;
	info->rx_overflow = false;
	info->rx_rcc_underrun = false;

} /* end of stop_receiver() */
5475
/* usc_start_receiver()
 *
 * Enable the USC receiver. For HDLC/RAW modes the receive DMA
 * channel is programmed with the buffer ring and DMA interrupts are
 * armed; for other (async) modes the receiver runs on per-character
 * receive-data interrupts with no DMA.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_start_receiver( struct mgsl_struct *info )
{
	u32 phys_addr;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* start from a clean slate: fresh DMA buffers, receiver halted */
	mgsl_reset_rx_dma_buffers( info );
	usc_stop_receiver( info );

	/* empty the receive FIFO (CCSR BIT13 clears the RCC FIFO) */
	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* DMA mode Transfers */
		/* Program the DMA controller. */
		/* Enable the DMA controller end of buffer interrupt. */

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[0].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	} else {
		/* async: interrupt per received character, no DMA */
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
		usc_EnableInterrupts(info, RECEIVE_DATA);

		usc_RTCmd( info, RTCmd_PurgeRxFifo );
		usc_RCmd( info, RCmd_EnterHuntmode );

		usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}

	usc_OutReg( info, CCSR, 0x1020 );

	info->rx_enabled = true;

} /* end of usc_start_receiver() */
5538
/* usc_start_transmitter()
 *
 * Enable the USC transmitter and send a transmit frame if
 * one is loaded in the DMA buffers. In async mode data is fed
 * through the transmit FIFO; in sync modes the transmit DMA
 * channel is programmed with the frame and a send is triggered.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_start_transmitter( struct mgsl_struct *info )
{
	u32 phys_addr;
	unsigned int FrameSize;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_transmitter(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if ( info->xmit_cnt ) {

		/* If auto RTS enabled and RTS is inactive, then assert */
		/* RTS and set a flag indicating that the driver should */
		/* negate RTS when the transmission completes. */

		info->drop_rts_on_tx_done = false;

		if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
			usc_get_serial_signals( info );
			if ( !(info->serial_signals & SerialSignal_RTS) ) {
				info->serial_signals |= SerialSignal_RTS;
				usc_set_serial_signals( info );
				info->drop_rts_on_tx_done = true;
			}
		}


		if ( info->params.mode == MGSL_MODE_ASYNC ) {
			/* async: prime the FIFO and let the TRANSMIT_DATA */
			/* interrupt keep it filled */
			if ( !info->tx_active ) {
				usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
				usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
				usc_EnableInterrupts(info, TRANSMIT_DATA);
				usc_load_txfifo(info);
			}
		} else {
			/* Disable transmit DMA controller while programming. */
			usc_DmaCmd( info, DmaCmd_ResetTxChannel );

			/* Transmit DMA buffer is loaded, so program USC */
			/* to send the frame contained in the buffers. */

			FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;

			/* if operating in Raw sync mode, reset the rcc component
			 * of the tx dma buffer entry, otherwise, the serial controller
			 * will send a closing sync char after this count.
			 */
			if ( info->params.mode == MGSL_MODE_RAW )
				info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;

			/* Program the Transmit Character Length Register (TCLR) */
			/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
			usc_OutReg( info, TCLR, (u16)FrameSize );

			usc_RTCmd( info, RTCmd_PurgeTxFifo );

			/* Program the address of the 1st DMA Buffer Entry in linked list */
			phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
			usc_OutDmaReg( info, NTARL, (u16)phys_addr );
			usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );

			usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
			usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
			usc_EnableInterrupts( info, TRANSMIT_STATUS );

			if ( info->params.mode == MGSL_MODE_RAW &&
					info->num_tx_dma_buffers > 1 ) {
			   /* When running external sync mode, attempt to 'stream' transmit  */
			   /* by filling tx dma buffers as they become available. To do this */
			   /* we need to enable Tx DMA EOB Status interrupts :               */
			   /*                                                                */
			   /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
			   /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */

			   usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
			   usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
			}

			/* Initialize Transmit DMA Channel */
			usc_DmaCmd( info, DmaCmd_InitTxChannel );

			usc_TCmd( info, TCmd_SendFrame );

			/* watchdog: fail the transmit if not done in 5 seconds */
			mod_timer(&info->tx_timer, jiffies +
					msecs_to_jiffies(5000));
		}
		info->tx_active = true;
	}

	if ( !info->tx_enabled ) {
		info->tx_enabled = true;
		if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
			usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
		else
			usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
	}

} /* end of usc_start_transmitter() */
5645
/* usc_stop_transmitter()
 *
 * Stop the transmitter and its DMA channel: cancel the transmit
 * watchdog timer, mask and clear transmit interrupts, disable the
 * transmitter, reset transmit DMA, and purge the transmit FIFO.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_stop_transmitter( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_transmitter(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* cancel the transmit timeout watchdog */
	del_timer(&info->tx_timer);

	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );

	usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* reset driver-side transmit state */
	info->tx_enabled = false;
	info->tx_active = false;

} /* end of usc_stop_transmitter() */
5673
/* usc_load_txfifo()
 *
 * Fill the transmit FIFO until the FIFO is full or
 * there is no more data to load. Data is moved 16 bits at a time
 * while at least two bytes and two FIFO slots remain; the tail of
 * the buffer (or a pending high-priority x_char) is written as a
 * single byte.
 *
 * Arguments:	info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void usc_load_txfifo( struct mgsl_struct *info )
{
	int Fifocount;
	u8 TwoBytes[2];

	/* nothing to send: neither buffered data nor a priority char */
	if ( !info->xmit_cnt && !info->x_char )
		return;

	/* Select transmit FIFO status readback in TICR */
	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	/* load the Transmit FIFO until FIFOs full or all data sent */

	/* TICR high byte reports the number of free FIFO entries */
	while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
		/* there is more space in the transmit FIFO and */
		/* there is more data in transmit buffer */

		if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
			/* write a 16-bit word from transmit buffer to 16C32 */

			/* pull two bytes from the circular transmit buffer */
			TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
			TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);

			/* NOTE(review): byte order of this 16-bit write relies on
			 * a little-endian host (byte 0 in the low half) -- confirm
			 * against the controller's byte-ordering setup in usc_reset() */
			outw( *((u16 *)TwoBytes), info->io_base + DATAREG);

			info->xmit_cnt -= 2;
			info->icount.tx += 2;
		} else {
			/* only 1 byte left to transmit or 1 FIFO slot left */

			/* point CCAR at the TDR low byte for a single-byte write */
			outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
				info->io_base + CCAR );

			if (info->x_char) {
				/* transmit pending high priority char */
				outw( info->x_char,info->io_base + CCAR );
				info->x_char = 0;
			} else {
				outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
				info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
				info->xmit_cnt--;
			}
			info->icount.tx++;
		}
	}

} /* end of usc_load_txfifo() */
5731
/* usc_reset()
 *
 * Reset the adapter to a known state and prepare it for further use.
 * PCI adapters are reset through the Misc Control Register of the
 * local bus bridge; ISA adapters through a write to the reset port.
 * Afterwards the bus configuration, port directions, and I/O control
 * registers are programmed to their baseline values.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_reset( struct mgsl_struct *info )
{
	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		int i;
		u32 readval;

		/* Set BIT30 of Misc Control Register */
		/* (Local Control Register 0x50) to force reset of USC. */

		volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
		u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);

		info->misc_ctrl_value |= BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/*
		 * Force at least 170ns delay before clearing
		 * reset bit. Each read from LCR takes at least
		 * 30ns so 10 times for 300ns to be safe.
		 * NOTE(review): readval itself is never used; the volatile
		 * reads of *MiscCtrl provide the delay.
		 */
		for(i=0;i<10;i++)
			readval = *MiscCtrl;

		/* release the USC from reset */
		info->misc_ctrl_value &= ~BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/* program local-bus read/write strobe timing */
		*LCR0BRDR = BUS_DESCRIPTOR(
			1,		// Write Strobe Hold (0-3)
			2,		// Write Strobe Delay (0-3)
			2,		// Read Strobe Delay  (0-3)
			0,		// NWDD (Write data-data) (0-3)
			4,		// NWAD (Write Addr-data) (0-31)
			0,		// NXDA (Read/Write Data-Addr) (0-3)
			0,		// NRDD (Read Data-Data) (0-3)
			5		// NRAD (Read Addr-Data) (0-31)
			);
	} else {
		/* do HW reset */
		outb( 0,info->io_base + 8 );
	}

	/* reset driver-side shadow state */
	info->mbre_bit = 0;
	info->loopback_bits = 0;
	info->usc_idle_mode = 0;

	/*
	 * Program the Bus Configuration Register (BCR)
	 *
	 * <15>		0	Don't use separate address
	 * <14..6>	0	reserved
	 * <5..4>	00	IAckmode = Default, don't care
	 * <3>		1	Bus Request Totem Pole output
	 * <2>		1	Use 16 Bit data bus
	 * <1>		0	IRQ Totem Pole output
	 * <0>		0	Don't Shift Right Addr
	 *
	 * 0000 0000 0000 1100 = 0x000c
	 *
	 * By writing to io_base + SDPIN the Wait/Ack pin is
	 * programmed to work as a Wait pin.
	 */

	outw( 0x000c,info->io_base + SDPIN );


	/* clear DCAR and CCAR */
	outw( 0,info->io_base );
	outw( 0,info->io_base + CCAR );

	/* select little endian byte ordering */
	usc_RTCmd( info, RTCmd_SelectLittleEndian );


	/* Port Control Register (PCR)
	 *
	 * <15..14>	11	Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
	 * <13..12>	11	Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
	 * <11..10>	00	Port 5 is Input (No Connect, Don't Care)
	 * <9..8>	00	Port 4 is Input (No Connect, Don't Care)
	 * <7..6>	11	Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
	 * <5..4>	11	Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
	 * <3..2>	01	Port 1 is Input (Dedicated RxC)
	 * <1..0>	01	Port 0 is Input (Dedicated TxC)
	 *
	 * 1111 0000 1111 0101 = 0xf0f5
	 */

	usc_OutReg( info, PCR, 0xf0f5 );


	/*
	 * Input/Output Control Register
	 *
	 * <15..14>	00	CTS is active low input
	 * <13..12>	00	DCD is active low input
	 * <11..10>	00	TxREQ pin is input (DSR)
	 * <9..8>	00	RxREQ pin is input (RI)
	 * <7..6>	00	TxD is output (Transmit Data)
	 * <5..3>	000	TxC Pin in Input (14.7456MHz Clock)
	 * <2..0>	100	RxC is Output (drive with BRG0)
	 *
	 * 0000 0000 0000 0100 = 0x0004
	 */

	usc_OutReg( info, IOCR, 0x0004 );

} /* end of usc_reset() */
5845
/* usc_set_async_mode()
 *
 * Program the adapter for asynchronous communications: character
 * framing (stop bits, data bits, parity), receive/transmit interrupt
 * trigger levels, and the async baud clock. A dummy SDLC frame is
 * looped back first to flush stale sync-mode status from the 16C32.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_set_async_mode( struct mgsl_struct *info )
{
	u16 RegValue;

	/* disable interrupts while programming USC */
	usc_DisableMasterIrqBit( info );

	outw( 0, info->io_base ); 			/* clear Master Bus Enable (DCAR) */
	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */

	/* flush stale sync-mode status (see usc_loopback_frame) */
	usc_loopback_frame( info );

	/* Channel mode Register (CMR)
	 *
	 * <15..14>	00	Tx Sub modes, 00 = 1 Stop Bit
	 * <13..12>	00	00 = 16X Clock
	 * <11..8>	0000	Transmitter mode = Asynchronous
	 * <7..6>	00	reserved?
	 * <5..4>	00	Rx Sub modes, 00 = 16X Clock
	 * <3..0>	0000	Receiver mode = Asynchronous
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;
	if ( info->params.stop_bits != 1 )
		RegValue |= BIT14;	/* 2 stop bits */
	usc_OutReg( info, CMR, RegValue );


	/* Receiver mode Register (RMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Even parity
	 * <5>		0	parity disabled
	 * <4..2>	000	Receive Char Length = 8 bits
	 * <1..0>	00	Disable Receiver
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	if ( info->params.data_bits != 8 )
		RegValue |= BIT4 | BIT3 | BIT2;	/* 7 data bits */

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;		/* parity enable */
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;	/* even parity */
	}

	usc_OutReg( info, RMR, RegValue );


	/* Set IRQ trigger level */

	usc_RCmd( info, RCmd_SelectRicrIntLevel );


	/* Receive Interrupt Control Register (RICR)
	 *
	 * <15..8>	?	RxFIFO IRQ Request Level
	 *
	 * Note: For async mode the receive FIFO level must be set
	 * to 0 to avoid the situation where the FIFO contains fewer bytes
	 * than the trigger level and no more data is expected.
	 *
	 * <7>		0	Exited Hunt IA (Interrupt Arm)
	 * <6>		0	Idle Received IA
	 * <5>		0	Break/Abort IA
	 * <4>		0	Rx Bound IA
	 * <3>		0	Queued status reflects oldest byte in FIFO
	 * <2>		0	Abort/PE IA
	 * <1>		0	Rx Overrun IA
	 * <0>		0	Select TC0 value for readback
	 *
	 * 0000 0000 0000 0000 = 0x0000 (FIFO level 0 in MSB)
	 */

	usc_OutReg( info, RICR, 0x0000 );

	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );


	/* Transmit mode Register (TMR)
	 *
	 * <15..13>	000	encoding = None
	 * <12..08>	00000	reserved (Sync Only)
	 * <7..6>	00	Transmit parity Even
	 * <5>		0	Transmit parity Disabled
	 * <4..2>	000	Tx Char Length = 8 bits
	 * <1..0>	00	Disable Transmitter
	 *
	 * 0000 0000 0000 0000 = 0x0
	 */

	RegValue = 0;

	if ( info->params.data_bits != 8 )
		RegValue |= BIT4 | BIT3 | BIT2;	/* 7 data bits */

	if ( info->params.parity != ASYNC_PARITY_NONE ) {
		RegValue |= BIT5;		/* parity enable */
		if ( info->params.parity != ASYNC_PARITY_ODD )
			RegValue |= BIT6;	/* even parity */
	}

	usc_OutReg( info, TMR, RegValue );

	usc_set_txidle( info );


	/* Set IRQ trigger level */

	usc_TCmd( info, TCmd_SelectTicrIntLevel );


	/* Transmit Interrupt Control Register (TICR)
	 *
	 * <15..8>	0x1f	Transmit FIFO IRQ Level (31 entries free)
	 * <7>		0	Present IA (Interrupt Arm)
	 * <6>		1	Idle Sent IA
	 * <5>		0	Abort Sent IA
	 * <4>		0	EOF/EOM Sent IA
	 * <3>		0	CRC Sent IA
	 * <2>		0	1 = Wait for SW Trigger to Start Frame
	 * <1>		0	Tx Underrun IA
	 * <0>		0	TC0 constant on read back
	 *
	 * 0001 1111 0100 0000 = 0x1f40
	 */

	usc_OutReg( info, TICR, 0x1f40 );

	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );

	/* program BRG0 to generate the async baud clock */
	usc_enable_async_clock( info, info->params.data_rate );


	/* Channel Control/status Register (CCSR)
	 *
	 * <15>		X	RCC FIFO Overflow status (RO)
	 * <14>		X	RCC FIFO Not Empty status (RO)
	 * <13>		0	1 = Clear RCC FIFO (WO)
	 * <12>		X	DPLL in Sync status (RO)
	 * <11>		X	DPLL 2 Missed Clocks status (RO)
	 * <10>		X	DPLL 1 Missed Clock status (RO)
	 * <9..8>	00	DPLL Resync on rising and falling edges (RW)
	 * <7>		X	SDLC Loop On status (RO)
	 * <6>		X	SDLC Loop Send status (RO)
	 * <5>		1	Bypass counters for TxClk and RxClk (RW)
	 * <4..2>	000	Last Char of SDLC frame has 8 bits (RW)
	 * <1..0>	00	reserved
	 *
	 * 0000 0000 0010 0000 = 0x0020
	 */

	usc_OutReg( info, CCSR, 0x0020 );

	/* mask and clear everything; caller enables what it needs */
	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
			      RECEIVE_DATA + RECEIVE_STATUS );

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
			      RECEIVE_DATA + RECEIVE_STATUS );

	usc_EnableMasterIrqBit( info );

	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
		/* Enable INTEN (Port 6, Bit12) */
		/* This connects the IRQ request signal to the ISA bus */
		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
	}

	if (info->params.loopback) {
		/* internal data loopback requested */
		info->loopback_bits = 0x300;
		outw(0x0300, info->io_base + CCAR);
	}

} /* end of usc_set_async_mode() */
6036
/* usc_loopback_frame()
 *
 *	Loop back a small (2 byte) dummy SDLC frame.
 *	Interrupts and DMA are NOT used. The purpose of this is to
 *	clear any 'stale' status info left over from running in async mode.
 *
 *	The 16C32 shows the strange behaviour of marking the 1st
 *	received SDLC frame with a CRC error even when there is no
 *	CRC error. To get around this a small dummy frame of 2 bytes
 *	is looped back when switching from async to sync mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_loopback_frame( struct mgsl_struct *info )
{
	int i;
	unsigned long oldmode = info->params.mode;

	/* temporarily force HDLC mode so usc_set_sdlc_mode() programs
	 * the SDLC register set; the caller's mode is restored below */
	info->params.mode = MGSL_MODE_HDLC;

	/* no interrupts during the loopback; everything is polled */
	usc_DisableMasterIrqBit( info );

	usc_set_sdlc_mode( info );
	usc_enable_loopback( info, 1 );

	/* Write 16-bit Time Constant for BRG0 */
	usc_OutReg( info, TC0R, 0 );

	/* Channel Control Register (CCR)
	 *
	 * <15..14>	00	Don't use 32-bit Tx Control Blocks (TCBs)
	 * <13>		0	Trigger Tx on SW Command Disabled
	 * <12>		0	Flag Preamble Disabled
	 * <11..10>	00	Preamble Length = 8-Bits
	 * <9..8>	01	Preamble Pattern = flags
	 * <7..6>	10	Don't use 32-bit Rx status Blocks (RSBs)
	 * <5>		0	Trigger Rx on SW Command Disabled
	 * <4..0>	0	reserved
	 *
	 *	0000 0001 0000 0000 = 0x0100
	 */

	usc_OutReg( info, CCR, 0x0100 );

	/* SETUP RECEIVER */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );
	usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);

	/* SETUP TRANSMITTER */
	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
	usc_OutReg( info, TCLR, 2 );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* unlatch Tx status bits, and start transmit channel. */
	/* the single 16-bit write below supplies both frame bytes */
	usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
	outw(0,info->io_base + DATAREG);

	/* ENABLE TRANSMITTER */
	usc_TCmd( info, TCmd_SendFrame );
	usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);

	/* WAIT FOR RECEIVE COMPLETE */
	/* bounded poll of RCSR; exit on any end-of-frame/error indication
	 * (BIT8|BIT4|BIT3|BIT1) so a dead device cannot hang us forever */
	for (i=0 ; i<1000 ; i++)
		if (usc_InReg( info, RCSR ) & (BIT8 | BIT4 | BIT3 | BIT1))
			break;

	/* clear Internal Data loopback mode */
	usc_enable_loopback(info, 0);

	usc_EnableMasterIrqBit(info);

	/* restore the operating mode saved on entry */
	info->params.mode = oldmode;

} /* end of usc_loopback_frame() */
6113
/* usc_set_sync_mode()	Programs the USC for SDLC communications.
 *
 * Arguments:		info	pointer to adapter info structure
 * Return Value:	None
 */
static void usc_set_sync_mode( struct mgsl_struct *info )
{
	/* loop back a dummy frame first to flush stale status the 16C32
	 * carries over from async mode (this also programs SDLC mode),
	 * then program SDLC mode again for normal (non-loopback) use */
	usc_loopback_frame( info );
	usc_set_sdlc_mode( info );

	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
		/* Enable INTEN (Port 6, Bit12) */
		/* This connects the IRQ request signal to the ISA bus */
		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
	}

	usc_enable_aux_clock(info, info->params.clock_speed);

	/* internal loopback, if requested in the current parameters */
	if (info->params.loopback)
		usc_enable_loopback(info,1);

} /* end of usc_set_sync_mode() */
6136
6137 /* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6138 *
6139 * Arguments: info pointer to device instance data
6140 * Return Value: None
6141 */
6142 static void usc_set_txidle( struct mgsl_struct *info )
6143 {
6144 u16 usc_idle_mode = IDLEMODE_FLAGS;
6145
6146 /* Map API idle mode to USC register bits */
6147
6148 switch( info->idle_mode ){
6149 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6150 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6151 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6152 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6153 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6154 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6155 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6156 }
6157
6158 info->usc_idle_mode = usc_idle_mode;
6159 //usc_OutReg(info, TCSR, usc_idle_mode);
6160 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6161 info->tcsr_value += usc_idle_mode;
6162 usc_OutReg(info, TCSR, info->tcsr_value);
6163
6164 /*
6165 * if SyncLink WAN adapter is running in external sync mode, the
6166 * transmitter has been set to Monosync in order to try to mimic
6167 * a true raw outbound bit stream. Monosync still sends an open/close
6168 * sync char at the start/end of a frame. Try to match those sync
6169 * patterns to the idle mode set here
6170 */
6171 if ( info->params.mode == MGSL_MODE_RAW ) {
6172 unsigned char syncpat = 0;
6173 switch( info->idle_mode ) {
6174 case HDLC_TXIDLE_FLAGS:
6175 syncpat = 0x7e;
6176 break;
6177 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6178 syncpat = 0x55;
6179 break;
6180 case HDLC_TXIDLE_ZEROS:
6181 case HDLC_TXIDLE_SPACE:
6182 syncpat = 0x00;
6183 break;
6184 case HDLC_TXIDLE_ONES:
6185 case HDLC_TXIDLE_MARK:
6186 syncpat = 0xff;
6187 break;
6188 case HDLC_TXIDLE_ALT_MARK_SPACE:
6189 syncpat = 0xaa;
6190 break;
6191 }
6192
6193 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6194 }
6195
6196 } /* end of usc_set_txidle() */
6197
6198 /* usc_get_serial_signals()
6199 *
6200 * Query the adapter for the state of the V24 status (input) signals.
6201 *
6202 * Arguments: info pointer to device instance data
6203 * Return Value: None
6204 */
6205 static void usc_get_serial_signals( struct mgsl_struct *info )
6206 {
6207 u16 status;
6208
6209 /* clear all serial signals except RTS and DTR */
6210 info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
6211
6212 /* Read the Misc Interrupt status Register (MISR) to get */
6213 /* the V24 status signals. */
6214
6215 status = usc_InReg( info, MISR );
6216
6217 /* set serial signal bits to reflect MISR */
6218
6219 if ( status & MISCSTATUS_CTS )
6220 info->serial_signals |= SerialSignal_CTS;
6221
6222 if ( status & MISCSTATUS_DCD )
6223 info->serial_signals |= SerialSignal_DCD;
6224
6225 if ( status & MISCSTATUS_RI )
6226 info->serial_signals |= SerialSignal_RI;
6227
6228 if ( status & MISCSTATUS_DSR )
6229 info->serial_signals |= SerialSignal_DSR;
6230
6231 } /* end of usc_get_serial_signals() */
6232
6233 /* usc_set_serial_signals()
6234 *
6235 * Set the state of RTS and DTR based on contents of
6236 * serial_signals member of device extension.
6237 *
6238 * Arguments: info pointer to device instance data
6239 * Return Value: None
6240 */
6241 static void usc_set_serial_signals( struct mgsl_struct *info )
6242 {
6243 u16 Control;
6244 unsigned char V24Out = info->serial_signals;
6245
6246 /* get the current value of the Port Control Register (PCR) */
6247
6248 Control = usc_InReg( info, PCR );
6249
6250 if ( V24Out & SerialSignal_RTS )
6251 Control &= ~(BIT6);
6252 else
6253 Control |= BIT6;
6254
6255 if ( V24Out & SerialSignal_DTR )
6256 Control &= ~(BIT4);
6257 else
6258 Control |= BIT4;
6259
6260 usc_OutReg( info, PCR, Control );
6261
6262 } /* end of usc_set_serial_signals() */
6263
/* usc_enable_async_clock()
 *
 *	Enable the async clock at the specified frequency.
 *
 * Arguments:		info		pointer to device instance data
 *			data_rate	data rate of clock in bps
 *					0 disables the AUX clock.
 * Return Value:	None
 */
static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
{
	if ( data_rate ) {
		/*
		 * Clock mode Control Register (CMCR)
		 *
		 * <15..14>	00	counter 1 Disabled
		 * <13..12>	00	counter 0 Disabled
		 * <11..10>	11	BRG1 Input is TxC Pin
		 * <9..8>	11	BRG0 Input is TxC Pin
		 * <7..6>	01	DPLL Input is BRG1 Output
		 * <5..3>	100	TxCLK comes from BRG0
		 * <2..0>	100	RxCLK comes from BRG0
		 *
		 *	0000 1111 0110 0100 = 0x0f64
		 */

		usc_OutReg( info, CMCR, 0x0f64 );


		/*
		 * Write 16-bit Time Constant for BRG0
		 * Time Constant = (ClkSpeed / data_rate) - 1
		 * ClkSpeed = 921600 (ISA), 691200 (PCI)
		 * (integer division truncates; result is cast to u16)
		 */

		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
		else
			usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );


		/*
		 * Hardware Configuration Register (HCR)
		 * Clear Bit 1, BRG0 mode = Continuous
		 * Set Bit 0 to enable BRG0.
		 */

		usc_OutReg( info, HCR,
			    (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );


		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */

		usc_OutReg( info, IOCR,
			    (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
	} else {
		/* data rate == 0 so turn off BRG0 */
		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
	}

} /* end of usc_enable_async_clock() */
6325
6326 /*
6327 * Buffer Structures:
6328 *
6329 * Normal memory access uses virtual addresses that can make discontiguous
6330 * physical memory pages appear to be contiguous in the virtual address
6331 * space (the processors memory mapping handles the conversions).
6332 *
6333 * DMA transfers require physically contiguous memory. This is because
6334 * the DMA system controller and DMA bus masters deal with memory using
6335 * only physical addresses.
6336 *
6337 * This causes a problem under Windows NT when large DMA buffers are
6338 * needed. Fragmentation of the nonpaged pool prevents allocations of
6339 * physically contiguous buffers larger than the PAGE_SIZE.
6340 *
6341 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6342 * allows DMA transfers to physically discontiguous buffers. Information
6343 * about each data transfer buffer is contained in a memory structure
6344 * called a 'buffer entry'. A list of buffer entries is maintained
6345 * to track and control the use of the data transfer buffers.
6346 *
6347 * To support this strategy we will allocate sufficient PAGE_SIZE
6348 * contiguous memory buffers to allow for the total required buffer
6349 * space.
6350 *
6351 * The 16C32 accesses the list of buffer entries using Bus Master
6352 * DMA. Control information is read from the buffer entries by the
6353 * 16C32 to control data transfers. status information is written to
6354 * the buffer entries by the 16C32 to indicate the status of completed
6355 * transfers.
6356 *
6357 * The CPU writes control information to the buffer entries to control
6358 * the 16C32 and reads status information from the buffer entries to
6359 * determine information about received and transmitted frames.
6360 *
 * Because the CPU and 16C32 (adapter) both need simultaneous access
 * to the buffer entries, the buffer entry memory is allocated as a
 * shared (DMA-accessible) buffer; HalAllocateCommonBuffer() is the
 * Windows NT API this description originally referred to. This
 * restricts the size of the buffer entry list to PAGE_SIZE.
6365 *
6366 * The actual data buffers on the other hand will only be accessed
6367 * by the CPU or the adapter but not by both simultaneously. This allows
6368 * Scatter/Gather packet based DMA procedures for using physically
6369 * discontiguous pages.
6370 */
6371
/*
 * mgsl_reset_tx_dma_buffers()
 *
 *	Set the count for all transmit buffers to 0 to indicate the
 *	buffer is available for use and set the current buffer to the
 *	first buffer. This effectively makes all buffers free and
 *	discards any data in buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* one wide store intended to clear 'count' and the field(s)
		 * adjacent to it in a single write.
		 * NOTE(review): this writes sizeof(unsigned long) bytes
		 * starting at 'count' -- on 64-bit targets that spans more
		 * of the entry than on 32-bit; verify DMABUFFERENTRY layout */
		*((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
	}

	/* reset ring bookkeeping: start loading/transmitting at entry 0 */
	info->current_tx_buffer = 0;
	info->start_tx_dma_buffer = 0;
	info->tx_dma_buffers_used = 0;

	/* reset the intermediate tx holding buffer queue as well */
	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

} /* end of mgsl_reset_tx_dma_buffers() */
6400
6401 /*
6402 * num_free_tx_dma_buffers()
6403 *
6404 * returns the number of free tx dma buffers available
6405 *
6406 * Arguments: info pointer to device instance data
6407 * Return Value: number of free tx dma buffers
6408 */
6409 static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6410 {
6411 return info->tx_buffer_count - info->tx_dma_buffers_used;
6412 }
6413
/*
 * mgsl_reset_rx_dma_buffers()
 *
 *	Set the count for all receive buffers to DMABUFFERSIZE
 *	and set the current buffer to the first buffer. This effectively
 *	makes all buffers free and discards any data in buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* one wide store intended to reset 'count' to DMABUFFERSIZE
		 * and clear the adjacent 'status' field in a single write
		 * (see the equivalent per-field form commented out below).
		 * NOTE(review): writes sizeof(unsigned long) bytes starting
		 * at 'count' -- verify DMABUFFERENTRY layout on 64-bit */
		*((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
		// info->rx_buffer_list[i].count = DMABUFFERSIZE;
		// info->rx_buffer_list[i].status = 0;
	}

	/* the 16C32 will begin filling at the first ring entry */
	info->current_rx_buffer = 0;

} /* end of mgsl_reset_rx_dma_buffers() */
6437
/*
 * mgsl_free_rx_frame_buffers()
 *
 *	Free the receive buffers used by a received SDLC
 *	frame such that the buffers can be reused.
 *
 * Arguments:
 *
 *	info			pointer to device instance data
 *	StartIndex		index of 1st receive buffer of frame
 *	EndIndex		index of last receive buffer of frame
 *
 * Return Value:	None
 */
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
{
	bool Done = false;
	DMABUFFERENTRY *pBufEntry;
	unsigned int Index;

	/* Starting with 1st buffer entry of the frame clear the status */
	/* field and set the count field to DMA Buffer Size. */

	Index = StartIndex;

	/* walk the circular buffer list from StartIndex through EndIndex
	 * inclusive (the ring may wrap, so a simple for-loop won't do) */
	while( !Done ) {
		pBufEntry = &(info->rx_buffer_list[Index]);

		if ( Index == EndIndex ) {
			/* This is the last buffer of the frame! */
			Done = true;
		}

		/* reset current buffer for reuse */
		// pBufEntry->status = 0;
		// pBufEntry->count = DMABUFFERSIZE;
		/* one wide store covering 'count' and the adjacent field(s);
		 * NOTE(review): writes sizeof(unsigned long) bytes -- verify
		 * DMABUFFERENTRY layout on 64-bit targets */
		*((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;

		/* advance to next buffer entry in linked list */
		Index++;
		if ( Index == info->rx_buffer_count )
			Index = 0;
	}

	/* set current buffer to next buffer after last buffer of frame */
	info->current_rx_buffer = Index;

} /* end of mgsl_free_rx_frame_buffers() */
6486
/* mgsl_get_rx_frame()
 *
 *	This function attempts to return a received SDLC frame from the
 *	receive DMA buffers. Only frames received without errors are returned,
 *	except that CRC-errored frames are also returned when the extended
 *	CRC-return mode (HDLC_CRC_RETURN_EX) is selected.
 *
 * Arguments:		info	pointer to device extension
 * Return Value:	true if frame returned, otherwise false
 */
static bool mgsl_get_rx_frame(struct mgsl_struct *info)
{
	unsigned int StartIndex, EndIndex;	/* index of 1st and last buffers of Rx frame */
	unsigned short status;
	DMABUFFERENTRY *pBufEntry;
	unsigned int framesize = 0;
	bool ReturnCode = false;
	unsigned long flags;
	struct tty_struct *tty = info->port.tty;
	bool return_frame = false;

	/*
	 * current_rx_buffer points to the 1st buffer of the next available
	 * receive frame. To find the last buffer of the frame look for
	 * a non-zero status field in the buffer entries. (The status
	 * field is set by the 16C32 after completing a receive frame.)
	 */

	StartIndex = EndIndex = info->current_rx_buffer;

	while( !info->rx_buffer_list[EndIndex].status ) {
		/*
		 * If the count field of the buffer entry is non-zero then
		 * this buffer has not been used. (The 16C32 clears the count
		 * field when it starts using the buffer.) If an unused buffer
		 * is encountered then there are no frames available.
		 */

		if ( info->rx_buffer_list[EndIndex].count )
			goto Cleanup;

		/* advance to next buffer entry in linked list */
		EndIndex++;
		if ( EndIndex == info->rx_buffer_count )
			EndIndex = 0;

		/* if entire list searched then no frame available */
		if ( EndIndex == StartIndex ) {
			/* If this occurs then something bad happened,
			 * all buffers have been 'used' but none mark
			 * the end of a frame. Reset buffers and receiver.
			 */

			if ( info->rx_enabled ){
				spin_lock_irqsave(&info->irq_spinlock,flags);
				usc_start_receiver(info);
				spin_unlock_irqrestore(&info->irq_spinlock,flags);
			}
			goto Cleanup;
		}
	}


	/* check status of receive frame */

	status = info->rx_buffer_list[EndIndex].status;

	if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
			RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
		/* error frame: bump the matching error counter */
		if ( status & RXSTATUS_SHORT_FRAME )
			info->icount.rxshort++;
		else if ( status & RXSTATUS_ABORT )
			info->icount.rxabort++;
		else if ( status & RXSTATUS_OVERRUN )
			info->icount.rxover++;
		else {
			info->icount.rxcrc++;
			/* CRC-errored frames are still delivered (with a
			 * trailing status byte) in extended CRC-return mode */
			if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
				return_frame = true;
		}
		framesize = 0;
#if SYNCLINK_GENERIC_HDLC
		{
			info->netdev->stats.rx_errors++;
			info->netdev->stats.rx_frame_errors++;
		}
#endif
	} else
		return_frame = true;

	if ( return_frame ) {
		/* receive frame has no errors, get frame size.
		 * The frame size is the starting value of the RCC (which was
		 * set to 0xffff) minus the ending value of the RCC (decremented
		 * once for each receive character) minus 2 for the 16-bit CRC.
		 */

		framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;

		/* adjust frame size for CRC if any */
		if ( info->params.crc_type == HDLC_CRC_16_CCITT )
			framesize -= 2;
		else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
			framesize -= 4;
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
			__FILE__,__LINE__,info->device_name,status,framesize);

	if ( debug_level >= DEBUG_LEVEL_DATA )
		mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
			min_t(int, framesize, DMABUFFERSIZE),0);

	if (framesize) {
		/* oversized frames are counted and dropped; in extended
		 * CRC-return mode reserve 1 extra byte for the status byte */
		if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
				((framesize+1) > info->max_frame_size) ) ||
			(framesize > info->max_frame_size) )
			info->icount.rxlong++;
		else {
			/* copy dma buffer(s) to contiguous intermediate buffer */
			int copy_count = framesize;
			int index = StartIndex;
			unsigned char *ptmp = info->intermediate_rxbuffer;

			if ( !(status & RXSTATUS_CRC_ERROR))
				info->icount.rxok++;

			while(copy_count) {
				int partial_count;
				if ( copy_count > DMABUFFERSIZE )
					partial_count = DMABUFFERSIZE;
				else
					partial_count = copy_count;

				pBufEntry = &(info->rx_buffer_list[index]);
				memcpy( ptmp, pBufEntry->virt_addr, partial_count );
				ptmp += partial_count;
				copy_count -= partial_count;

				/* wrap around the circular buffer list */
				if ( ++index == info->rx_buffer_count )
					index = 0;
			}

			if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
				/* append a status byte so the reader can tell
				 * good frames from CRC-errored ones */
				++framesize;
				*ptmp = (status & RXSTATUS_CRC_ERROR ?
						RX_CRC_ERROR :
						RX_OK);

				if ( debug_level >= DEBUG_LEVEL_DATA )
					printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
						__FILE__,__LINE__,info->device_name,
						*ptmp);
			}

#if SYNCLINK_GENERIC_HDLC
			if (info->netcount)
				hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
			else
#endif
				ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
		}
	}
	/* Free the buffers used by this frame. */
	mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );

	ReturnCode = true;

Cleanup:

	if ( info->rx_enabled && info->rx_overflow ) {
		/* The receiver needs to be restarted because of
		 * a receive overflow (buffer or FIFO). If the
		 * receive buffers are now empty, then restart receiver.
		 */

		if ( !info->rx_buffer_list[EndIndex].status &&
			info->rx_buffer_list[EndIndex].count ) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}

	return ReturnCode;

} /* end of mgsl_get_rx_frame() */
6673
/* mgsl_get_raw_rx_frame()
 *
 *	This function attempts to return a received frame from the
 *	receive DMA buffers when running in external loop mode. In this mode,
 *	we will return at most one DMABUFFERSIZE frame to the application.
 *	The USC receiver is triggering off of DCD going active to start a new
 *	frame, and DCD going inactive to terminate the frame (similar to
 *	processing a closing flag character).
 *
 *	In this routine, we will return DMABUFFERSIZE "chunks" at a time.
 *	If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
 *	status field and the RCC field will indicate the length of the
 *	entire received frame. We take this RCC field and get the modulus
 *	of RCC and DMABUFFERSIZE to determine the number of bytes in the
 *	last Rx DMA buffer and return that last portion of the frame.
 *
 * Arguments:		info	pointer to device extension
 * Return Value:	true if frame returned, otherwise false
 */
static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
{
	unsigned int CurrentIndex, NextIndex;
	unsigned short status;
	DMABUFFERENTRY *pBufEntry;
	unsigned int framesize = 0;
	bool ReturnCode = false;
	unsigned long flags;
	struct tty_struct *tty = info->port.tty;

	/*
	 * current_rx_buffer points to the 1st buffer of the next available
	 * receive frame. The status field is set by the 16C32 after
	 * completing a receive frame. If the status field of this buffer
	 * is zero, either the USC is still filling this buffer or this
	 * is one of a series of buffers making up a received frame.
	 *
	 * If the count field of this buffer is zero, the USC is either
	 * using this buffer or has used this buffer. Look at the count
	 * field of the next buffer. If that next buffer's count is
	 * non-zero, the USC is still actively using the current buffer.
	 * Otherwise, if the next buffer's count field is zero, the
	 * current buffer is complete and the USC is using the next
	 * buffer.
	 */
	CurrentIndex = NextIndex = info->current_rx_buffer;
	++NextIndex;
	if ( NextIndex == info->rx_buffer_count )
		NextIndex = 0;

	if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
		(info->rx_buffer_list[CurrentIndex].count == 0 &&
			info->rx_buffer_list[NextIndex].count == 0)) {
		/*
		 * Either the status field of this dma buffer is non-zero
		 * (indicating the last buffer of a receive frame) or the next
		 * buffer is marked as in use -- implying this buffer is complete
		 * and an intermediate buffer for this received frame.
		 */

		status = info->rx_buffer_list[CurrentIndex].status;

		if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
				RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
			/* error completion: count it and deliver nothing */
			if ( status & RXSTATUS_SHORT_FRAME )
				info->icount.rxshort++;
			else if ( status & RXSTATUS_ABORT )
				info->icount.rxabort++;
			else if ( status & RXSTATUS_OVERRUN )
				info->icount.rxover++;
			else
				info->icount.rxcrc++;
			framesize = 0;
		} else {
			/*
			 * A receive frame is available, get frame size and status.
			 *
			 * The frame size is the starting value of the RCC (which was
			 * set to 0xffff) minus the ending value of the RCC (decremented
			 * once for each receive character) minus 2 or 4 for the 16-bit
			 * or 32-bit CRC.
			 *
			 * If the status field is zero, this is an intermediate buffer.
			 * Its size is 4K.
			 *
			 * If the DMA Buffer Entry's Status field is non-zero, the
			 * receive operation completed normally (ie: DCD dropped). The
			 * RCC field is valid and holds the received frame size.
			 * It is possible that the RCC field will be zero on a DMA buffer
			 * entry with a non-zero status. This can occur if the total
			 * frame size (number of bytes between the time DCD goes active
			 * to the time DCD goes inactive) exceeds 65535 bytes. In this
			 * case the 16C32 has underrun on the RCC count and appears to
			 * stop updating this counter to let us know the actual received
			 * frame size. If this happens (non-zero status and zero RCC),
			 * simply return the entire RxDMA Buffer
			 */
			if ( status ) {
				/*
				 * In the event that the final RxDMA Buffer is
				 * terminated with a non-zero status and the RCC
				 * field is zero, we interpret this as the RCC
				 * having underflowed (received frame > 65535 bytes).
				 *
				 * Signal the event to the user by passing back
				 * a status of RxStatus_CrcError returning the full
				 * buffer and let the app figure out what data is
				 * actually valid
				 */
				if ( info->rx_buffer_list[CurrentIndex].rcc )
					framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
				else
					framesize = DMABUFFERSIZE;
			}
			else
				framesize = DMABUFFERSIZE;
		}

		if ( framesize > DMABUFFERSIZE ) {
			/*
			 * if running in raw sync mode, ISR handler for
			 * End Of Buffer events terminates all buffers at 4K.
			 * If this frame size is said to be >4K, get the
			 * actual number of bytes of the frame in this buffer.
			 */
			framesize = framesize % DMABUFFERSIZE;
		}


		if ( debug_level >= DEBUG_LEVEL_BH )
			printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
				__FILE__,__LINE__,info->device_name,status,framesize);

		if ( debug_level >= DEBUG_LEVEL_DATA )
			mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
				min_t(int, framesize, DMABUFFERSIZE),0);

		if (framesize) {
			/* copy dma buffer(s) to contiguous intermediate buffer */
			/* NOTE: we never copy more than DMABUFFERSIZE bytes	*/

			pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
			memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
			info->icount.rxok++;

			/* hand the chunk directly to the line discipline */
			ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
		}

		/* Free the buffers used by this frame. */
		mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );

		ReturnCode = true;
	}


	if ( info->rx_enabled && info->rx_overflow ) {
		/* The receiver needs to be restarted because of
		 * a receive overflow (buffer or FIFO). If the
		 * receive buffers are now empty, then restart receiver.
		 */

		if ( !info->rx_buffer_list[CurrentIndex].status &&
			info->rx_buffer_list[CurrentIndex].count ) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}

	return ReturnCode;

} /* end of mgsl_get_raw_rx_frame() */
6845
/* mgsl_load_tx_dma_buffer()
 *
 *	Load the transmit DMA buffer with the specified data.
 *	The frame may span multiple entries of the circular tx buffer list.
 *
 * Arguments:
 *
 *	info		pointer to device extension
 *	Buffer		pointer to buffer containing frame to load
 *	BufferSize	size in bytes of frame in Buffer
 *
 * Return Value:	None
 */
static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
		const char *Buffer, unsigned int BufferSize)
{
	unsigned short Copycount;
	unsigned int i = 0;
	DMABUFFERENTRY *pBufEntry;

	if ( debug_level >= DEBUG_LEVEL_DATA )
		mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);

	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
		/* set CMR:13 to start transmit when
		 * next GoAhead (abort) is received
		 */
		info->cmr_value |= BIT13;
	}

	/* begin loading the frame in the next available tx dma
	 * buffer, remember it's starting location for setting
	 * up tx dma operation
	 */
	i = info->current_tx_buffer;
	info->start_tx_dma_buffer = i;

	/* Setup the status and RCC (Frame Size) fields of the 1st */
	/* buffer entry in the transmit DMA buffer list. */

	info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
	info->tx_buffer_list[i].rcc    = BufferSize;
	info->tx_buffer_list[i].count  = BufferSize;

	/* Copy frame data from 1st source buffer to the DMA buffers. */
	/* The frame data may span multiple DMA buffers. */

	while( BufferSize ){
		/* Get a pointer to next DMA buffer entry. */
		pBufEntry = &info->tx_buffer_list[i++];

		/* wrap around the circular buffer list */
		if ( i == info->tx_buffer_count )
			i=0;

		/* Calculate the number of bytes that can be copied from */
		/* the source buffer to this DMA buffer. */
		if ( BufferSize > DMABUFFERSIZE )
			Copycount = DMABUFFERSIZE;
		else
			Copycount = BufferSize;

		/* Actually copy data from source buffer to DMA buffer. */
		/* Also set the data count for this individual DMA buffer. */
		/* PCI adapters use a dedicated copy routine for shared memory */
		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
		else
			memcpy(pBufEntry->virt_addr, Buffer, Copycount);

		pBufEntry->count = Copycount;

		/* Advance source pointer and reduce remaining data count. */
		Buffer += Copycount;
		BufferSize -= Copycount;

		++info->tx_dma_buffers_used;
	}

	/* remember next available tx dma buffer */
	info->current_tx_buffer = i;

} /* end of mgsl_load_tx_dma_buffer() */
6926
/*
 * mgsl_register_test()
 *
 *	Performs a register test of the 16C32.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_register_test( struct mgsl_struct *info )
{
	static unsigned short BitPatterns[] =
		{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
	static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
	unsigned int i;
	bool rc = true;
	unsigned long flags;

	/* hold the device lock for the whole test so the ISR cannot
	 * touch registers while we are exercising them */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);

	/* Verify the reset state of some registers. */

	if ( (usc_InReg( info, SICR ) != 0) ||
		  (usc_InReg( info, IVR  ) != 0) ||
		  (usc_InDmaReg( info, DIVR ) != 0) ){
		rc = false;
	}

	if ( rc ){
		/* Write bit patterns to various registers but do it out of */
		/* sync (each register gets a different pattern on each pass), */
		/* then read back and verify values. */

		for ( i = 0 ; i < Patterncount ; i++ ) {
			usc_OutReg( info, TC0R, BitPatterns[i] );
			usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
			usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
			usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
			usc_OutReg( info, RSR,  BitPatterns[(i+4)%Patterncount] );
			usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );

			if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
				  (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
				  (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
				  (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
				  (usc_InReg( info, RSR )  != BitPatterns[(i+4)%Patterncount]) ||
				  (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
				rc = false;
				break;
			}
		}
	}

	/* leave the device in a known (reset) state */
	usc_reset(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;

} /* end of mgsl_register_test() */
6985
/* mgsl_irq_test()	Perform interrupt test of the 16C32.
 *
 *	Triggers an I/O pin status interrupt (TxC transition) and waits up
 *	to ~1 second for the ISR to set info->irq_occurred.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_irq_test( struct mgsl_struct *info )
{
	unsigned long EndTime;
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);

	/*
	 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
	 * The ISR sets irq_occurred to true.
	 */

	info->irq_occurred = false;

	/* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
	/* Enable INTEN (Port 6, Bit12) */
	/* This connects the IRQ request signal to the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );

	usc_EnableMasterIrqBit(info);
	usc_EnableInterrupts(info, IO_PIN);
	usc_ClearIrqPendingBits(info, IO_PIN);

	usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
	usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* Poll for the interrupt: up to 100 iterations of ~10ms each. */
	EndTime=100;
	while( EndTime-- && !info->irq_occurred ) {
		msleep_interruptible(10);
	}

	/* Quiesce the device again before reporting the result. */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return info->irq_occurred;

}	/* end of mgsl_irq_test() */
7033
/* mgsl_dma_test()
 *
 *	Perform a DMA test of the 16C32. A small frame is
 *	transmitted via DMA from a transmit buffer to a receive buffer
 *	using single buffer DMA mode, with the device in internal
 *	loopback. The received data is compared to the transmitted data.
 *
 *	NOTE: the wait loops below busy-poll under a jiffies deadline
 *	(no sleeping); each phase is bounded to ~100ms.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_dma_test( struct mgsl_struct *info )
{
	unsigned short FifoLevel;
	unsigned long phys_addr;
	unsigned int FrameSize;
	unsigned int i;
	char *TmpPtr;
	bool rc = true;
	unsigned short status=0;
	unsigned long EndTime;
	unsigned long flags;
	MGSL_PARAMS tmp_params;

	/* save current port options (restored before returning) */
	memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
	/* load default port options */
	memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));

#define TESTFRAMESIZE 40

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup 16C32 for SDLC DMA transfer mode */

	usc_reset(info);
	usc_set_sdlc_mode(info);
	usc_enable_loopback(info,1);

	/* Reprogram the RDMR so that the 16C32 does NOT clear the count
	 * field of the buffer entry after fetching buffer address. This
	 * way we can detect a DMA failure for a DMA read (which should be
	 * non-destructive to system memory) before we try and write to
	 * memory (where a failure could corrupt system memory).
	 */

	/* Receive DMA mode Register (RDMR)
	 *
	 * <15..14>	11	DMA mode = Linked List Buffer mode
	 * <13>		1	RSBinA/L = store Rx status Block in List entry
	 * <12>		0	1 = Clear count of List Entry after fetching
	 * <11..10>	00	Address mode = Increment
	 * <9>		1	Terminate Buffer on RxBound
	 * <8>		0	Bus Width = 16bits
	 * <7..0>	?	status Bits (write as 0s)
	 *
	 * 1110 0010 0000 0000 = 0xe200
	 */

	usc_OutDmaReg( info, RDMR, 0xe200 );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */

	FrameSize = TESTFRAMESIZE;

	/* setup 1st transmit buffer entry: */
	/* with frame size and transmit control word */

	info->tx_buffer_list[0].count = FrameSize;
	info->tx_buffer_list[0].rcc = FrameSize;
	info->tx_buffer_list[0].status = 0x4000;

	/* build a transmit frame in 1st transmit DMA buffer */
	/* (byte i of the frame holds the value i) */

	TmpPtr = info->tx_buffer_list[0].virt_addr;
	for (i = 0; i < FrameSize; i++ )
		*TmpPtr++ = i;

	/* setup 1st receive buffer entry: */
	/* clear status, set max receive buffer size */

	info->rx_buffer_list[0].status = 0;
	info->rx_buffer_list[0].count = FrameSize + 4;

	/* zero out the 1st receive buffer */

	memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );

	/* Set count field of next buffer entries to prevent */
	/* 16C32 from using buffers after the 1st one. */

	info->tx_buffer_list[1].count = 0;
	info->rx_buffer_list[1].count = 0;


	/***************************/
	/* Program 16C32 receiver. */
	/***************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup DMA transfers */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* program 16C32 receiver with physical address of 1st DMA buffer entry */
	phys_addr = info->rx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );

	/* Clear the Rx DMA status bits (read RDMR) and start channel */
	usc_InDmaReg( info, RDMR );
	usc_DmaCmd( info, DmaCmd_InitRxChannel );

	/* Enable Receiver (RMR <1..0> = 10) */
	usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/*************************************************************/
	/* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
	/*************************************************************/

	/* Wait 100ms for interrupt. */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = false;
			break;
		}

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InDmaReg( info, RDMR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( !(status & BIT4) && (status & BIT5) ) {
			/* INITG (BIT 4) is inactive (no entry read in progress) AND */
			/* BUSY  (BIT 5) is active (channel still active). */
			/* This means the buffer entry read has completed. */
			break;
		}
	}


	/******************************/
	/* Program 16C32 transmitter. */
	/******************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */

	usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* Program the address of the 1st DMA Buffer Entry in linked list */

	phys_addr = info->tx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );

	/* unlatch Tx status bits, and start transmit channel. */
	/* (0xfa writes 1s to the latched status bits to clear them) */

	usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
	usc_DmaCmd( info, DmaCmd_InitTxChannel );

	/* wait for DMA controller to fill transmit FIFO */

	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/**********************************/
	/* WAIT FOR TRANSMIT FIFO TO FILL */
	/**********************************/

	/* Wait 100ms */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = false;
			break;
		}

		/* TICR upper byte reports the Tx FIFO fill status */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		FifoLevel = usc_InReg(info, TICR) >> 8;
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( FifoLevel < 16 )
			break;
		else
			if ( FrameSize < 32 ) {
				/* This frame is smaller than the entire transmit FIFO */
				/* so wait for the entire frame to be loaded. */
				if ( FifoLevel <= (32 - FrameSize) )
					break;
			}
	}


	if ( rc )
	{
		/* Enable 16C32 transmitter. */

		spin_lock_irqsave(&info->irq_spinlock,flags);

		/* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
		usc_TCmd( info, TCmd_SendFrame );
		usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );

		spin_unlock_irqrestore(&info->irq_spinlock,flags);


		/******************************/
		/* WAIT FOR TRANSMIT COMPLETE */
		/******************************/

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* While timer not expired wait for transmit complete */

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InReg( info, TCSR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		while ( !(status & (BIT6 | BIT5 | BIT4 | BIT2 | BIT1)) ) {
			if (time_after(jiffies, EndTime)) {
				rc = false;
				break;
			}

			spin_lock_irqsave(&info->irq_spinlock,flags);
			status = usc_InReg( info, TCSR );
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}


	if ( rc ){
		/* CHECK FOR TRANSMIT ERRORS */
		if ( status & (BIT5 | BIT1) )
			rc = false;
	}

	if ( rc ) {
		/* WAIT FOR RECEIVE COMPLETE */

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* Wait for 16C32 to write receive status to buffer entry. */
		status=info->rx_buffer_list[0].status;
		while ( status == 0 ) {
			if (time_after(jiffies, EndTime)) {
				rc = false;
				break;
			}
			status=info->rx_buffer_list[0].status;
		}
	}


	if ( rc ) {
		/* CHECK FOR RECEIVE ERRORS */
		status = info->rx_buffer_list[0].status;

		if ( status & (BIT8 | BIT3 | BIT1) ) {
			/* receive error has occurred */
			rc = false;
		} else {
			/* compare received data against the transmitted frame */
			if ( memcmp( info->tx_buffer_list[0].virt_addr ,
				info->rx_buffer_list[0].virt_addr, FrameSize ) ){
				rc = false;
			}
		}
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* restore current port options */
	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));

	return rc;

}	/* end of mgsl_dma_test() */
7327
/* mgsl_adapter_test()
 *
 *	Perform the register, IRQ, and DMA tests for the 16C32.
 *	Tests run in order; the first failure records a diagnostic
 *	code in info->init_error and aborts with -ENODEV.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise -ENODEV
 */
static int mgsl_adapter_test( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):Testing device %s\n",
			__FILE__,__LINE__,info->device_name );

	if ( !mgsl_register_test( info ) ) {
		info->init_error = DiagStatus_AddressFailure;
		printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
		return -ENODEV;
	}

	if ( !mgsl_irq_test( info ) ) {
		info->init_error = DiagStatus_IrqFailure;
		printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
		return -ENODEV;
	}

	if ( !mgsl_dma_test( info ) ) {
		info->init_error = DiagStatus_DmaFailure;
		printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
		return -ENODEV;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):device %s passed diagnostics\n",
			__FILE__,__LINE__,info->device_name );

	return 0;

}	/* end of mgsl_adapter_test() */
7369
7370 /* mgsl_memory_test()
7371 *
7372 * Test the shared memory on a PCI adapter.
7373 *
7374 * Arguments: info pointer to device instance data
7375 * Return Value: true if test passed, otherwise false
7376 */
7377 static bool mgsl_memory_test( struct mgsl_struct *info )
7378 {
7379 static unsigned long BitPatterns[] =
7380 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7381 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7382 unsigned long i;
7383 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7384 unsigned long * TestAddr;
7385
7386 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7387 return true;
7388
7389 TestAddr = (unsigned long *)info->memory_base;
7390
7391 /* Test data lines with test pattern at one location. */
7392
7393 for ( i = 0 ; i < Patterncount ; i++ ) {
7394 *TestAddr = BitPatterns[i];
7395 if ( *TestAddr != BitPatterns[i] )
7396 return false;
7397 }
7398
7399 /* Test address lines with incrementing pattern over */
7400 /* entire address range. */
7401
7402 for ( i = 0 ; i < TestLimit ; i++ ) {
7403 *TestAddr = i * 4;
7404 TestAddr++;
7405 }
7406
7407 TestAddr = (unsigned long *)info->memory_base;
7408
7409 for ( i = 0 ; i < TestLimit ; i++ ) {
7410 if ( *TestAddr != i * 4 )
7411 return false;
7412 TestAddr++;
7413 }
7414
7415 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7416
7417 return true;
7418
7419 } /* End Of mgsl_memory_test() */
7420
7421
/* mgsl_load_pci_memory()
 *
 * 	Load a large block of data into the PCI shared memory.
 * 	Use this instead of memcpy() or memmove() to move data
 * 	into the PCI shared memory.
 *
 * Notes:
 *
 * 	This function prevents the PCI9050 interface chip from hogging
 * 	the adapter local bus, which can starve the 16C32 by preventing
 * 	16C32 bus master cycles.
 *
 * 	The PCI9050 documentation says that the 9050 will always release
 * 	control of the local bus after completing the current read
 * 	or write operation.
 *
 * 	It appears that as long as the PCI9050 write FIFO is full, the
 * 	PCI9050 treats all of the writes as a single burst transaction
 * 	and will not release the bus. This causes DMA latency problems
 * 	at high speeds when copying large data blocks to the shared
 * 	memory.
 *
 * 	This function, in effect, breaks a large shared memory write
 * 	into multiple transactions by interleaving a shared memory read
 * 	which will flush the write FIFO and 'complete' the write
 * 	transaction. This allows any pending DMA request to gain control
 * 	of the local bus in a timely fashion.
 *
 * Arguments:
 *
 *	TargetPtr	pointer to target address in PCI shared memory
 *	SourcePtr	pointer to source buffer for data
 *	count		count in bytes of data to copy
 *
 * Return Value:	None
 */
static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
	unsigned short count )
{
	/* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
#define PCI_LOAD_INTERVAL 64

	unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
	unsigned short Index;
	unsigned long Dummy;

	for ( Index = 0 ; Index < Intervalcount ; Index++ )
	{
		memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
		/* The volatile read below is the whole point: it flushes the
		 * PCI9050 write FIFO, ending the burst so a pending DMA
		 * request can win the local bus. Do not remove it even
		 * though Dummy is otherwise unused.
		 */
		Dummy = *((volatile unsigned long *)TargetPtr);
		TargetPtr += PCI_LOAD_INTERVAL;
		SourcePtr += PCI_LOAD_INTERVAL;
	}

	/* copy the remaining (count % PCI_LOAD_INTERVAL) bytes */
	memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );

}	/* End Of mgsl_load_pci_memory() */
7479
7480 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7481 {
7482 int i;
7483 int linecount;
7484 if (xmit)
7485 printk("%s tx data:\n",info->device_name);
7486 else
7487 printk("%s rx data:\n",info->device_name);
7488
7489 while(count) {
7490 if (count > 16)
7491 linecount = 16;
7492 else
7493 linecount = count;
7494
7495 for(i=0;i<linecount;i++)
7496 printk("%02X ",(unsigned char)data[i]);
7497 for(;i<17;i++)
7498 printk(" ");
7499 for(i=0;i<linecount;i++) {
7500 if (data[i]>=040 && data[i]<=0176)
7501 printk("%c",data[i]);
7502 else
7503 printk(".");
7504 }
7505 printk("\n");
7506
7507 data += linecount;
7508 count -= linecount;
7509 }
7510 } /* end of mgsl_trace_block() */
7511
/* mgsl_tx_timeout()
 *
 *	Timer callback (legacy unsigned-long-context timer API): called
 *	when an HDLC frame transmit times out.
 *	Updates stats, aborts the transmit, and runs tx completion
 *	processing (network or tty path).
 *
 * Arguments:	context		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_tx_timeout(unsigned long context)
{
	struct mgsl_struct *info = (struct mgsl_struct*)context;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_tx_timeout(%s)\n",
			__FILE__,__LINE__,info->device_name);
	/* only count a timeout if a synchronous-mode frame was in flight */
	if(info->tx_active &&
	   (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW) ) {
		info->icount.txtimeout++;
	}
	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->tx_active = false;
	/* discard any queued transmit data */
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
		usc_loopmode_cancel_transmit( info );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
		/* tty path: schedule bottom-half transmit processing */
		mgsl_bh_transmit(info);

}	/* end of mgsl_tx_timeout() */
7550
7551 /* signal that there are no more frames to send, so that
7552 * line is 'released' by echoing RxD to TxD when current
7553 * transmission is complete (or immediately if no tx in progress).
7554 */
7555 static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7556 {
7557 unsigned long flags;
7558
7559 spin_lock_irqsave(&info->irq_spinlock,flags);
7560 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7561 if (info->tx_active)
7562 info->loopmode_send_done_requested = true;
7563 else
7564 usc_loopmode_send_done(info);
7565 }
7566 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7567
7568 return 0;
7569 }
7570
/* release the line by echoing RxD to TxD
 * upon completion of a transmit frame
 *
 * NOTE(review): callers in this file invoke this with irq_spinlock
 * held (see mgsl_loopmode_send_done / usc_loopmode_cancel_transmit).
 */
static void usc_loopmode_send_done( struct mgsl_struct * info )
{
	/* the pending request (if any) is now satisfied */
	info->loopmode_send_done_requested = false;
	/* clear CMR:13 to 0 to start echoing RxData to TxData */
	info->cmr_value &= ~BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}
7581
/* abort a transmit in progress while in HDLC LoopMode
 *
 * Resets the tx DMA channel, purges the Tx FIFO, then releases the
 * line (RxD echoed to TxD) via usc_loopmode_send_done().
 */
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
{
	/* reset tx dma channel and purge TxFifo */
	usc_RTCmd( info, RTCmd_PurgeTxFifo );
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_loopmode_send_done( info );
}
7591
/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
 * we must clear CMR:13 to begin repeating TxData to RxData
 */
static void usc_loopmode_insert_request( struct mgsl_struct * info )
{
	/* ISR will complete the insertion when RxAbort is seen */
	info->loopmode_insert_requested = true;

	/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
	 * begin repeating TxData on RxData (complete insertion)
	 */
	usc_OutReg( info, RICR,
		(usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );

	/* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
	info->cmr_value |= BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}
7610
7611 /* return 1 if station is inserted into the loop, otherwise 0
7612 */
7613 static int usc_loopmode_active( struct mgsl_struct * info)
7614 {
7615 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7616 }
7617
7618 #if SYNCLINK_GENERIC_HDLC
7619
7620 /**
7621 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7622 * set encoding and frame check sequence (FCS) options
7623 *
7624 * dev pointer to network device structure
7625 * encoding serial encoding setting
7626 * parity FCS setting
7627 *
7628 * returns 0 if success, otherwise error code
7629 */
7630 static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7631 unsigned short parity)
7632 {
7633 struct mgsl_struct *info = dev_to_port(dev);
7634 unsigned char new_encoding;
7635 unsigned short new_crctype;
7636
7637 /* return error if TTY interface open */
7638 if (info->port.count)
7639 return -EBUSY;
7640
7641 switch (encoding)
7642 {
7643 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7644 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7645 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7646 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7647 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7648 default: return -EINVAL;
7649 }
7650
7651 switch (parity)
7652 {
7653 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7654 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7655 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7656 default: return -EINVAL;
7657 }
7658
7659 info->params.encoding = new_encoding;
7660 info->params.crc_type = new_crctype;
7661
7662 /* if network interface up, reprogram hardware */
7663 if (info->netcount)
7664 mgsl_program_hw(info);
7665
7666 return 0;
7667 }
7668
/**
 * called by generic HDLC layer to send frame
 *
 * skb  socket buffer containing HDLC frame
 * dev  pointer to network device structure
 *
 * Always returns NETDEV_TX_OK: the skb is consumed here and the queue
 * is stopped until this frame completes.
 */
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);

	/* stop sending until this frame completes */
	netif_stop_queue(dev);

	/* copy data to device buffers */
	info->xmit_cnt = skb->len;
	mgsl_load_tx_dma_buffer(info, skb->data, skb->len);

	/* update network statistics */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* done with socket buffer, so free it */
	dev_kfree_skb(skb);

	/* save start time for transmit timeout detection */
	netif_trans_update(dev);

	/* start hardware transmitter if necessary */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_active)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return NETDEV_TX_OK;
}
7709
/**
 * called by network layer when interface enabled
 * claim resources and initialize hardware
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_open(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	int rc;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);

	/* generic HDLC layer open processing */
	rc = hdlc_open(dev);
	if (rc)
		return rc;

	/* arbitrate between network and tty opens: only one side of the
	 * device (net or tty) may be active at a time
	 */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->port.count != 0 || info->netcount != 0) {
		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
		spin_unlock_irqrestore(&info->netlock, flags);
		return -EBUSY;
	}
	info->netcount=1;
	spin_unlock_irqrestore(&info->netlock, flags);

	/* claim resources and init adapter; undo netcount claim on failure */
	if ((rc = startup(info)) != 0) {
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount=0;
		spin_unlock_irqrestore(&info->netlock, flags);
		return rc;
	}

	/* assert RTS and DTR, apply hardware settings */
	info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
	mgsl_program_hw(info);

	/* enable network layer transmit */
	netif_trans_update(dev);
	netif_start_queue(dev);

	/* inform generic HDLC layer of current DCD status */
	spin_lock_irqsave(&info->irq_spinlock, flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	if (info->serial_signals & SerialSignal_DCD)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
7768
/**
 * called by network layer when interface is disabled
 * shutdown hardware and release resources
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code (always 0 here)
 */
static int hdlcdev_close(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);

	netif_stop_queue(dev);

	/* shutdown adapter and release resources */
	shutdown(info);

	hdlc_close(dev);

	/* release the net-vs-tty arbitration claim taken in hdlcdev_open() */
	spin_lock_irqsave(&info->netlock, flags);
	info->netcount=0;
	spin_unlock_irqrestore(&info->netlock, flags);

	return 0;
}
7798
/**
 * called by network layer to process IOCTL call to network device
 *
 * dev  pointer to network device structure
 * ifr  pointer to network interface request structure
 * cmd  IOCTL command code
 *
 * Handles SIOCWANDEV get/set of sync_serial_settings (clock source,
 * clock rate, loopback); everything else is passed to hdlc_ioctl().
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned int flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		/* isolate the rx/tx clock source bits */
		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);

		/* map driver clock flags to the generic clock_type */
		memset(&new_line, 0, sizeof(new_line));
		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default: new_line.clock_type = CLOCK_DEFAULT;
		}

		new_line.clock_rate = info->params.clock_speed;
		new_line.loopback   = info->params.loopback ? 1:0;

		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		/* map the requested clock_type to driver clock flags;
		 * CLOCK_DEFAULT keeps the current clock source bits
		 */
		switch (new_line.clock_type)
		{
		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_DEFAULT:  flags = info->params.flags &
					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
		default: return -EINVAL;
		}

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		/* replace only the clock source bits of params.flags */
		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
		info->params.flags |= flags;

		info->params.loopback = new_line.loopback;

		/* clock_rate only meaningful when the BRG supplies a clock */
		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
			info->params.clock_speed = new_line.clock_rate;
		else
			info->params.clock_speed = 0;

		/* if network interface up, reprogram hardware */
		if (info->netcount)
			mgsl_program_hw(info);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
7902
/**
 * called by network layer when transmit timeout is detected
 *
 * dev  pointer to network device structure
 *
 * Counts the error, stops the hardware transmitter, and re-enables
 * the transmit queue so the network layer can retry.
 */
static void hdlcdev_tx_timeout(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_tx_timeout(%s)\n",dev->name);

	dev->stats.tx_errors++;
	dev->stats.tx_aborted_errors++;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	netif_wake_queue(dev);
}
7925
7926 /**
7927 * called by device driver when transmit completes
7928 * reenable network layer transmit if stopped
7929 *
7930 * info pointer to device instance information
7931 */
7932 static void hdlcdev_tx_done(struct mgsl_struct *info)
7933 {
7934 if (netif_queue_stopped(info->netdev))
7935 netif_wake_queue(info->netdev);
7936 }
7937
/**
 * called by device driver when frame received
 * pass frame to network layer
 *
 * info  pointer to device instance information
 * buf   pointer to buffer containing frame data
 * size  count of data bytes in buf
 */
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
{
	struct sk_buff *skb = dev_alloc_skb(size);
	struct net_device *dev = info->netdev;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_rx(%s)\n", dev->name);

	/* allocation failure: count the drop and bail out */
	if (skb == NULL) {
		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
		       dev->name);
		dev->stats.rx_dropped++;
		return;
	}

	skb_put_data(skb, buf, size);

	skb->protocol = hdlc_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += size;

	netif_rx(skb);
}
7970
/* net_device callbacks for the generic HDLC network interface;
 * open/stop/ioctl/timeout are driver-local, transmit is routed
 * through the generic HDLC layer (hdlc_start_xmit). */
static const struct net_device_ops hdlcdev_ops = {
	.ndo_open       = hdlcdev_open,
	.ndo_stop       = hdlcdev_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hdlcdev_ioctl,
	.ndo_tx_timeout = hdlcdev_tx_timeout,
};
7978
7979 /**
7980 * called by device driver when adding device instance
7981 * do generic HDLC initialization
7982 *
7983 * info pointer to device instance information
7984 *
7985 * returns 0 if success, otherwise error code
7986 */
7987 static int hdlcdev_init(struct mgsl_struct *info)
7988 {
7989 int rc;
7990 struct net_device *dev;
7991 hdlc_device *hdlc;
7992
7993 /* allocate and initialize network and HDLC layer objects */
7994
7995 dev = alloc_hdlcdev(info);
7996 if (!dev) {
7997 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
7998 return -ENOMEM;
7999 }
8000
8001 /* for network layer reporting purposes only */
8002 dev->base_addr = info->io_base;
8003 dev->irq = info->irq_level;
8004 dev->dma = info->dma_level;
8005
8006 /* network layer callbacks and settings */
8007 dev->netdev_ops = &hdlcdev_ops;
8008 dev->watchdog_timeo = 10 * HZ;
8009 dev->tx_queue_len = 50;
8010
8011 /* generic HDLC layer callbacks and settings */
8012 hdlc = dev_to_hdlc(dev);
8013 hdlc->attach = hdlcdev_attach;
8014 hdlc->xmit = hdlcdev_xmit;
8015
8016 /* register objects with HDLC layer */
8017 rc = register_hdlc_device(dev);
8018 if (rc) {
8019 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8020 free_netdev(dev);
8021 return rc;
8022 }
8023
8024 info->netdev = dev;
8025 return 0;
8026 }
8027
8028 /**
8029 * called by device driver when removing device instance
8030 * do generic HDLC cleanup
8031 *
8032 * info pointer to device instance information
8033 */
8034 static void hdlcdev_exit(struct mgsl_struct *info)
8035 {
8036 unregister_hdlc_device(info->netdev);
8037 free_netdev(info->netdev);
8038 info->netdev = NULL;
8039 }
8040
8041 #endif /* CONFIG_HDLC */
8042
8043
/* PCI probe callback: enable the PCI device, allocate a driver
 * instance, record the BAR/IRQ/DMA resources and hardware-version
 * specific register values, then add the instance to the driver's
 * device list. Returns 0 on success, -EIO on failure. */
static int synclink_init_one (struct pci_dev *dev,
			      const struct pci_device_id *ent)
{
	struct mgsl_struct *info;

	if (pci_enable_device(dev)) {
		printk("error enabling pci device %p\n", dev);
		return -EIO;
	}

	info = mgsl_allocate_device();
	if (!info) {
		printk("can't allocate device instance data.\n");
		return -EIO;
	}

	/* Copy user configuration info to device instance data */

	info->io_base = pci_resource_start(dev, 2);
	info->irq_level = dev->irq;
	info->phys_memory_base = pci_resource_start(dev, 3);

	/* Because ioremap only works on page boundaries we must map
	 * a larger area than is actually implemented for the LCR
	 * memory range. We map a full page starting at the page boundary.
	 */
	info->phys_lcr_base = pci_resource_start(dev, 0);
	info->lcr_offset    = info->phys_lcr_base & (PAGE_SIZE-1);
	info->phys_lcr_base &= ~(PAGE_SIZE-1);

	info->bus_type = MGSL_BUS_TYPE_PCI;
	info->io_addr_size = 8;
	info->irq_flags = IRQF_SHARED;

	if (dev->device == 0x0210) {
		/* Version 1 PCI9030 based universal PCI adapter */
		info->misc_ctrl_value = 0x007c4080;
		info->hw_version = 1;
	} else {
		/* Version 0 PCI9050 based 5V PCI adapter
		 * A PCI9050 bug prevents reading LCR registers if
		 * LCR base address bit 7 is set. Maintain shadow
		 * value so we can write to LCR misc control reg.
		 */
		info->misc_ctrl_value = 0x087e4546;
		info->hw_version = 0;
	}

	mgsl_add_device(info);

	return 0;
}
8096
/* PCI remove callback: intentionally empty. Device instances appear
 * to be torn down collectively at module unload rather than per
 * device here — NOTE(review): confirm against the module cleanup
 * path, which is outside this view. */
static void synclink_remove_one (struct pci_dev *dev)
{
}
8100