]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/tty/serial/mpsc.c
tty: ioc4_serial.c: move assignment out of if () block
[mirror_ubuntu-artful-kernel.git] / drivers / tty / serial / mpsc.c
CommitLineData
1da177e4 1/*
1da177e4
LT
2 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
3 * GT64260, MV64340, MV64360, GT96100, ... ).
4 *
5 * Author: Mark A. Greer <mgreer@mvista.com>
6 *
7 * Based on an old MPSC driver that was in the linuxppc tree. It appears to
8 * have been created by Chris Zankel (formerly of MontaVista) but there
9 * is no proper Copyright so I'm not sure. Apparently, parts were also
10 * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
11 * by Russell King.
12 *
13 * 2004 (c) MontaVista, Software, Inc. This file is licensed under
14 * the terms of the GNU General Public License version 2. This program
15 * is licensed "as is" without any warranty of any kind, whether express
16 * or implied.
17 */
18/*
19 * The MPSC interface is much like a typical network controller's interface.
20 * That is, you set up separate rings of descriptors for transmitting and
21 * receiving data. There is also a pool of buffers with (one buffer per
22 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
23 * out of.
24 *
25 * The MPSC requires two other controllers to be able to work. The Baud Rate
26 * Generator (BRG) provides a clock at programmable frequencies which determines
27 * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the
28 * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
29 * MPSC. It is actually the SDMA interrupt that the driver uses to keep the
30 * transmit and receive "engines" going (i.e., indicate data has been
31 * transmitted or received).
32 *
33 * NOTES:
34 *
35 * 1) Some chips have an erratum where several regs cannot be
36 * read. To work around that, we keep a local copy of those regs in
37 * 'mpsc_port_info'.
38 *
39 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
40 * accesses system mem with coherency enabled. For that reason, the driver
41 * assumes that coherency for that ctlr has been disabled. This means
42 * that when in a cache coherent system, the driver has to manually manage
 43 * the data cache on the areas that it touches because the dma_* macros are
44 * basically no-ops.
45 *
46 * 3) There is an erratum (on PPC) where you can't use the instruction to do
47 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
48 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
49 *
50 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
51 */
52
e4294b3e
MG
53
54#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
55#define SUPPORT_SYSRQ
56#endif
57
58#include <linux/module.h>
59#include <linux/moduleparam.h>
60#include <linux/tty.h>
61#include <linux/tty_flip.h>
62#include <linux/ioport.h>
63#include <linux/init.h>
64#include <linux/console.h>
65#include <linux/sysrq.h>
66#include <linux/serial.h>
67#include <linux/serial_core.h>
68#include <linux/delay.h>
69#include <linux/device.h>
70#include <linux/dma-mapping.h>
71#include <linux/mv643xx.h>
d052d1be 72#include <linux/platform_device.h>
5a0e3ad6 73#include <linux/gfp.h>
d052d1be 74
e4294b3e
MG
75#include <asm/io.h>
76#include <asm/irq.h>
77
e4294b3e
MG
78#define MPSC_NUM_CTLRS 2
79
80/*
81 * Descriptors and buffers must be cache line aligned.
82 * Buffers lengths must be multiple of cache line size.
83 * Number of Tx & Rx descriptors must be powers of 2.
84 */
85#define MPSC_RXR_ENTRIES 32
86#define MPSC_RXRE_SIZE dma_get_cache_alignment()
87#define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
88#define MPSC_RXBE_SIZE dma_get_cache_alignment()
89#define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
90
91#define MPSC_TXR_ENTRIES 32
92#define MPSC_TXRE_SIZE dma_get_cache_alignment()
93#define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
94#define MPSC_TXBE_SIZE dma_get_cache_alignment()
95#define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
96
2e89db75
MG
97#define MPSC_DMA_ALLOC_SIZE (MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
98 + MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)
e4294b3e
MG
99
100/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
/* SDMA Rx ring entry; fields are stored big-endian (see cpu_to_be* in
 * mpsc_init_rings).  Ownership is handed to the SDMA via the
 * SDMA_DESC_CMDSTAT_O bit in cmdstat. */
101struct mpsc_rx_desc {
 102 u16 bufsize;
 103 u16 bytecnt;
 104 u32 cmdstat;
 105 u32 link;
 106 u32 buf_ptr;
 107} __attribute((packed));
108
/* SDMA Tx ring entry; big-endian fields, same ownership protocol as Rx
 * (SDMA_DESC_CMDSTAT_O in cmdstat -- see mpsc_sdma_start_tx). */
109struct mpsc_tx_desc {
 110 u16 bytecnt;
 111 u16 shadow;
 112 u32 cmdstat;
 113 u32 link;
 114 u32 buf_ptr;
 115} __attribute((packed));
116
117/*
 118 * Some regs that have the erratum that you can't read them are shared
119 * between the two MPSC controllers. This struct contains those shared regs.
120 */
/* Register blocks shared by both MPSC ctlrs.  The *_m fields mirror the
 * last value written, because the read erratum prevents reading back the
 * real registers (see mpsc_sdma_intr_mask/unmask, mpsc_hw_init). */
121struct mpsc_shared_regs {
 122 phys_addr_t mpsc_routing_base_p;
 123 phys_addr_t sdma_intr_base_p;
 124
 125 void __iomem *mpsc_routing_base;
 126 void __iomem *sdma_intr_base;
 127
 128 u32 MPSC_MRR_m;
 129 u32 MPSC_RCRR_m;
 130 u32 MPSC_TCRR_m;
 131 u32 SDMA_INTR_CAUSE_m;
 132 u32 SDMA_INTR_MASK_m;
 133};
134
135/* The main driver data structure */
/* Per-controller driver state; one instance per ctlr, kept in the
 * file-scope mpsc_ports[MPSC_NUM_CTLRS] array. */
136struct mpsc_port_info {
 137 struct uart_port port; /* Overlay uart_port structure */
 138
 139 /* Internal driver state for this ctlr */
 140 u8 ready;
 141 u8 rcv_data;
 142 tcflag_t c_iflag; /* save termios->c_iflag */
 143 tcflag_t c_cflag; /* save termios->c_cflag */
 144
 145 /* Info passed in from platform */
 146 u8 mirror_regs; /* Need to mirror regs? */
 147 u8 cache_mgmt; /* Need manual cache mgmt? */
 148 u8 brg_can_tune; /* BRG has baud tuning? */
 149 u32 brg_clk_src;
 150 u16 mpsc_max_idle;
 151 int default_baud;
 152 int default_bits;
 153 int default_parity;
 154 int default_flow;
 155
 156 /* Physical addresses of various blocks of registers (from platform) */
 157 phys_addr_t mpsc_base_p;
 158 phys_addr_t sdma_base_p;
 159 phys_addr_t brg_base_p;
 160
 161 /* Virtual addresses of various blocks of registers (from platform) */
 162 void __iomem *mpsc_base;
 163 void __iomem *sdma_base;
 164 void __iomem *brg_base;
 165
 166 /* Descriptor ring and buffer allocations */
 167 void *dma_region;
 168 dma_addr_t dma_region_p;
 169
 170 dma_addr_t rxr; /* Rx descriptor ring */
 171 dma_addr_t rxr_p; /* Phys addr of rxr */
 172 u8 *rxb; /* Rx Ring I/O buf */
 173 u8 *rxb_p; /* Phys addr of rxb */
 174 u32 rxr_posn; /* First desc w/ Rx data */
 175
 176 dma_addr_t txr; /* Tx descriptor ring */
 177 dma_addr_t txr_p; /* Phys addr of txr */
 178 u8 *txb; /* Tx Ring I/O buf */
 179 u8 *txb_p; /* Phys addr of txb */
 180 int txr_head; /* Where new data goes */
 181 int txr_tail; /* Where sent data comes off */
1733310b 182 spinlock_t tx_lock; /* transmit lock */
e4294b3e
MG
 183
 184 /* Mirrored values of regs we can't read (if 'mirror_regs' set) */
 185 u32 MPSC_MPCR_m;
 186 u32 MPSC_CHR_1_m;
 187 u32 MPSC_CHR_2_m;
 188 u32 MPSC_CHR_10_m;
 189 u32 BRG_BCR_m;
 190 struct mpsc_shared_regs *shared_regs;
 191};
192
193/* Hooks to platform-specific code */
194int mpsc_platform_register_driver(void);
195void mpsc_platform_unregister_driver(void);
196
197/* Hooks back in to mpsc common to be called by platform-specific code */
198struct mpsc_port_info *mpsc_device_probe(int index);
199struct mpsc_port_info *mpsc_device_remove(int index);
200
201/* Main MPSC Configuration Register Offsets */
202#define MPSC_MMCRL 0x0000
203#define MPSC_MMCRH 0x0004
204#define MPSC_MPCR 0x0008
205#define MPSC_CHR_1 0x000c
206#define MPSC_CHR_2 0x0010
207#define MPSC_CHR_3 0x0014
208#define MPSC_CHR_4 0x0018
209#define MPSC_CHR_5 0x001c
210#define MPSC_CHR_6 0x0020
211#define MPSC_CHR_7 0x0024
212#define MPSC_CHR_8 0x0028
213#define MPSC_CHR_9 0x002c
214#define MPSC_CHR_10 0x0030
215#define MPSC_CHR_11 0x0034
216
217#define MPSC_MPCR_FRZ (1 << 9)
218#define MPSC_MPCR_CL_5 0
219#define MPSC_MPCR_CL_6 1
220#define MPSC_MPCR_CL_7 2
221#define MPSC_MPCR_CL_8 3
222#define MPSC_MPCR_SBL_1 0
223#define MPSC_MPCR_SBL_2 1
224
225#define MPSC_CHR_2_TEV (1<<1)
226#define MPSC_CHR_2_TA (1<<7)
227#define MPSC_CHR_2_TTCS (1<<9)
228#define MPSC_CHR_2_REV (1<<17)
229#define MPSC_CHR_2_RA (1<<23)
230#define MPSC_CHR_2_CRD (1<<25)
231#define MPSC_CHR_2_EH (1<<31)
232#define MPSC_CHR_2_PAR_ODD 0
233#define MPSC_CHR_2_PAR_SPACE 1
234#define MPSC_CHR_2_PAR_EVEN 2
235#define MPSC_CHR_2_PAR_MARK 3
236
237/* MPSC Signal Routing */
238#define MPSC_MRR 0x0000
239#define MPSC_RCRR 0x0004
240#define MPSC_TCRR 0x0008
241
242/* Serial DMA Controller Interface Registers */
243#define SDMA_SDC 0x0000
244#define SDMA_SDCM 0x0008
245#define SDMA_RX_DESC 0x0800
246#define SDMA_RX_BUF_PTR 0x0808
247#define SDMA_SCRDP 0x0810
248#define SDMA_TX_DESC 0x0c00
249#define SDMA_SCTDP 0x0c10
250#define SDMA_SFTDP 0x0c14
251
252#define SDMA_DESC_CMDSTAT_PE (1<<0)
253#define SDMA_DESC_CMDSTAT_CDL (1<<1)
254#define SDMA_DESC_CMDSTAT_FR (1<<3)
255#define SDMA_DESC_CMDSTAT_OR (1<<6)
256#define SDMA_DESC_CMDSTAT_BR (1<<9)
257#define SDMA_DESC_CMDSTAT_MI (1<<10)
258#define SDMA_DESC_CMDSTAT_A (1<<11)
259#define SDMA_DESC_CMDSTAT_AM (1<<12)
260#define SDMA_DESC_CMDSTAT_CT (1<<13)
261#define SDMA_DESC_CMDSTAT_C (1<<14)
262#define SDMA_DESC_CMDSTAT_ES (1<<15)
263#define SDMA_DESC_CMDSTAT_L (1<<16)
264#define SDMA_DESC_CMDSTAT_F (1<<17)
265#define SDMA_DESC_CMDSTAT_P (1<<18)
266#define SDMA_DESC_CMDSTAT_EI (1<<23)
267#define SDMA_DESC_CMDSTAT_O (1<<31)
268
2e89db75
MG
269#define SDMA_DESC_DFLT (SDMA_DESC_CMDSTAT_O \
270 | SDMA_DESC_CMDSTAT_EI)
e4294b3e
MG
271
272#define SDMA_SDC_RFT (1<<0)
273#define SDMA_SDC_SFM (1<<1)
274#define SDMA_SDC_BLMR (1<<6)
275#define SDMA_SDC_BLMT (1<<7)
276#define SDMA_SDC_POVR (1<<8)
277#define SDMA_SDC_RIFB (1<<9)
278
279#define SDMA_SDCM_ERD (1<<7)
280#define SDMA_SDCM_AR (1<<15)
281#define SDMA_SDCM_STD (1<<16)
282#define SDMA_SDCM_TXD (1<<23)
283#define SDMA_SDCM_AT (1<<31)
284
285#define SDMA_0_CAUSE_RXBUF (1<<0)
286#define SDMA_0_CAUSE_RXERR (1<<1)
287#define SDMA_0_CAUSE_TXBUF (1<<2)
288#define SDMA_0_CAUSE_TXEND (1<<3)
289#define SDMA_1_CAUSE_RXBUF (1<<8)
290#define SDMA_1_CAUSE_RXERR (1<<9)
291#define SDMA_1_CAUSE_TXBUF (1<<10)
292#define SDMA_1_CAUSE_TXEND (1<<11)
293
2e89db75
MG
294#define SDMA_CAUSE_RX_MASK (SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
295 | SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
296#define SDMA_CAUSE_TX_MASK (SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
297 | SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
e4294b3e
MG
298
299/* SDMA Interrupt registers */
300#define SDMA_INTR_CAUSE 0x0000
301#define SDMA_INTR_MASK 0x0080
302
303/* Baud Rate Generator Interface Registers */
304#define BRG_BCR 0x0000
305#define BRG_BTR 0x0004
1da177e4
LT
306
307/*
308 * Define how this driver is known to the outside (we've been assigned a
309 * range on the "Low-density serial ports" major).
310 */
2e89db75
MG
311#define MPSC_MAJOR 204
312#define MPSC_MINOR_START 44
313#define MPSC_DRIVER_NAME "MPSC"
314#define MPSC_DEV_NAME "ttyMM"
315#define MPSC_VERSION "1.00"
1da177e4
LT
316
317static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
318static struct mpsc_shared_regs mpsc_shared_regs;
4d0145a7 319static struct uart_driver mpsc_reg;
1da177e4 320
4d0145a7
LN
321static void mpsc_start_rx(struct mpsc_port_info *pi);
322static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
323static void mpsc_release_port(struct uart_port *port);
1da177e4
LT
324/*
325 ******************************************************************************
326 *
327 * Baud Rate Generator Routines (BRG)
328 *
329 ******************************************************************************
330 */
2e89db75 331static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
1da177e4
LT
332{
333 u32 v;
334
335 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
336 v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);
337
338 if (pi->brg_can_tune)
339 v &= ~(1 << 25);
340
341 if (pi->mirror_regs)
342 pi->BRG_BCR_m = v;
343 writel(v, pi->brg_base + BRG_BCR);
344
345 writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
346 pi->brg_base + BRG_BTR);
1da177e4
LT
347}
348
2e89db75 349static void mpsc_brg_enable(struct mpsc_port_info *pi)
1da177e4
LT
350{
351 u32 v;
352
353 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
354 v |= (1 << 16);
355
356 if (pi->mirror_regs)
357 pi->BRG_BCR_m = v;
358 writel(v, pi->brg_base + BRG_BCR);
1da177e4
LT
359}
360
2e89db75 361static void mpsc_brg_disable(struct mpsc_port_info *pi)
1da177e4
LT
362{
363 u32 v;
364
365 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
366 v &= ~(1 << 16);
367
368 if (pi->mirror_regs)
369 pi->BRG_BCR_m = v;
370 writel(v, pi->brg_base + BRG_BCR);
1da177e4
LT
371}
372
2e89db75
MG
373/*
374 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
375 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
376 * However, the input clock is divided by 16 in the MPSC b/c of how
377 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
378 * calculation by 16 to account for that. So the real calculation
379 * that accounts for the way the mpsc is set up is:
380 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
381 */
382static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
1da177e4 383{
1da177e4
LT
384 u32 cdv = (pi->port.uartclk / (baud << 5)) - 1;
385 u32 v;
386
387 mpsc_brg_disable(pi);
388 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
389 v = (v & 0xffff0000) | (cdv & 0xffff);
390
391 if (pi->mirror_regs)
392 pi->BRG_BCR_m = v;
393 writel(v, pi->brg_base + BRG_BCR);
394 mpsc_brg_enable(pi);
1da177e4
LT
395}
396
397/*
398 ******************************************************************************
399 *
400 * Serial DMA Routines (SDMA)
401 *
402 ******************************************************************************
403 */
404
2e89db75 405static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
1da177e4
LT
406{
407 u32 v;
408
409 pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
2e89db75 410 pi->port.line, burst_size);
1da177e4
LT
411
412 burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */
413
414 if (burst_size < 2)
415 v = 0x0; /* 1 64-bit word */
416 else if (burst_size < 4)
417 v = 0x1; /* 2 64-bit words */
418 else if (burst_size < 8)
419 v = 0x2; /* 4 64-bit words */
420 else
421 v = 0x3; /* 8 64-bit words */
422
423 writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
424 pi->sdma_base + SDMA_SDC);
1da177e4
LT
425}
426
2e89db75 427static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
1da177e4
LT
428{
429 pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
430 burst_size);
431
432 writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
433 pi->sdma_base + SDMA_SDC);
434 mpsc_sdma_burstsize(pi, burst_size);
1da177e4
LT
435}
436
2e89db75 437static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
1da177e4
LT
438{
439 u32 old, v;
440
441 pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);
442
443 old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
444 readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
445
446 mask &= 0xf;
447 if (pi->port.line)
448 mask <<= 8;
449 v &= ~mask;
450
451 if (pi->mirror_regs)
452 pi->shared_regs->SDMA_INTR_MASK_m = v;
453 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
454
455 if (pi->port.line)
456 old >>= 8;
457 return old & 0xf;
458}
459
2e89db75 460static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
1da177e4
LT
461{
462 u32 v;
463
464 pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask);
465
2e89db75
MG
466 v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m
467 : readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
1da177e4
LT
468
469 mask &= 0xf;
470 if (pi->port.line)
471 mask <<= 8;
472 v |= mask;
473
474 if (pi->mirror_regs)
475 pi->shared_regs->SDMA_INTR_MASK_m = v;
476 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
1da177e4
LT
477}
478
2e89db75 479static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
1da177e4
LT
480{
481 pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);
482
483 if (pi->mirror_regs)
484 pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
2e89db75
MG
485 writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
486 + pi->port.line);
1da177e4
LT
487}
488
2e89db75
MG
489static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
490 struct mpsc_rx_desc *rxre_p)
1da177e4
LT
491{
492 pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
2e89db75 493 pi->port.line, (u32)rxre_p);
1da177e4
LT
494
495 writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
1da177e4
LT
496}
497
2e89db75
MG
498static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
499 struct mpsc_tx_desc *txre_p)
1da177e4
LT
500{
501 writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
502 writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
1da177e4
LT
503}
504
2e89db75 505static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
1da177e4
LT
506{
507 u32 v;
508
509 v = readl(pi->sdma_base + SDMA_SDCM);
510 if (val)
511 v |= val;
512 else
513 v = 0;
514 wmb();
515 writel(v, pi->sdma_base + SDMA_SDCM);
516 wmb();
1da177e4
LT
517}
518
2e89db75 519static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
1da177e4
LT
520{
521 return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
522}
523
2e89db75 524static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
1da177e4
LT
525{
526 struct mpsc_tx_desc *txre, *txre_p;
527
528 /* If tx isn't running & there's a desc ready to go, start it */
529 if (!mpsc_sdma_tx_active(pi)) {
2e89db75
MG
530 txre = (struct mpsc_tx_desc *)(pi->txr
531 + (pi->txr_tail * MPSC_TXRE_SIZE));
532 dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
533 DMA_FROM_DEVICE);
1da177e4
LT
534#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
535 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
536 invalidate_dcache_range((ulong)txre,
2e89db75 537 (ulong)txre + MPSC_TXRE_SIZE);
1da177e4
LT
538#endif
539
540 if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
2e89db75
MG
541 txre_p = (struct mpsc_tx_desc *)
542 (pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));
1da177e4
LT
543
544 mpsc_sdma_set_tx_ring(pi, txre_p);
545 mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
546 }
547 }
1da177e4
LT
548}
549
2e89db75 550static void mpsc_sdma_stop(struct mpsc_port_info *pi)
1da177e4
LT
551{
552 pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);
553
554 /* Abort any SDMA transfers */
555 mpsc_sdma_cmd(pi, 0);
556 mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);
557
558 /* Clear the SDMA current and first TX and RX pointers */
2c6e7599
AV
559 mpsc_sdma_set_tx_ring(pi, NULL);
560 mpsc_sdma_set_rx_ring(pi, NULL);
1da177e4
LT
561
562 /* Disable interrupts */
563 mpsc_sdma_intr_mask(pi, 0xf);
564 mpsc_sdma_intr_ack(pi);
1da177e4
LT
565}
566
567/*
568 ******************************************************************************
569 *
570 * Multi-Protocol Serial Controller Routines (MPSC)
571 *
572 ******************************************************************************
573 */
574
2e89db75 575static void mpsc_hw_init(struct mpsc_port_info *pi)
1da177e4
LT
576{
577 u32 v;
578
579 pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);
580
581 /* Set up clock routing */
582 if (pi->mirror_regs) {
583 v = pi->shared_regs->MPSC_MRR_m;
584 v &= ~0x1c7;
585 pi->shared_regs->MPSC_MRR_m = v;
586 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);
587
588 v = pi->shared_regs->MPSC_RCRR_m;
589 v = (v & ~0xf0f) | 0x100;
590 pi->shared_regs->MPSC_RCRR_m = v;
591 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
592
593 v = pi->shared_regs->MPSC_TCRR_m;
594 v = (v & ~0xf0f) | 0x100;
595 pi->shared_regs->MPSC_TCRR_m = v;
596 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
2e89db75 597 } else {
1da177e4
LT
598 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
599 v &= ~0x1c7;
600 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);
601
602 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
603 v = (v & ~0xf0f) | 0x100;
604 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
605
606 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
607 v = (v & ~0xf0f) | 0x100;
608 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
609 }
610
611 /* Put MPSC in UART mode & enabel Tx/Rx egines */
612 writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);
613
2e89db75 614 /* No preamble, 16x divider, low-latency, */
1da177e4 615 writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
7bbdc3d5 616 mpsc_set_baudrate(pi, pi->default_baud);
1da177e4
LT
617
618 if (pi->mirror_regs) {
619 pi->MPSC_CHR_1_m = 0;
620 pi->MPSC_CHR_2_m = 0;
621 }
622 writel(0, pi->mpsc_base + MPSC_CHR_1);
623 writel(0, pi->mpsc_base + MPSC_CHR_2);
624 writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
625 writel(0, pi->mpsc_base + MPSC_CHR_4);
626 writel(0, pi->mpsc_base + MPSC_CHR_5);
627 writel(0, pi->mpsc_base + MPSC_CHR_6);
628 writel(0, pi->mpsc_base + MPSC_CHR_7);
629 writel(0, pi->mpsc_base + MPSC_CHR_8);
630 writel(0, pi->mpsc_base + MPSC_CHR_9);
631 writel(0, pi->mpsc_base + MPSC_CHR_10);
1da177e4
LT
632}
633
2e89db75 634static void mpsc_enter_hunt(struct mpsc_port_info *pi)
1da177e4
LT
635{
636 pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);
637
638 if (pi->mirror_regs) {
639 writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
640 pi->mpsc_base + MPSC_CHR_2);
641 /* Erratum prevents reading CHR_2 so just delay for a while */
642 udelay(100);
2e89db75 643 } else {
1da177e4 644 writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
2e89db75 645 pi->mpsc_base + MPSC_CHR_2);
1da177e4
LT
646
647 while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
648 udelay(10);
649 }
1da177e4
LT
650}
651
2e89db75 652static void mpsc_freeze(struct mpsc_port_info *pi)
1da177e4
LT
653{
654 u32 v;
655
656 pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);
657
658 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
659 readl(pi->mpsc_base + MPSC_MPCR);
660 v |= MPSC_MPCR_FRZ;
661
662 if (pi->mirror_regs)
663 pi->MPSC_MPCR_m = v;
664 writel(v, pi->mpsc_base + MPSC_MPCR);
1da177e4
LT
665}
666
2e89db75 667static void mpsc_unfreeze(struct mpsc_port_info *pi)
1da177e4
LT
668{
669 u32 v;
670
671 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
672 readl(pi->mpsc_base + MPSC_MPCR);
673 v &= ~MPSC_MPCR_FRZ;
674
675 if (pi->mirror_regs)
676 pi->MPSC_MPCR_m = v;
677 writel(v, pi->mpsc_base + MPSC_MPCR);
678
679 pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
1da177e4
LT
680}
681
2e89db75 682static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
1da177e4
LT
683{
684 u32 v;
685
686 pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len);
687
688 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
689 readl(pi->mpsc_base + MPSC_MPCR);
690 v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);
691
692 if (pi->mirror_regs)
693 pi->MPSC_MPCR_m = v;
694 writel(v, pi->mpsc_base + MPSC_MPCR);
1da177e4
LT
695}
696
2e89db75 697static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
1da177e4
LT
698{
699 u32 v;
700
701 pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
702 pi->port.line, len);
703
704 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
705 readl(pi->mpsc_base + MPSC_MPCR);
706
707 v = (v & ~(1 << 14)) | ((len & 0x1) << 14);
708
709 if (pi->mirror_regs)
710 pi->MPSC_MPCR_m = v;
711 writel(v, pi->mpsc_base + MPSC_MPCR);
1da177e4
LT
712}
713
2e89db75 714static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
1da177e4
LT
715{
716 u32 v;
717
718 pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);
719
720 v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
721 readl(pi->mpsc_base + MPSC_CHR_2);
722
723 p &= 0x3;
724 v = (v & ~0xc000c) | (p << 18) | (p << 2);
725
726 if (pi->mirror_regs)
727 pi->MPSC_CHR_2_m = v;
728 writel(v, pi->mpsc_base + MPSC_CHR_2);
1da177e4
LT
729}
730
731/*
732 ******************************************************************************
733 *
734 * Driver Init Routines
735 *
736 ******************************************************************************
737 */
738
2e89db75 739static void mpsc_init_hw(struct mpsc_port_info *pi)
1da177e4
LT
740{
741 pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);
742
743 mpsc_brg_init(pi, pi->brg_clk_src);
744 mpsc_brg_enable(pi);
745 mpsc_sdma_init(pi, dma_get_cache_alignment()); /* burst a cacheline */
746 mpsc_sdma_stop(pi);
747 mpsc_hw_init(pi);
1da177e4
LT
748}
749
2e89db75 750static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
1da177e4
LT
751{
752 int rc = 0;
1da177e4
LT
753
754 pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
755 pi->port.line);
756
757 if (!pi->dma_region) {
758 if (!dma_supported(pi->port.dev, 0xffffffff)) {
759 printk(KERN_ERR "MPSC: Inadequate DMA support\n");
760 rc = -ENXIO;
2e89db75
MG
761 } else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
762 MPSC_DMA_ALLOC_SIZE,
763 &pi->dma_region_p, GFP_KERNEL))
764 == NULL) {
1da177e4
LT
765 printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
766 rc = -ENOMEM;
767 }
768 }
769
770 return rc;
771}
772
2e89db75 773static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
1da177e4
LT
774{
775 pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
776
777 if (pi->dma_region) {
778 dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
2e89db75 779 pi->dma_region, pi->dma_region_p);
1da177e4 780 pi->dma_region = NULL;
2e89db75 781 pi->dma_region_p = (dma_addr_t)NULL;
1da177e4 782 }
1da177e4
LT
783}
784
2e89db75 785static void mpsc_init_rings(struct mpsc_port_info *pi)
1da177e4
LT
786{
787 struct mpsc_rx_desc *rxre;
788 struct mpsc_tx_desc *txre;
789 dma_addr_t dp, dp_p;
790 u8 *bp, *bp_p;
791 int i;
792
793 pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);
794
795 BUG_ON(pi->dma_region == NULL);
796
797 memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);
798
799 /*
800 * Descriptors & buffers are multiples of cacheline size and must be
801 * cacheline aligned.
802 */
2e89db75
MG
803 dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
804 dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());
1da177e4
LT
805
806 /*
807 * Partition dma region into rx ring descriptor, rx buffers,
808 * tx ring descriptors, and tx buffers.
809 */
810 pi->rxr = dp;
811 pi->rxr_p = dp_p;
812 dp += MPSC_RXR_SIZE;
813 dp_p += MPSC_RXR_SIZE;
814
2e89db75
MG
815 pi->rxb = (u8 *)dp;
816 pi->rxb_p = (u8 *)dp_p;
1da177e4
LT
817 dp += MPSC_RXB_SIZE;
818 dp_p += MPSC_RXB_SIZE;
819
820 pi->rxr_posn = 0;
821
822 pi->txr = dp;
823 pi->txr_p = dp_p;
824 dp += MPSC_TXR_SIZE;
825 dp_p += MPSC_TXR_SIZE;
826
2e89db75
MG
827 pi->txb = (u8 *)dp;
828 pi->txb_p = (u8 *)dp_p;
1da177e4
LT
829
830 pi->txr_head = 0;
831 pi->txr_tail = 0;
832
833 /* Init rx ring descriptors */
834 dp = pi->rxr;
835 dp_p = pi->rxr_p;
836 bp = pi->rxb;
837 bp_p = pi->rxb_p;
838
839 for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
840 rxre = (struct mpsc_rx_desc *)dp;
841
842 rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
843 rxre->bytecnt = cpu_to_be16(0);
2e89db75
MG
844 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
845 | SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
846 | SDMA_DESC_CMDSTAT_L);
1da177e4
LT
847 rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
848 rxre->buf_ptr = cpu_to_be32(bp_p);
849
850 dp += MPSC_RXRE_SIZE;
851 dp_p += MPSC_RXRE_SIZE;
852 bp += MPSC_RXBE_SIZE;
853 bp_p += MPSC_RXBE_SIZE;
854 }
855 rxre->link = cpu_to_be32(pi->rxr_p); /* Wrap last back to first */
856
857 /* Init tx ring descriptors */
858 dp = pi->txr;
859 dp_p = pi->txr_p;
860 bp = pi->txb;
861 bp_p = pi->txb_p;
862
863 for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
864 txre = (struct mpsc_tx_desc *)dp;
865
866 txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
867 txre->buf_ptr = cpu_to_be32(bp_p);
868
869 dp += MPSC_TXRE_SIZE;
870 dp_p += MPSC_TXRE_SIZE;
871 bp += MPSC_TXBE_SIZE;
872 bp_p += MPSC_TXBE_SIZE;
873 }
874 txre->link = cpu_to_be32(pi->txr_p); /* Wrap last back to first */
875
2e89db75
MG
876 dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
877 MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
1da177e4
LT
878#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
879 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
880 flush_dcache_range((ulong)pi->dma_region,
2e89db75
MG
881 (ulong)pi->dma_region
882 + MPSC_DMA_ALLOC_SIZE);
1da177e4
LT
883#endif
884
885 return;
886}
887
2e89db75 888static void mpsc_uninit_rings(struct mpsc_port_info *pi)
1da177e4
LT
889{
890 pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line);
891
892 BUG_ON(pi->dma_region == NULL);
893
894 pi->rxr = 0;
895 pi->rxr_p = 0;
896 pi->rxb = NULL;
897 pi->rxb_p = NULL;
898 pi->rxr_posn = 0;
899
900 pi->txr = 0;
901 pi->txr_p = 0;
902 pi->txb = NULL;
903 pi->txb_p = NULL;
904 pi->txr_head = 0;
905 pi->txr_tail = 0;
1da177e4
LT
906}
907
2e89db75 908static int mpsc_make_ready(struct mpsc_port_info *pi)
1da177e4
LT
909{
910 int rc;
911
912 pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line);
913
914 if (!pi->ready) {
915 mpsc_init_hw(pi);
916 if ((rc = mpsc_alloc_ring_mem(pi)))
917 return rc;
918 mpsc_init_rings(pi);
919 pi->ready = 1;
920 }
921
922 return 0;
923}
924
3b216c9e
JW
925#ifdef CONFIG_CONSOLE_POLL
926static int serial_polled;
927#endif
928
1da177e4
LT
929/*
930 ******************************************************************************
931 *
932 * Interrupt Handling Routines
933 *
934 ******************************************************************************
935 */
936
bf7f5ee3 937static int mpsc_rx_intr(struct mpsc_port_info *pi, unsigned long *flags)
1da177e4
LT
938{
939 struct mpsc_rx_desc *rxre;
227434f8 940 struct tty_port *port = &pi->port.state->port;
1da177e4
LT
941 u32 cmdstat, bytes_in, i;
942 int rc = 0;
943 u8 *bp;
944 char flag = TTY_NORMAL;
1da177e4
LT
945
946 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
947
948 rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
949
2e89db75
MG
950 dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
951 DMA_FROM_DEVICE);
1da177e4
LT
952#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
953 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
954 invalidate_dcache_range((ulong)rxre,
2e89db75 955 (ulong)rxre + MPSC_RXRE_SIZE);
1da177e4
LT
956#endif
957
958 /*
959 * Loop through Rx descriptors handling ones that have been completed.
960 */
2e89db75
MG
961 while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
962 & SDMA_DESC_CMDSTAT_O)) {
1da177e4 963 bytes_in = be16_to_cpu(rxre->bytecnt);
3b216c9e
JW
964#ifdef CONFIG_CONSOLE_POLL
965 if (unlikely(serial_polled)) {
966 serial_polled = 0;
967 return 0;
968 }
969#endif
1da177e4 970 /* Following use of tty struct directly is deprecated */
227434f8 971 if (tty_buffer_request_room(port, bytes_in) < bytes_in) {
bf7f5ee3
VK
972 if (port->low_latency) {
973 spin_unlock_irqrestore(&pi->port.lock, *flags);
2e124b4a 974 tty_flip_buffer_push(port);
bf7f5ee3
VK
975 spin_lock_irqsave(&pi->port.lock, *flags);
976 }
1da177e4 977 /*
33f0f88f
AC
978 * If this failed then we will throw away the bytes
979 * but must do so to clear interrupts.
1da177e4
LT
980 */
981 }
982
983 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
2e89db75
MG
984 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
985 DMA_FROM_DEVICE);
1da177e4
LT
986#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
987 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
988 invalidate_dcache_range((ulong)bp,
2e89db75 989 (ulong)bp + MPSC_RXBE_SIZE);
1da177e4
LT
990#endif
991
992 /*
993 * Other than for parity error, the manual provides little
994 * info on what data will be in a frame flagged by any of
995 * these errors. For parity error, it is the last byte in
996 * the buffer that had the error. As for the rest, I guess
997 * we'll assume there is no data in the buffer.
998 * If there is...it gets lost.
999 */
2e89db75
MG
1000 if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
1001 | SDMA_DESC_CMDSTAT_FR
1002 | SDMA_DESC_CMDSTAT_OR))) {
1da177e4
LT
1003
1004 pi->port.icount.rx++;
1005
1006 if (cmdstat & SDMA_DESC_CMDSTAT_BR) { /* Break */
1007 pi->port.icount.brk++;
1008
1009 if (uart_handle_break(&pi->port))
1010 goto next_frame;
2e89db75 1011 } else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
1da177e4 1012 pi->port.icount.frame++;
2e89db75 1013 } else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
1da177e4 1014 pi->port.icount.overrun++;
2e89db75 1015 }
1da177e4
LT
1016
1017 cmdstat &= pi->port.read_status_mask;
1018
1019 if (cmdstat & SDMA_DESC_CMDSTAT_BR)
1020 flag = TTY_BREAK;
1021 else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
1022 flag = TTY_FRAME;
1023 else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
1024 flag = TTY_OVERRUN;
1025 else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
1026 flag = TTY_PARITY;
1027 }
1028
7d12e780 1029 if (uart_handle_sysrq_char(&pi->port, *bp)) {
1da177e4
LT
1030 bp++;
1031 bytes_in--;
3b216c9e
JW
1032#ifdef CONFIG_CONSOLE_POLL
1033 if (unlikely(serial_polled)) {
1034 serial_polled = 0;
1035 return 0;
1036 }
1037#endif
1da177e4
LT
1038 goto next_frame;
1039 }
1040
2e89db75
MG
1041 if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
1042 | SDMA_DESC_CMDSTAT_FR
1043 | SDMA_DESC_CMDSTAT_OR)))
1044 && !(cmdstat & pi->port.ignore_status_mask)) {
92a19f9c 1045 tty_insert_flip_char(port, *bp, flag);
2e89db75 1046 } else {
1da177e4 1047 for (i=0; i<bytes_in; i++)
92a19f9c 1048 tty_insert_flip_char(port, *bp++, TTY_NORMAL);
1da177e4
LT
1049
1050 pi->port.icount.rx += bytes_in;
1051 }
1052
1053next_frame:
1054 rxre->bytecnt = cpu_to_be16(0);
1055 wmb();
2e89db75
MG
1056 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
1057 | SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
1058 | SDMA_DESC_CMDSTAT_L);
1da177e4 1059 wmb();
2e89db75
MG
1060 dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
1061 DMA_BIDIRECTIONAL);
1da177e4
LT
1062#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1063 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1064 flush_dcache_range((ulong)rxre,
2e89db75 1065 (ulong)rxre + MPSC_RXRE_SIZE);
1da177e4
LT
1066#endif
1067
1068 /* Advance to next descriptor */
1069 pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
2e89db75
MG
1070 rxre = (struct mpsc_rx_desc *)
1071 (pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
1072 dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
1073 DMA_FROM_DEVICE);
1da177e4
LT
1074#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1075 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1076 invalidate_dcache_range((ulong)rxre,
2e89db75 1077 (ulong)rxre + MPSC_RXRE_SIZE);
1da177e4 1078#endif
1da177e4
LT
1079 rc = 1;
1080 }
1081
1082 /* Restart rx engine, if its stopped */
1083 if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
1084 mpsc_start_rx(pi);
1085
bf7f5ee3 1086 spin_unlock_irqrestore(&pi->port.lock, *flags);
2e124b4a 1087 tty_flip_buffer_push(port);
bf7f5ee3 1088 spin_lock_irqsave(&pi->port.lock, *flags);
1da177e4
LT
1089 return rc;
1090}
1091
/*
 * Build the Tx descriptor at txr_head for a buffer of @count bytes, then
 * hand it to the SDMA by setting the O (ownership) bit last.  @intr non-zero
 * also sets the EI bit so the SDMA raises an interrupt when the descriptor
 * completes.  The caller advances txr_head afterwards.
 */
static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb(); /* ensure cmdstat is last field updated */
	/* Ownership transfers to the SDMA only after the rest is visible */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
	wmb();
	/* Push the descriptor out of the CPU cache so the SDMA sees it */
	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif
}
1114
/*
 * Drain pending transmit data (x_char first, then the serial core's circular
 * buffer) into the Tx ring buffers, creating one descriptor per chunk via
 * mpsc_setup_tx_desc().  Copies at most MPSC_TXBE_SIZE bytes per descriptor
 * and stops when the ring is full or there is nothing left to send.
 * Caller must hold pi->tx_lock (see mpsc_tx_intr/mpsc_start_tx).
 */
static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.state->xmit;
	u8 *bp;
	u32 i;

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that its safe to write to
			 * CHR_1. Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			/* Clamp to one ring buffer and to the contiguous
			 * tail..end run of the circular buffer */
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
				UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		/* Make the freshly filled buffer visible to the SDMA */
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}
1166
/*
 * Tx interrupt work: reclaim descriptors the SDMA has finished with
 * (ownership bit cleared), account the transmitted bytes, refill the ring
 * from the serial core and restart the SDMA.  Returns 1 if at least one
 * descriptor completed, 0 otherwise.  Takes pi->tx_lock.
 */
static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		/* Pull the descriptor back into the CPU's view */
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		/* Walk completed descriptors (O bit cleared by the SDMA) */
		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
					MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
						(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);
		mpsc_sdma_start_tx(pi); /* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}
1214
1215/*
1216 * This is the driver's interrupt handler. To avoid a race, we first clear
1217 * the interrupt, then handle any completed Rx/Tx descriptors. When done
1218 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
1219 */
2e89db75 1220static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
1da177e4
LT
1221{
1222 struct mpsc_port_info *pi = dev_id;
1223 ulong iflags;
1224 int rc = IRQ_NONE;
1225
1226 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);
1227
1228 spin_lock_irqsave(&pi->port.lock, iflags);
1229 mpsc_sdma_intr_ack(pi);
bf7f5ee3 1230 if (mpsc_rx_intr(pi, &iflags))
1da177e4
LT
1231 rc = IRQ_HANDLED;
1232 if (mpsc_tx_intr(pi))
1233 rc = IRQ_HANDLED;
1234 spin_unlock_irqrestore(&pi->port.lock, iflags);
1235
1236 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
1237 return rc;
1238}
1239
1240/*
1241 ******************************************************************************
1242 *
1243 * serial_core.c Interface routines
1244 *
1245 ******************************************************************************
1246 */
2e89db75 1247static uint mpsc_tx_empty(struct uart_port *port)
1da177e4 1248{
22d4d44c
FF
1249 struct mpsc_port_info *pi =
1250 container_of(port, struct mpsc_port_info, port);
1da177e4
LT
1251 ulong iflags;
1252 uint rc;
1253
1254 spin_lock_irqsave(&pi->port.lock, iflags);
1255 rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
1256 spin_unlock_irqrestore(&pi->port.lock, iflags);
1257
1258 return rc;
1259}
1260
2e89db75 1261static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
1da177e4
LT
1262{
1263 /* Have no way to set modem control lines AFAICT */
1da177e4
LT
1264}
1265
2e89db75 1266static uint mpsc_get_mctrl(struct uart_port *port)
1da177e4 1267{
22d4d44c
FF
1268 struct mpsc_port_info *pi =
1269 container_of(port, struct mpsc_port_info, port);
1da177e4 1270 u32 mflags, status;
1da177e4 1271
2e89db75
MG
1272 status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
1273 : readl(pi->mpsc_base + MPSC_CHR_10);
1da177e4
LT
1274
1275 mflags = 0;
1276 if (status & 0x1)
1277 mflags |= TIOCM_CTS;
1278 if (status & 0x2)
1279 mflags |= TIOCM_CAR;
1280
1281 return mflags | TIOCM_DSR; /* No way to tell if DSR asserted */
1282}
1283
2e89db75 1284static void mpsc_stop_tx(struct uart_port *port)
1da177e4 1285{
22d4d44c
FF
1286 struct mpsc_port_info *pi =
1287 container_of(port, struct mpsc_port_info, port);
1da177e4 1288
b129a8cc 1289 pr_debug("mpsc_stop_tx[%d]\n", port->line);
1da177e4
LT
1290
1291 mpsc_freeze(pi);
1da177e4
LT
1292}
1293
2e89db75 1294static void mpsc_start_tx(struct uart_port *port)
1da177e4 1295{
22d4d44c
FF
1296 struct mpsc_port_info *pi =
1297 container_of(port, struct mpsc_port_info, port);
1733310b
DJ
1298 unsigned long iflags;
1299
1300 spin_lock_irqsave(&pi->tx_lock, iflags);
1da177e4
LT
1301
1302 mpsc_unfreeze(pi);
1303 mpsc_copy_tx_data(pi);
1304 mpsc_sdma_start_tx(pi);
1305
1733310b
DJ
1306 spin_unlock_irqrestore(&pi->tx_lock, iflags);
1307
b129a8cc 1308 pr_debug("mpsc_start_tx[%d]\n", port->line);
1da177e4
LT
1309}
1310
2e89db75 1311static void mpsc_start_rx(struct mpsc_port_info *pi)
1da177e4
LT
1312{
1313 pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
1314
1315 if (pi->rcv_data) {
1316 mpsc_enter_hunt(pi);
1317 mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
1318 }
1da177e4
LT
1319}
1320
/*
 * serial_core stop_rx hook: issue the receiver-abort (RA) command and then
 * abort Rx DMA.  On chips with the read erratum we cannot poll CHR_2 for
 * RA completion, so a fixed delay is used instead.
 */
static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);

		/* Wait for the hardware to clear RA, signalling completion */
		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(10);
	}

	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
}
1343
2e89db75 1344static void mpsc_break_ctl(struct uart_port *port, int ctl)
1da177e4 1345{
22d4d44c
FF
1346 struct mpsc_port_info *pi =
1347 container_of(port, struct mpsc_port_info, port);
1da177e4
LT
1348 ulong flags;
1349 u32 v;
1350
1351 v = ctl ? 0x00ff0000 : 0;
1352
1353 spin_lock_irqsave(&pi->port.lock, flags);
1354 if (pi->mirror_regs)
1355 pi->MPSC_CHR_1_m = v;
1356 writel(v, pi->mpsc_base + MPSC_CHR_1);
1357 spin_unlock_irqrestore(&pi->port.lock, flags);
1da177e4
LT
1358}
1359
2e89db75 1360static int mpsc_startup(struct uart_port *port)
1da177e4 1361{
22d4d44c
FF
1362 struct mpsc_port_info *pi =
1363 container_of(port, struct mpsc_port_info, port);
1da177e4
LT
1364 u32 flag = 0;
1365 int rc;
1366
1367 pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
1368 port->line, pi->port.irq);
1369
1370 if ((rc = mpsc_make_ready(pi)) == 0) {
1371 /* Setup IRQ handler */
1372 mpsc_sdma_intr_ack(pi);
1373
1374 /* If irq's are shared, need to set flag */
1375 if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
40663cc7 1376 flag = IRQF_SHARED;
1da177e4
LT
1377
1378 if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
2e89db75 1379 "mpsc-sdma", pi))
1da177e4 1380 printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
2e89db75 1381 pi->port.irq);
1da177e4
LT
1382
1383 mpsc_sdma_intr_unmask(pi, 0xf);
2e89db75
MG
1384 mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
1385 + (pi->rxr_posn * MPSC_RXRE_SIZE)));
1da177e4
LT
1386 }
1387
1388 return rc;
1389}
1390
/* serial_core shutdown hook: stop the SDMA and release the interrupt. */
static void mpsc_shutdown(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);

	mpsc_sdma_stop(pi);
	free_irq(pi->port.irq, pi);
}
1401
/*
 * serial_core set_termios hook: program character length, stop bits, parity
 * and baud rate from the new termios, then rebuild read/ignore status masks
 * used by the Rx path, and start/stop the receiver according to CREAD.
 */
static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 baud;
	ulong flags;
	u32 chr_bits, stop_bits, par;

	pi->c_iflag = termios->c_iflag;
	pi->c_cflag = termios->c_cflag;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		chr_bits = MPSC_MPCR_CL_5;
		break;
	case CS6:
		chr_bits = MPSC_MPCR_CL_6;
		break;
	case CS7:
		chr_bits = MPSC_MPCR_CL_7;
		break;
	case CS8:
	default:
		chr_bits = MPSC_MPCR_CL_8;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		stop_bits = MPSC_MPCR_SBL_2;
	else
		stop_bits = MPSC_MPCR_SBL_1;

	par = MPSC_CHR_2_PAR_EVEN;
	if (termios->c_cflag & PARENB)
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_ODD;
#ifdef CMSPAR
	/* Sticky parity (mark/space) overrides even/odd when requested */
	if (termios->c_cflag & CMSPAR) {
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_MARK;
		else
			par = MPSC_CHR_2_PAR_SPACE;
	}
#endif

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);

	spin_lock_irqsave(&pi->port.lock, flags);

	uart_update_timeout(port, termios->c_cflag, baud);

	mpsc_set_char_length(pi, chr_bits);
	mpsc_set_stop_bit_length(pi, stop_bits);
	mpsc_set_parity(pi, par);
	mpsc_set_baudrate(pi, baud);

	/* Characters/events to read */
	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;

	if (termios->c_iflag & INPCK)
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;

	/* Characters/events to ignore */
	pi->port.ignore_status_mask = 0;

	if (termios->c_iflag & IGNPAR)
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & IGNBRK) {
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;

		/* Ignore overruns too, only when parity errors are ignored */
		if (termios->c_iflag & IGNPAR)
			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
	}

	/* Track CREAD transitions so the receiver is only running on demand */
	if ((termios->c_cflag & CREAD)) {
		if (!pi->rcv_data) {
			pi->rcv_data = 1;
			mpsc_start_rx(pi);
		}
	} else if (pi->rcv_data) {
		mpsc_stop_rx(port);
		pi->rcv_data = 0;
	}

	spin_unlock_irqrestore(&pi->port.lock, flags);
}
1495
/* serial_core type hook: human-readable port type string. */
static const char *mpsc_type(struct uart_port *port)
{
	pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
	return MPSC_DRIVER_NAME;
}
1501
/* serial_core request_port hook: nothing to claim here. */
static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}
1507
2e89db75 1508static void mpsc_release_port(struct uart_port *port)
1da177e4 1509{
22d4d44c
FF
1510 struct mpsc_port_info *pi =
1511 container_of(port, struct mpsc_port_info, port);
1da177e4
LT
1512
1513 if (pi->ready) {
1514 mpsc_uninit_rings(pi);
1515 mpsc_free_ring_mem(pi);
1516 pi->ready = 0;
1517 }
1da177e4
LT
1518}
1519
/* serial_core config_port hook: intentionally empty. */
static void mpsc_config_port(struct uart_port *port, int flags)
{
}
1523
2e89db75 1524static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
1da177e4 1525{
22d4d44c
FF
1526 struct mpsc_port_info *pi =
1527 container_of(port, struct mpsc_port_info, port);
1da177e4
LT
1528 int rc = 0;
1529
1530 pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
1531
1532 if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
1533 rc = -EINVAL;
1534 else if (pi->port.irq != ser->irq)
1535 rc = -EINVAL;
1536 else if (ser->io_type != SERIAL_IO_MEM)
1537 rc = -EINVAL;
1538 else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
1539 rc = -EINVAL;
1540 else if ((void *)pi->port.mapbase != ser->iomem_base)
1541 rc = -EINVAL;
1542 else if (pi->port.iobase != ser->port)
1543 rc = -EINVAL;
1544 else if (ser->hub6 != 0)
1545 rc = -EINVAL;
1546
1547 return rc;
1548}
3b216c9e
JW
1549#ifdef CONFIG_CONSOLE_POLL
1550/* Serial polling routines for writing and reading from the uart while
1551 * in an interrupt or debug context.
1552 */
1553
1554static char poll_buf[2048];
1555static int poll_ptr;
1556static int poll_cnt;
1557static void mpsc_put_poll_char(struct uart_port *port,
1558 unsigned char c);
1559
/*
 * Polled-mode receive (e.g. for kgdb): drains completed Rx descriptors into
 * poll_buf and returns one byte per call, busy-waiting until at least one
 * byte arrives.  Sets serial_polled so the interrupt-driven Rx path backs
 * off.  Returns the next received byte (0 only if the wait loop exits with
 * nothing buffered).
 *
 * NOTE(review): bytes are appended to poll_buf[2048] without an explicit
 * bound check in the copy loops — presumably bytes_in per descriptor is
 * small enough in practice; verify against MPSC_RXBE_SIZE.
 */
static int mpsc_get_poll_char(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	struct mpsc_rx_desc *rxre;
	u32 cmdstat, bytes_in, i;
	u8 *bp;

	if (!serial_polled)
		serial_polled = 1;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	/* Serve buffered bytes from a previous call first */
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}
	poll_ptr = 0;
	poll_cnt = 0;

	while (poll_cnt == 0) {
		rxre = (struct mpsc_rx_desc *)(pi->rxr +
				(pi->rxr_posn*MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre,
				MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		/*
		 * Loop through Rx descriptors handling ones that have
		 * been completed.
		 */
		while (poll_cnt == 0 &&
				!((cmdstat = be32_to_cpu(rxre->cmdstat)) &
					SDMA_DESC_CMDSTAT_O)){
			bytes_in = be16_to_cpu(rxre->bytecnt);
			bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
			dma_cache_sync(pi->port.dev, (void *) bp,
					MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)bp,
						(ulong)bp + MPSC_RXBE_SIZE);
#endif
			/* Error frame (break/framing/overrun): keep only the
			 * single flagged byte unless it is being ignored */
			if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
					SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
					!(cmdstat & pi->port.ignore_status_mask)) {
				poll_buf[poll_cnt] = *bp;
				poll_cnt++;
			} else {
				for (i = 0; i < bytes_in; i++) {
					poll_buf[poll_cnt] = *bp++;
					poll_cnt++;
				}
				pi->port.icount.rx += bytes_in;
			}
			/* Recycle the descriptor: clear count, then give
			 * ownership back to the SDMA (O bit written last) */
			rxre->bytecnt = cpu_to_be16(0);
			wmb();
			rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
					SDMA_DESC_CMDSTAT_EI |
					SDMA_DESC_CMDSTAT_F |
					SDMA_DESC_CMDSTAT_L);
			wmb();
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				flush_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif

			/* Advance to next descriptor */
			pi->rxr_posn = (pi->rxr_posn + 1) &
				(MPSC_RXR_ENTRIES - 1);
			rxre = (struct mpsc_rx_desc *)(pi->rxr +
					(pi->rxr_posn * MPSC_RXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		}

		/* Restart rx engine, if its stopped */
		if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
			mpsc_start_rx(pi);
	}
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}

	return 0;
}
1658
1659
/*
 * Polled-mode transmit: place one character in CHR_1, trigger transmission
 * via the TTCS bit in CHR_2, and busy-wait until the hardware clears TTCS.
 */
static void mpsc_put_poll_char(struct uart_port *port,
			unsigned char c)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 data;

	/* Initial MPCR read's value is discarded — presumably just a read to
	 * the device before writing; TODO confirm against chip errata */
	data = readl(pi->mpsc_base + MPSC_MPCR);
	writeb(c, pi->mpsc_base + MPSC_CHR_1);
	mb();
	data = readl(pi->mpsc_base + MPSC_CHR_2);
	data |= MPSC_CHR_2_TTCS;
	writel(data, pi->mpsc_base + MPSC_CHR_2);
	mb();

	/* Spin until the single-character transmit completes */
	while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
}
1677#endif
1da177e4
LT
1678
/* uart_ops vector wiring this driver's hooks into the serial core */
static struct uart_ops mpsc_pops = {
	.tx_empty	= mpsc_tx_empty,
	.set_mctrl	= mpsc_set_mctrl,
	.get_mctrl	= mpsc_get_mctrl,
	.stop_tx	= mpsc_stop_tx,
	.start_tx	= mpsc_start_tx,
	.stop_rx	= mpsc_stop_rx,
	.break_ctl	= mpsc_break_ctl,
	.startup	= mpsc_startup,
	.shutdown	= mpsc_shutdown,
	.set_termios	= mpsc_set_termios,
	.type		= mpsc_type,
	.release_port	= mpsc_release_port,
	.request_port	= mpsc_request_port,
	.config_port	= mpsc_config_port,
	.verify_port	= mpsc_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= mpsc_get_poll_char,
	.poll_put_char	= mpsc_put_poll_char,
#endif
};
1700
1701/*
1702 ******************************************************************************
1703 *
1704 * Console Interface Routines
1705 *
1706 ******************************************************************************
1707 */
1708
1709#ifdef CONFIG_SERIAL_MPSC_CONSOLE
/*
 * Console write: synchronously pushes @count bytes of @s through the Tx
 * ring, expanding "\n" to "\r\n", busy-waiting for the SDMA after every
 * buffer.  First drains any in-flight Tx so console output cannot
 * interleave with normal transmit data.  Serialized by pi->tx_lock.
 */
static void mpsc_console_write(struct console *co, const char *s, uint count)
{
	struct mpsc_port_info *pi = &mpsc_ports[co->index];
	u8 *bp, *dp, add_cr = 0;
	int i;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	/* Reclaim all outstanding Tx descriptors before taking over */
	while (pi->txr_head != pi->txr_tail) {
		while (mpsc_sdma_tx_active(pi))
			udelay(100);
		mpsc_sdma_intr_ack(pi);
		mpsc_tx_intr(pi);
	}

	while (mpsc_sdma_tx_active(pi))
		udelay(100);

	while (count > 0) {
		bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);

		/* Fill one ring buffer, inserting '\r' after each '\n' */
		for (i = 0; i < MPSC_TXBE_SIZE; i++) {
			if (count == 0)
				break;

			if (add_cr) {
				*(dp++) = '\r';
				add_cr = 0;
			} else {
				*(dp++) = *s;

				if (*(s++) == '\n') { /* add '\r' after '\n' */
					add_cr = 1;
					count++;
				}
			}

			count--;
		}

		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		/* No completion interrupt (intr=0): we poll instead */
		mpsc_setup_tx_desc(pi, i, 0);
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
		mpsc_sdma_start_tx(pi);

		while (mpsc_sdma_tx_active(pi))
			udelay(100);

		pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
}
1770
2e89db75 1771static int __init mpsc_console_setup(struct console *co, char *options)
1da177e4
LT
1772{
1773 struct mpsc_port_info *pi;
1774 int baud, bits, parity, flow;
1775
1776 pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);
1777
1778 if (co->index >= MPSC_NUM_CTLRS)
1779 co->index = 0;
1780
1781 pi = &mpsc_ports[co->index];
1782
1783 baud = pi->default_baud;
1784 bits = pi->default_bits;
1785 parity = pi->default_parity;
1786 flow = pi->default_flow;
1787
1788 if (!pi->port.ops)
1789 return -ENODEV;
1790
1791 spin_lock_init(&pi->port.lock); /* Temporary fix--copied from 8250.c */
1792
1793 if (options)
1794 uart_parse_options(options, &baud, &parity, &bits, &flow);
1795
1796 return uart_set_options(&pi->port, co, baud, parity, bits, flow);
1797}
1798
/* Console descriptor registered by mpsc_late_console_init() */
static struct console mpsc_console = {
	.name	= MPSC_DEV_NAME,
	.write	= mpsc_console_write,
	.device	= uart_console_device,
	.setup	= mpsc_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &mpsc_reg,
};
1808
/* Register the MPSC console late in boot if nothing enabled it earlier. */
static int __init mpsc_late_console_init(void)
{
	pr_debug("mpsc_late_console_init: Enter\n");

	if (!(mpsc_console.flags & CON_ENABLED))
		register_console(&mpsc_console);
	return 0;
}
1817
1818late_initcall(mpsc_late_console_init);
1819
1820#define MPSC_CONSOLE &mpsc_console
1821#else
1822#define MPSC_CONSOLE NULL
1823#endif
1824/*
1825 ******************************************************************************
1826 *
1827 * Dummy Platform Driver to extract & map shared register regions
1828 *
1829 ******************************************************************************
1830 */
/* Log a missing/failed platform resource, identified by @s. */
static void mpsc_resource_err(char *s)
{
	printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
}
1835
2e89db75 1836static int mpsc_shared_map_regs(struct platform_device *pd)
1da177e4
LT
1837{
1838 struct resource *r;
1839
1840 if ((r = platform_get_resource(pd, IORESOURCE_MEM,
2e89db75
MG
1841 MPSC_ROUTING_BASE_ORDER))
1842 && request_mem_region(r->start,
1843 MPSC_ROUTING_REG_BLOCK_SIZE,
1844 "mpsc_routing_regs")) {
1da177e4 1845 mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
2e89db75 1846 MPSC_ROUTING_REG_BLOCK_SIZE);
1da177e4 1847 mpsc_shared_regs.mpsc_routing_base_p = r->start;
2e89db75 1848 } else {
1da177e4
LT
1849 mpsc_resource_err("MPSC routing base");
1850 return -ENOMEM;
1851 }
1852
1853 if ((r = platform_get_resource(pd, IORESOURCE_MEM,
2e89db75
MG
1854 MPSC_SDMA_INTR_BASE_ORDER))
1855 && request_mem_region(r->start,
1856 MPSC_SDMA_INTR_REG_BLOCK_SIZE,
1857 "sdma_intr_regs")) {
1da177e4
LT
1858 mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
1859 MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1860 mpsc_shared_regs.sdma_intr_base_p = r->start;
2e89db75 1861 } else {
1da177e4
LT
1862 iounmap(mpsc_shared_regs.mpsc_routing_base);
1863 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
2e89db75 1864 MPSC_ROUTING_REG_BLOCK_SIZE);
1da177e4
LT
1865 mpsc_resource_err("SDMA intr base");
1866 return -ENOMEM;
1867 }
1868
1869 return 0;
1870}
1871
2e89db75 1872static void mpsc_shared_unmap_regs(void)
1da177e4
LT
1873{
1874 if (!mpsc_shared_regs.mpsc_routing_base) {
1875 iounmap(mpsc_shared_regs.mpsc_routing_base);
1876 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
2e89db75 1877 MPSC_ROUTING_REG_BLOCK_SIZE);
1da177e4
LT
1878 }
1879 if (!mpsc_shared_regs.sdma_intr_base) {
1880 iounmap(mpsc_shared_regs.sdma_intr_base);
1881 release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
2e89db75 1882 MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1da177e4
LT
1883 }
1884
2c6e7599
AV
1885 mpsc_shared_regs.mpsc_routing_base = NULL;
1886 mpsc_shared_regs.sdma_intr_base = NULL;
1da177e4
LT
1887
1888 mpsc_shared_regs.mpsc_routing_base_p = 0;
1889 mpsc_shared_regs.sdma_intr_base_p = 0;
1da177e4
LT
1890}
1891
2e89db75 1892static int mpsc_shared_drv_probe(struct platform_device *dev)
1da177e4 1893{
1da177e4
LT
1894 struct mpsc_shared_pdata *pdata;
1895 int rc = -ENODEV;
1896
3ae5eaec 1897 if (dev->id == 0) {
2e89db75
MG
1898 if (!(rc = mpsc_shared_map_regs(dev))) {
1899 pdata = (struct mpsc_shared_pdata *)
574de559 1900 dev_get_platdata(&dev->dev);
1da177e4
LT
1901
1902 mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
1903 mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
1904 mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
1905 mpsc_shared_regs.SDMA_INTR_CAUSE_m =
1906 pdata->intr_cause_val;
1907 mpsc_shared_regs.SDMA_INTR_MASK_m =
1908 pdata->intr_mask_val;
1909
1910 rc = 0;
1911 }
1912 }
1913
1914 return rc;
1915}
1916
2e89db75 1917static int mpsc_shared_drv_remove(struct platform_device *dev)
1da177e4 1918{
1da177e4
LT
1919 int rc = -ENODEV;
1920
3ae5eaec 1921 if (dev->id == 0) {
1da177e4
LT
1922 mpsc_shared_unmap_regs();
1923 mpsc_shared_regs.MPSC_MRR_m = 0;
1924 mpsc_shared_regs.MPSC_RCRR_m = 0;
1925 mpsc_shared_regs.MPSC_TCRR_m = 0;
1926 mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
1927 mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
1928 rc = 0;
1929 }
1930
1931 return rc;
1932}
1933
/* Platform driver for the dummy device carrying the shared regions */
static struct platform_driver mpsc_shared_driver = {
	.probe	= mpsc_shared_drv_probe,
	.remove	= mpsc_shared_drv_remove,
	.driver	= {
		.name = MPSC_SHARED_NAME,
	},
};
1941
1942/*
1943 ******************************************************************************
1944 *
1945 * Driver Interface Routines
1946 *
1947 ******************************************************************************
1948 */
1949static struct uart_driver mpsc_reg = {
2e89db75
MG
1950 .owner = THIS_MODULE,
1951 .driver_name = MPSC_DRIVER_NAME,
1952 .dev_name = MPSC_DEV_NAME,
1953 .major = MPSC_MAJOR,
1954 .minor = MPSC_MINOR_START,
1955 .nr = MPSC_NUM_CTLRS,
1956 .cons = MPSC_CONSOLE,
1da177e4
LT
1957};
1958
2e89db75
MG
1959static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
1960 struct platform_device *pd)
1da177e4
LT
1961{
1962 struct resource *r;
1963
2e89db75
MG
1964 if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER))
1965 && request_mem_region(r->start, MPSC_REG_BLOCK_SIZE,
1966 "mpsc_regs")) {
1da177e4
LT
1967 pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
1968 pi->mpsc_base_p = r->start;
2e89db75 1969 } else {
1da177e4 1970 mpsc_resource_err("MPSC base");
2e89db75 1971 goto err;
1da177e4
LT
1972 }
1973
1974 if ((r = platform_get_resource(pd, IORESOURCE_MEM,
2e89db75
MG
1975 MPSC_SDMA_BASE_ORDER))
1976 && request_mem_region(r->start,
1977 MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
1da177e4
LT
1978 pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE);
1979 pi->sdma_base_p = r->start;
2e89db75 1980 } else {
1da177e4 1981 mpsc_resource_err("SDMA base");
a141a043
AL
1982 if (pi->mpsc_base) {
1983 iounmap(pi->mpsc_base);
1984 pi->mpsc_base = NULL;
1985 }
2e89db75 1986 goto err;
1da177e4
LT
1987 }
1988
1989 if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER))
2e89db75
MG
1990 && request_mem_region(r->start,
1991 MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) {
1da177e4
LT
1992 pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
1993 pi->brg_base_p = r->start;
2e89db75 1994 } else {
1da177e4 1995 mpsc_resource_err("BRG base");
a141a043
AL
1996 if (pi->mpsc_base) {
1997 iounmap(pi->mpsc_base);
1998 pi->mpsc_base = NULL;
1999 }
2000 if (pi->sdma_base) {
2001 iounmap(pi->sdma_base);
2002 pi->sdma_base = NULL;
2003 }
2e89db75 2004 goto err;
1da177e4 2005 }
1da177e4 2006 return 0;
2e89db75
MG
2007
2008err:
2009 return -ENOMEM;
1da177e4
LT
2010}
2011
2e89db75 2012static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
1da177e4
LT
2013{
2014 if (!pi->mpsc_base) {
2015 iounmap(pi->mpsc_base);
2016 release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
2017 }
2018 if (!pi->sdma_base) {
2019 iounmap(pi->sdma_base);
2020 release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
2021 }
2022 if (!pi->brg_base) {
2023 iounmap(pi->brg_base);
2024 release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
2025 }
2026
2c6e7599
AV
2027 pi->mpsc_base = NULL;
2028 pi->sdma_base = NULL;
2029 pi->brg_base = NULL;
1da177e4
LT
2030
2031 pi->mpsc_base_p = 0;
2032 pi->sdma_base_p = 0;
2033 pi->brg_base_p = 0;
1da177e4
LT
2034}
2035
2e89db75
MG
/*
 * mpsc_drv_get_platform_data - copy per-port configuration from the
 * platform data attached to @pd into @pi and its embedded uart_port.
 * @num is the port's line number within the driver.
 *
 * Must be called after mpsc_drv_map_regs() since it publishes
 * pi->mpsc_base through pi->port.membase.
 */
static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
		struct platform_device *pd, int num)
{
	struct mpsc_pdata *pdata;

	/* NOTE(review): pdata is dereferenced unchecked — assumes every
	 * registering platform attaches mpsc_pdata; confirm at call sites. */
	pdata = dev_get_platdata(&pd->dev);

	pi->port.uartclk = pdata->brg_clk_freq;
	pi->port.iotype = UPIO_MEM;
	pi->port.line = num;
	pi->port.type = PORT_MPSC;
	pi->port.fifosize = MPSC_TXBE_SIZE;
	pi->port.membase = pi->mpsc_base;
	/* NOTE(review): mapbase conventionally holds the physical base, but
	 * this stores the ioremap'd virtual address (pi->mpsc_base_p is the
	 * physical one) — long-standing behavior, left as-is. */
	pi->port.mapbase = (ulong)pi->mpsc_base;
	pi->port.ops = &mpsc_pops;

	pi->mirror_regs = pdata->mirror_regs;
	pi->cache_mgmt = pdata->cache_mgmt;
	pi->brg_can_tune = pdata->brg_can_tune;
	pi->brg_clk_src = pdata->brg_clk_src;
	pi->mpsc_max_idle = pdata->max_idle;
	pi->default_baud = pdata->default_baud;
	pi->default_bits = pdata->default_bits;
	pi->default_parity = pdata->default_parity;
	pi->default_flow = pdata->default_flow;

	/* Initial values of mirrored regs (workaround for chips whose regs
	 * cannot be read back — see the erratum note at the top of the file) */
	pi->MPSC_CHR_1_m = pdata->chr_1_val;
	pi->MPSC_CHR_2_m = pdata->chr_2_val;
	pi->MPSC_CHR_10_m = pdata->chr_10_val;
	pi->MPSC_MPCR_m = pdata->mpcr_val;
	pi->BRG_BCR_m = pdata->bcr_val;

	pi->shared_regs = &mpsc_shared_regs;

	pi->port.irq = platform_get_irq(pd, 0);
}
2073
2e89db75 2074static int mpsc_drv_probe(struct platform_device *dev)
1da177e4 2075{
1da177e4
LT
2076 struct mpsc_port_info *pi;
2077 int rc = -ENODEV;
2078
3ae5eaec 2079 pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);
1da177e4 2080
3ae5eaec
RK
2081 if (dev->id < MPSC_NUM_CTLRS) {
2082 pi = &mpsc_ports[dev->id];
1da177e4 2083
3ae5eaec
RK
2084 if (!(rc = mpsc_drv_map_regs(pi, dev))) {
2085 mpsc_drv_get_platform_data(pi, dev, dev->id);
f467bc14 2086 pi->port.dev = &dev->dev;
1da177e4 2087
1733310b
DJ
2088 if (!(rc = mpsc_make_ready(pi))) {
2089 spin_lock_init(&pi->tx_lock);
1da177e4 2090 if (!(rc = uart_add_one_port(&mpsc_reg,
2e89db75 2091 &pi->port))) {
1da177e4 2092 rc = 0;
2e89db75
MG
2093 } else {
2094 mpsc_release_port((struct uart_port *)
2095 pi);
1da177e4
LT
2096 mpsc_drv_unmap_regs(pi);
2097 }
2e89db75 2098 } else {
1da177e4 2099 mpsc_drv_unmap_regs(pi);
2e89db75 2100 }
1da177e4
LT
2101 }
2102 }
2103
2104 return rc;
2105}
2106
2e89db75 2107static int mpsc_drv_remove(struct platform_device *dev)
1da177e4 2108{
3ae5eaec 2109 pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id);
1da177e4 2110
3ae5eaec
RK
2111 if (dev->id < MPSC_NUM_CTLRS) {
2112 uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port);
2e89db75
MG
2113 mpsc_release_port((struct uart_port *)
2114 &mpsc_ports[dev->id].port);
3ae5eaec 2115 mpsc_drv_unmap_regs(&mpsc_ports[dev->id]);
1da177e4 2116 return 0;
2e89db75 2117 } else {
1da177e4 2118 return -ENODEV;
2e89db75 2119 }
1da177e4
LT
2120}
2121
/* Platform driver matching MPSC controller devices by name
 * (MPSC_CTLR_NAME); registered in mpsc_drv_init(). */
static struct platform_driver mpsc_driver = {
	.probe = mpsc_drv_probe,
	.remove = mpsc_drv_remove,
	.driver = {
		.name = MPSC_CTLR_NAME,
	},
};
2129
2e89db75 2130static int __init mpsc_drv_init(void)
1da177e4
LT
2131{
2132 int rc;
2133
d87a6d95 2134 printk(KERN_INFO "Serial: MPSC driver\n");
1da177e4
LT
2135
2136 memset(mpsc_ports, 0, sizeof(mpsc_ports));
2137 memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
2138
2139 if (!(rc = uart_register_driver(&mpsc_reg))) {
3ae5eaec
RK
2140 if (!(rc = platform_driver_register(&mpsc_shared_driver))) {
2141 if ((rc = platform_driver_register(&mpsc_driver))) {
2142 platform_driver_unregister(&mpsc_shared_driver);
1da177e4
LT
2143 uart_unregister_driver(&mpsc_reg);
2144 }
2e89db75 2145 } else {
1da177e4 2146 uart_unregister_driver(&mpsc_reg);
2e89db75 2147 }
1da177e4
LT
2148 }
2149
2150 return rc;
1da177e4
LT
2151}
2152
/*
 * mpsc_drv_exit - module unload: unregister in the reverse order of
 * mpsc_drv_init() (per-controller driver, shared-regs driver, then the
 * uart driver).  The order is load-bearing; do not rearrange.
 */
static void __exit mpsc_drv_exit(void)
{
	platform_driver_unregister(&mpsc_driver);
	platform_driver_unregister(&mpsc_shared_driver);
	uart_unregister_driver(&mpsc_reg);
	/* Scrub the static state so a later re-init starts clean. */
	memset(mpsc_ports, 0, sizeof(mpsc_ports));
	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
}
2161
/* Module entry points and metadata. */
module_init(mpsc_drv_init);
module_exit(mpsc_drv_exit);

MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
MODULE_VERSION(MPSC_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);
MODULE_ALIAS("platform:" MPSC_CTLR_NAME);