/*
 *  Driver for AMBA serial ports
 *
 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 *  Copyright 1999 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	unsigned int		ifls;
	unsigned int		fifosize;
	unsigned int		lcrh_tx;
	unsigned int		lcrh_rx;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
};

static struct vendor_data vendor_arm = {
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fifosize		= 16,
	.lcrh_tx		= UART011_LCRH,
	.lcrh_rx		= UART011_LCRH,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
};

static struct vendor_data vendor_st = {
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fifosize		= 64,
	.lcrh_tx		= ST_UART011_LCRH_TX,
	.lcrh_rx		= ST_UART011_LCRH_RX,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
};

static struct uart_amba_port *amba_ports[UART_NR];

/* Deals with DMA transactions */

struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};

struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	struct clk		*clk;
	/* Two optional pin states - default & sleep */
	struct pinctrl		*pinctrl;
	struct pinctrl_state	*pins_default;
	struct pinctrl_state	*pins_sleep;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		lcrh_tx;	/* vendor-specific */
	unsigned int		lcrh_rx;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	bool			autorts;
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
#endif
};

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status, ch;
	unsigned int flag, max_count = 256;
	int fifotaken = 0;

	while (max_count--) {
		status = readw(uap->port.membase + UART01x_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = readw(uap->port.membase + UART01x_DR) |
			UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

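/*
 * Allocate one DMA bounce buffer and map it into a single-entry
 * scatterlist for the given channel and direction.
 */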
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);

	if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
		kfree(sg->buf);
		return -EINVAL;
	}
	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
		kfree(sg->buf);
	}
}

static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = uap->port.dev->platform_data;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase + UART01x_DR,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	/* We need platform data */
	if (!plat || !plat->dma_filter) {
		dev_info(uap->port.dev, "no DMA platform data\n");
		return;
	}

	/* Try to acquire a generic DMA engine slave TX channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
	if (!chan) {
		dev_err(uap->port.dev, "no TX DMA channel!\n");
		return;
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	if (plat->dma_rx_param) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase + UART01x_DR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 1,
			.device_fc = false,
		};

		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}

		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}

#ifndef MODULE
/*
 * Stack up the UARTs and let the above initcall be done at device
 * initcall time, because the serial driver is called as an arch
 * initcall, and at this time the DMA subsystem is not yet registered.
 * At this point the driver will switch over to using DMA where desired.
 */
struct dma_uap {
	struct list_head node;
	struct uart_amba_port *uap;
};

static LIST_HEAD(pl011_dma_uarts);

static int __init pl011_dma_initcall(void)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &pl011_dma_uarts) {
		struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
		pl011_dma_probe_initcall(dmau->uap);
		list_del(node);
		kfree(dmau);
	}
	return 0;
}

device_initcall(pl011_dma_initcall);

static void pl011_dma_probe(struct uart_amba_port *uap)
{
	struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
	if (dmau) {
		dmau->uap = uap;
		list_add_tail(&dmau->node, &pl011_dma_uarts);
	}
}
#else
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	pl011_dma_probe_initcall(uap);
}
#endif

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	/* TODO: remove the initcall if it has not yet executed */
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare this for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0) {
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent. Re-enable the TX IRQ.
		 */
		uap->im |= UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second = xmit->head;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}
	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				ret = true;
			} else {
				uap->im |= UART011_TXIM;
				ret = false;
			}
			writew(uap->im, uap->port.membase + UART011_IMSC);
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			writew(uap->dmacr,
				uap->port.membase + UART011_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	writew(dmacr, uap->port.membase + UART011_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_struct *tty = uap->port.state->port.tty;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	struct device *dev = uap->dmarx.chan->device->dev;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	/* Pick everything from the DMA first */
	if (pending) {
		/* Sync in buffer */
		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(uap->port.state->port.tty,
						   sgbuf->buf, pending);

		/* Return buffer to device */
		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
		       uap->port.membase + UART011_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(tty);
	spin_lock(&uap->port.lock);
}

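/*
 * The FIFO timeout interrupt fired while RX DMA was running: pause the
 * transfer to get a trustworthy residue, take the characters received
 * so far, then switch buffers and re-trigger the DMA job.
 */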
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

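/*
 * DMA engine completion callback, called without the port lock held
 * (it takes the lock itself): flush the filled buffer and immediately
 * kick off the next DMA job on the other buffer.
 */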
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME. Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}

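/*
 * Allocate and map the DMA buffers at port startup: a single TX bounce
 * buffer, plus the two RX scatterlist buffers if an RX channel exists.
 * Any failure here simply leaves the port in interrupt mode.
 */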
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
		       uap->port.membase + ST_UART011_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
	}
}

static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}


#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif

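/*
 * The serial core calls the following uart_ops with the port lock held
 * and interrupts disabled.
 */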
static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im &= ~UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	pl011_dma_tx_stop(uap);
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	if (!pl011_dma_tx_start(uap)) {
		uap->im |= UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	writew(uap->im, uap->port.membase + UART011_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
{
	struct tty_struct *tty = uap->port.state->port.tty;

	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(tty);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
		} else
			uap->im &= ~UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
	spin_lock(&uap->port.lock);
}

static void pl011_tx_chars(struct uart_amba_port *uap)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count;

	if (uap->port.x_char) {
		writew(uap->port.x_char, uap->port.membase + UART01x_DR);
		uap->port.icount.tx++;
		uap->port.x_char = 0;
		return;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return;

	count = uap->fifosize >> 1;
	do {
		writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uap->port.icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	} while (--count > 0);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit))
		pl011_stop_tx(&uap->port);
}

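/*
 * Sample the modem status lines from the flag register, fold the delta
 * against the previous snapshot into the relevant counters, and wake up
 * anyone sleeping on a modem-status change.
 */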
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & UART01x_FR_DSR)
		uap->port.icount.dsr++;

	if (delta & UART01x_FR_CTS)
		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

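/*
 * Main interrupt handler: ack and dispatch the masked interrupt status
 * in a loop, bounded by AMBA_ISR_PASS_LIMIT so a busy port cannot
 * monopolise the CPU.
 */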
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;
	unsigned int dummy_read;

	spin_lock_irqsave(&uap->port.lock, flags);

	status = readw(uap->port.membase + UART011_MIS);
	if (status) {
		do {
			if (uap->vendor->cts_event_workaround) {
				/* workaround to make sure that all bits are unlocked.. */
				writew(0x00, uap->port.membase + UART011_ICR);

				/*
				 * WA: introduce 26ns(1 uart clk) delay before W1C;
				 * single apb access will incur 2 pclk(133.12Mhz) delay,
				 * so add 2 dummy reads
				 */
				dummy_read = readw(uap->port.membase + UART011_ICR);
				dummy_read = readw(uap->port.membase + UART011_ICR);
			}

			writew(status & ~(UART011_TXIS|UART011_RTIS|
					  UART011_RXIS),
			       uap->port.membase + UART011_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap);

			if (pass_counter-- == 0)
				break;

			status = readw(uap->port.membase + UART011_MIS);
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}

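/*
 * The port is only truly empty once the FIFO has drained and the
 * transmitter is idle, hence checking BUSY as well as TXFF.
 */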
static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int status = readw(uap->port.membase + UART01x_FR);
	return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
}

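/*
 * The local TIOCMBIT helpers below map individual flag/control register
 * bits onto the corresponding TIOCM_* modem-control bits.
 */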
static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int result = 0;
	unsigned int status = readw(uap->port.membase + UART01x_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
	TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
	TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;

	cr = readw(uap->port.membase + UART011_CR);

#define	TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (uap->autorts) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	writew(cr, uap->port.membase + UART011_CR);
}

static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = readw(uap->port.membase + uap->lcrh_tx);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned char __iomem *regs = uap->port.membase;

	writew(readw(regs + UART011_MIS), regs + UART011_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = readw(uap->port.membase + UART01x_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return readw(uap->port.membase + UART01x_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
			 unsigned char ch)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();

	writew(ch, uap->port.membase + UART01x_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

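/*
 * Bring the hardware to a known state: select the default pin state,
 * enable the clock, clear stale error/RX interrupts and run any
 * platform init hook. Shared by startup() and poll_init().
 */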
static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	if (!IS_ERR(uap->pins_default)) {
		retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
		if (retval)
			dev_err(port->dev,
				"could not set default pins\n");
	}

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		goto out;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
	       UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);

	/*
	 * Save interrupts enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
	uap->im = readw(uap->port.membase + UART011_IMSC);
	writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);

	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->init)
			plat->init();
	}
	return 0;
 out:
	return retval;
}

static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	writew(uap->im, uap->port.membase + UART011_IMSC);

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
	if (retval)
		goto clk_dis;

	writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);

	/*
	 * Provoke TX FIFO interrupt into asserting.
	 */
	cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
	writew(cr, uap->port.membase + UART011_CR);
	writew(0, uap->port.membase + UART011_FBRD);
	writew(1, uap->port.membase + UART011_IBRD);
	writew(0, uap->port.membase + uap->lcrh_rx);
	if (uap->lcrh_tx != uap->lcrh_rx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(0, uap->port.membase + uap->lcrh_tx);
	}
	writew(0, uap->port.membase + UART01x_DR);
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	/*
	 * Finally, enable interrupts, only timeouts when using DMA
	 * if initial RX DMA job failed, start in interrupt mode
	 * as well.
	 */
	spin_lock_irq(&uap->port.lock);
	/* Clear out any spuriously appearing RX interrupts */
	writew(UART011_RTIS | UART011_RXIS,
	       uap->port.membase + UART011_ICR);
	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	spin_unlock_irq(&uap->port.lock);

	return 0;

 clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}

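/*
 * Disable break and the FIFOs on one of the (possibly two, on ST
 * variants) LCRH registers.
 */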
static void pl011_shutdown_channel(struct uart_amba_port *uap,
					unsigned int lcrh)
{
	unsigned long val;

	val = readw(uap->port.membase + lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	writew(val, uap->port.membase + lcrh);
}

static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;
	int retval;

	/*
	 * disable all interrupts
	 */
	spin_lock_irq(&uap->port.lock);
	uap->im = 0;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);
	spin_unlock_irq(&uap->port.lock);

	pl011_dma_shutdown(uap);

	/*
	 * Free the interrupt
	 */
	free_irq(uap->port.irq, uap);

	/*
	 * disable the port. It should not disable RTS and DTR.
	 * Also RTS and DTR state should be preserved to restore
	 * it during startup().
	 */
	uap->autorts = false;
	cr = readw(uap->port.membase + UART011_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx)
		pl011_shutdown_channel(uap, uap->lcrh_tx);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	if (!IS_ERR(uap->pins_sleep)) {
		retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
		if (retval)
			dev_err(port->dev,
				"could not set pins to sleep state\n");
	}

	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->exit)
			plat->exit();
	}
}

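/*
 * Work out the divisor and line-control settings for the requested
 * termios, then program them with the UART briefly disabled. The
 * divisor is a 16.6 fixed-point value split across the IBRD/FBRD
 * registers.
 */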
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		     struct ktermios *old)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);

	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: // CS8
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = readw(port->membase + UART011_CR);
	writew(0, port->membase + UART011_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		uap->autorts = true;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		uap->autorts = false;
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	writew(quot & 0x3f, port->membase + UART011_FBRD);
	writew(quot >> 6, port->membase + UART011_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
	 * UART011_FBRD & UART011_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	writew(lcr_h, port->membase + uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(lcr_h, port->membase + uap->lcrh_tx);
	}
	writew(old_cr, port->membase + UART011_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}

/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}

static struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();
	writew(ch, uap->port.membase + UART01x_DR);
}

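/*
 * Console write: take the port lock only when it is safe to do so
 * (skipped under sysrq, trylocked in an oops), disable CTS flow
 * control while the message goes out, then restore the original
 * control register once the transmitter has drained.
 */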
1772static void
1773pl011_console_write(struct console *co, const char *s, unsigned int count)
1774{
1775 struct uart_amba_port *uap = amba_ports[co->index];
1776 unsigned int status, old_cr, new_cr;
ef605fdb
RV
1777 unsigned long flags;
1778 int locked = 1;
1da177e4
LT
1779
1780 clk_enable(uap->clk);
1781
ef605fdb
RV
1782 local_irq_save(flags);
1783 if (uap->port.sysrq)
1784 locked = 0;
1785 else if (oops_in_progress)
1786 locked = spin_trylock(&uap->port.lock);
1787 else
1788 spin_lock(&uap->port.lock);
1789
1da177e4
LT
1790 /*
1791 * First save the CR then disable the interrupts
1792 */
1793 old_cr = readw(uap->port.membase + UART011_CR);
1794 new_cr = old_cr & ~UART011_CR_CTSEN;
1795 new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1796 writew(new_cr, uap->port.membase + UART011_CR);
1797
d358788f 1798 uart_console_write(&uap->port, s, count, pl011_console_putchar);
1da177e4
LT
1799
1800 /*
1801 * Finally, wait for transmitter to become empty
1802 * and restore the TCR
1803 */
1804 do {
1805 status = readw(uap->port.membase + UART01x_FR);
1806 } while (status & UART01x_FR_BUSY);
1807 writew(old_cr, uap->port.membase + UART011_CR);
1808
ef605fdb
RV
1809 if (locked)
1810 spin_unlock(&uap->port.lock);
1811 local_irq_restore(flags);
1812
1da177e4
LT
1813 clk_disable(uap->clk);
1814}
1815
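The locked/trylock dance above is the canonical console pattern: a sysrq print arrives with the port lock already held, an oops must never deadlock on a lock the dying CPU may own, and the normal path simply takes the lock. Isolated into a sketch (the helper name is made up):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/serial_core.h>

/* Hypothetical extraction of the locking policy in
 * pl011_console_write; returns 1 if we took the lock ourselves. */
static int console_lock_policy(struct uart_port *port, unsigned long *flags)
{
	unsigned long irqflags;
	int locked = 1;

	local_irq_save(irqflags);
	if (port->sysrq)
		locked = 0;		/* lock already held by sysrq path */
	else if (oops_in_progress)
		locked = spin_trylock(&port->lock);	/* never block */
	else
		spin_lock(&port->lock);

	*flags = irqflags;
	return locked;
}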
1816static void __init
1817pl011_console_get_options(struct uart_amba_port *uap, int *baud,
1818 int *parity, int *bits)
1819{
1820 if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
1821 unsigned int lcr_h, ibrd, fbrd;
1822
ec489aa8 1823 lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1da177e4
LT
1824
1825 *parity = 'n';
1826 if (lcr_h & UART01x_LCRH_PEN) {
1827 if (lcr_h & UART01x_LCRH_EPS)
1828 *parity = 'e';
1829 else
1830 *parity = 'o';
1831 }
1832
1833 if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
1834 *bits = 7;
1835 else
1836 *bits = 8;
1837
1838 ibrd = readw(uap->port.membase + UART011_IBRD);
1839 fbrd = readw(uap->port.membase + UART011_FBRD);
1840
1841 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
ac3e3fb4 1842
c19f12b5 1843 if (uap->vendor->oversampling) {
ac3e3fb4
LW
1844 if (readw(uap->port.membase + UART011_CR)
1845 & ST_UART011_CR_OVSFACT)
1846 *baud *= 2;
1847 }
1da177e4
LT
1848 }
1849}
1850
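The expression uartclk * 4 / (64 * ibrd + fbrd) inverts the PL011 divisor formula baud = uartclk / (16 * (IBRD + FBRD/64)); multiplying numerator and denominator by 64/16 = 4 keeps the arithmetic integral. A worked example, assuming a 24 MHz uartclk (common on reference boards, but board-specific):

/* Worked example of the divisor arithmetic above, assuming
 * uartclk = 24 MHz programmed for 115200 baud:
 *
 *   divisor = 24000000 / (16 * 115200) = 13.0208...
 *   IBRD = 13, FBRD = round(0.0208 * 64) = 1
 *
 * recovering it as pl011_console_get_options does:
 *
 *   24000000 * 4 / (64 * 13 + 1) = 96000000 / 833 = 115246
 *
 * which is well inside the ~2% tolerance the termios layer uses to
 * snap to the standard 115200. (Helper name hypothetical; the
 * driver computes this inline.) */
static unsigned int pl011_recover_baud(unsigned int uartclk,
				       unsigned int ibrd, unsigned int fbrd)
{
	return uartclk * 4 / (64 * ibrd + fbrd);
}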
1851static int __init pl011_console_setup(struct console *co, char *options)
1852{
1853 struct uart_amba_port *uap;
1854 int baud = 38400;
1855 int bits = 8;
1856 int parity = 'n';
1857 int flow = 'n';
4b4851c6 1858 int ret;
1da177e4
LT
1859
1860 /*
1861 * Check whether an invalid uart number has been specified, and
1862 * if so, fall back to port 0; the lookup below then fails with
1863 * -ENODEV if that port has not been registered.
1864 */
1865 if (co->index >= UART_NR)
1866 co->index = 0;
1867 uap = amba_ports[co->index];
d28122a5
RK
1868 if (!uap)
1869 return -ENODEV;
1da177e4 1870
78d80c5a
LW
1871 /* Allow pins to be muxed in and configured */
1872 if (!IS_ERR(uap->pins_default)) {
1873 ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
1874 if (ret)
1875 dev_err(uap->port.dev,
1876 "could not set default pins\n");
1877 }
1878
4b4851c6
RK
1879 ret = clk_prepare(uap->clk);
1880 if (ret)
1881 return ret;
1882
c16d51a3
SKS
1883 if (uap->port.dev->platform_data) {
1884 struct amba_pl011_data *plat;
1885
1886 plat = uap->port.dev->platform_data;
1887 if (plat->init)
1888 plat->init();
1889 }
1890
1da177e4
LT
1891 uap->port.uartclk = clk_get_rate(uap->clk);
1892
1893 if (options)
1894 uart_parse_options(options, &baud, &parity, &bits, &flow);
1895 else
1896 pl011_console_get_options(uap, &baud, &parity, &bits);
1897
1898 return uart_set_options(&uap->port, co, baud, parity, bits, flow);
1899}
1900
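The options string comes from the kernel command line, e.g. console=ttyAMA0,115200n8; when it is absent the driver trusts whatever the boot loader programmed, via pl011_console_get_options. A simplified, userspace-style sketch of the "115200n8" convention that uart_parse_options implements (not the serial core's code):

#include <stdlib.h>

/* Simplified illustration of the "<baud><parity><bits>" convention,
 * e.g. "115200n8"; uart_parse_options in serial_core is the real
 * implementation (it also handles a flow-control suffix). */
static void parse_console_options(const char *s, int *baud,
				  int *parity, int *bits)
{
	char *end;

	*baud = (int)strtol(s, &end, 10);	/* "115200" */
	if (*end)
		*parity = *end++;		/* 'n', 'e' or 'o' */
	if (*end)
		*bits = *end - '0';		/* '8' -> 8 */
}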
2d93486c 1901static struct uart_driver amba_reg;
1da177e4
LT
1902static struct console amba_console = {
1903 .name = "ttyAMA",
1904 .write = pl011_console_write,
1905 .device = uart_console_device,
1906 .setup = pl011_console_setup,
1907 .flags = CON_PRINTBUFFER,
1908 .index = -1,
1909 .data = &amba_reg,
1910};
1911
1912#define AMBA_CONSOLE (&amba_console)
1913#else
1914#define AMBA_CONSOLE NULL
1915#endif
1916
1917static struct uart_driver amba_reg = {
1918 .owner = THIS_MODULE,
1919 .driver_name = "ttyAMA",
1920 .dev_name = "ttyAMA",
1921 .major = SERIAL_AMBA_MAJOR,
1922 .minor = SERIAL_AMBA_MINOR,
1923 .nr = UART_NR,
1924 .cons = AMBA_CONSOLE,
1925};
1926
32614aad
ML
1927static int pl011_probe_dt_alias(int index, struct device *dev)
1928{
1929 struct device_node *np;
1930 static bool seen_dev_with_alias = false;
1931 static bool seen_dev_without_alias = false;
1932 int ret = index;
1933
1934 if (!IS_ENABLED(CONFIG_OF))
1935 return ret;
1936
1937 np = dev->of_node;
1938 if (!np)
1939 return ret;
1940
1941 ret = of_alias_get_id(np, "serial");
1942 if (IS_ERR_VALUE(ret)) {
1943 seen_dev_without_alias = true;
1944 ret = index;
1945 } else {
1946 seen_dev_with_alias = true;
1947 if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
1948 dev_warn(dev, "requested serial port %d not available.\n", ret);
1949 ret = index;
1950 }
1951 }
1952
1953 if (seen_dev_with_alias && seen_dev_without_alias)
1954 dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
1955
1956 return ret;
1957}
1958
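A device tree can pin port numbering with entries like serial0 = &uart0; in its /aliases node; of_alias_get_id returns that index or a negative errno. The policy the function implements, condensed (helper name hypothetical):

/* Hypothetical condensation of pl011_probe_dt_alias's decision:
 * a DT alias wins only if it is in range and unclaimed; otherwise
 * the first-free index found by the probe loop stands. */
static int pick_port_index(int first_free, int alias_id)
{
	if (alias_id < 0)		/* no "serialN" alias in the DT */
		return first_free;
	if (alias_id >= UART_NR || amba_ports[alias_id])
		return first_free;	/* out of range or already taken */
	return alias_id;
}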
aa25afad 1959static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
1da177e4
LT
1960{
1961 struct uart_amba_port *uap;
5926a295 1962 struct vendor_data *vendor = id->data;
1da177e4
LT
1963 void __iomem *base;
1964 int i, ret;
1965
1966 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
1967 if (amba_ports[i] == NULL)
1968 break;
1969
1970 if (i == ARRAY_SIZE(amba_ports)) {
1971 ret = -EBUSY;
1972 goto out;
1973 }
1974
de609582
LW
1975 uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
1976 GFP_KERNEL);
1da177e4
LT
1977 if (uap == NULL) {
1978 ret = -ENOMEM;
1979 goto out;
1980 }
1981
32614aad
ML
1982 i = pl011_probe_dt_alias(i, &dev->dev);
1983
de609582
LW
1984 base = devm_ioremap(&dev->dev, dev->res.start,
1985 resource_size(&dev->res));
1da177e4
LT
1986 if (!base) {
1987 ret = -ENOMEM;
de609582 1988 goto out;
1da177e4
LT
1989 }
1990
78d80c5a
LW
1991 uap->pinctrl = devm_pinctrl_get(&dev->dev);
1992 if (IS_ERR(uap->pinctrl)) {
1993 ret = PTR_ERR(uap->pinctrl);
de609582 1994 goto out;
258e0551 1995 }
78d80c5a
LW
1996 uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
1997 PINCTRL_STATE_DEFAULT);
1998 if (IS_ERR(uap->pins_default))
1999 dev_err(&dev->dev, "could not get default pinstate\n");
2000
2001 uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
2002 PINCTRL_STATE_SLEEP);
2003 if (IS_ERR(uap->pins_sleep))
2004 dev_dbg(&dev->dev, "could not get sleep pinstate\n");
258e0551 2005
de609582 2006 uap->clk = devm_clk_get(&dev->dev, NULL);
1da177e4
LT
2007 if (IS_ERR(uap->clk)) {
2008 ret = PTR_ERR(uap->clk);
de609582 2009 goto out;
1da177e4
LT
2010 }
2011
c19f12b5 2012 uap->vendor = vendor;
ec489aa8
LW
2013 uap->lcrh_rx = vendor->lcrh_rx;
2014 uap->lcrh_tx = vendor->lcrh_tx;
d8d8ffa4 2015 uap->old_cr = 0;
ffca2b11 2016 uap->fifosize = vendor->fifosize;
1da177e4
LT
2017 uap->port.dev = &dev->dev;
2018 uap->port.mapbase = dev->res.start;
2019 uap->port.membase = base;
2020 uap->port.iotype = UPIO_MEM;
2021 uap->port.irq = dev->irq[0];
ffca2b11 2022 uap->port.fifosize = uap->fifosize;
1da177e4
LT
2023 uap->port.ops = &amba_pl011_pops;
2024 uap->port.flags = UPF_BOOT_AUTOCONF;
2025 uap->port.line = i;
68b65f73 2026 pl011_dma_probe(uap);
1da177e4 2027
c3d8b76f
LW
2028 /* Ensure interrupts from this UART are masked and cleared */
2029 writew(0, uap->port.membase + UART011_IMSC);
2030 writew(0xffff, uap->port.membase + UART011_ICR);
2031
e8a7ba86
RK
2032 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
2033
1da177e4
LT
2034 amba_ports[i] = uap;
2035
2036 amba_set_drvdata(dev, uap);
2037 ret = uart_add_one_port(&amba_reg, &uap->port);
2038 if (ret) {
2039 amba_set_drvdata(dev, NULL);
2040 amba_ports[i] = NULL;
68b65f73 2041 pl011_dma_remove(uap);
1da177e4
LT
2042 }
2043 out:
2044 return ret;
2045}
2046
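Every acquisition in this probe is devm-managed (devm_kzalloc, devm_ioremap, devm_pinctrl_get, devm_clk_get), which is why each failure path can simply goto out: the driver core releases the allocation, mapping, pinctrl handle and clock automatically when probe fails or the device goes away. For contrast, the unmanaged shape this avoids (a sketch, not a proposed change):

#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/amba/bus.h>

/* Sketch of the manual unwind ladder that the devm_* calls in
 * pl011_probe make unnecessary. */
static int probe_unmanaged_style(struct amba_device *dev)
{
	void __iomem *base;
	struct clk *clk = clk_get(&dev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!base) {
		clk_put(clk);	/* explicit rollback, stage by stage */
		return -ENOMEM;
	}

	/* ... and remove() would need matching iounmap()/clk_put() ... */
	iounmap(base);
	clk_put(clk);
	return 0;
}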
2047static int pl011_remove(struct amba_device *dev)
2048{
2049 struct uart_amba_port *uap = amba_get_drvdata(dev);
2050 int i;
2051
2052 amba_set_drvdata(dev, NULL);
2053
2054 uart_remove_one_port(&amba_reg, &uap->port);
2055
2056 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2057 if (amba_ports[i] == uap)
2058 amba_ports[i] = NULL;
2059
68b65f73 2060 pl011_dma_remove(uap);
1da177e4
LT
2061 return 0;
2062}
2063
b736b89f
LC
2064#ifdef CONFIG_PM
2065static int pl011_suspend(struct amba_device *dev, pm_message_t state)
2066{
2067 struct uart_amba_port *uap = amba_get_drvdata(dev);
2068
2069 if (!uap)
2070 return -EINVAL;
2071
2072 return uart_suspend_port(&amba_reg, &uap->port);
2073}
2074
2075static int pl011_resume(struct amba_device *dev)
2076{
2077 struct uart_amba_port *uap = amba_get_drvdata(dev);
2078
2079 if (!uap)
2080 return -EINVAL;
2081
2082 return uart_resume_port(&amba_reg, &uap->port);
2083}
2084#endif
2085
2c39c9e1 2086static struct amba_id pl011_ids[] = {
1da177e4
LT
2087 {
2088 .id = 0x00041011,
2089 .mask = 0x000fffff,
5926a295
AR
2090 .data = &vendor_arm,
2091 },
2092 {
2093 .id = 0x00380802,
2094 .mask = 0x00ffffff,
2095 .data = &vendor_st,
1da177e4
LT
2096 },
2097 { 0, 0 },
2098};
2099
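AMBA matching reads the primecell peripheral-ID registers and compares them masked: the ARM entry's 0x000fffff mask drops the revision nibble, so any revision of ARM part 0x011 matches, while the ST entry's wider 0x00ffffff mask also pins the revision. The per-entry comparison the bus core makes amounts to:

#include <linux/amba/bus.h>

/* Sketch of the per-entry test behind this table (helper name
 * hypothetical; the real loop lives in the AMBA bus core). */
static int id_entry_matches(unsigned int periphid,
			    const struct amba_id *entry)
{
	return (periphid & entry->mask) == entry->id;
}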
60f7a33b
DM
2100MODULE_DEVICE_TABLE(amba, pl011_ids);
2101
1da177e4
LT
2102static struct amba_driver pl011_driver = {
2103 .drv = {
2104 .name = "uart-pl011",
2105 },
2106 .id_table = pl011_ids,
2107 .probe = pl011_probe,
2108 .remove = pl011_remove,
b736b89f
LC
2109#ifdef CONFIG_PM
2110 .suspend = pl011_suspend,
2111 .resume = pl011_resume,
2112#endif
1da177e4
LT
2113};
2114
2115static int __init pl011_init(void)
2116{
2117 int ret;
2118 printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
2119
2120 ret = uart_register_driver(&amba_reg);
2121 if (ret == 0) {
2122 ret = amba_driver_register(&pl011_driver);
2123 if (ret)
2124 uart_unregister_driver(&amba_reg);
2125 }
2126 return ret;
2127}
2128
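pl011_init is the usual two-stage registration with rollback: if the second registration fails, the first is undone so a failed load leaves nothing behind. Generalized (the hooks below are placeholders, not kernel API):

/* Placeholder hooks standing in for uart_register_driver() and
 * amba_driver_register(); the rollback idiom is the point. */
int register_a(void);
int register_b(void);
void unregister_a(void);

static int register_pair(void)
{
	int ret = register_a();

	if (ret)
		return ret;
	ret = register_b();
	if (ret)
		unregister_a();		/* undo stage one on failure */
	return ret;
}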
2129static void __exit pl011_exit(void)
2130{
2131 amba_driver_unregister(&pl011_driver);
2132 uart_unregister_driver(&amba_reg);
2133}
2134
4dd9e742
AR
2135/*
2136 * While this can be a module, if built in it is most likely the console,
2137 * so leave module_exit but move module_init to an earlier initcall level
2138 */
2139arch_initcall(pl011_init);
1da177e4
LT
2140module_exit(pl011_exit);
2141
2142MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
2143MODULE_DESCRIPTION("ARM AMBA serial port driver");
2144MODULE_LICENSE("GPL");