/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)

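/*
 * The data register returns the received character in bits 0-7 and the
 * FE/PE/BE/OE error flags in bits 8-11.  UART_DUMMY_DR_RX (bit 16)
 * corresponds to no real bit and is OR'd into every character taken
 * from the FIFO; adding it to port->ignore_status_mask (see
 * pl011_set_termios()) is what makes "ignore all characters" work when
 * CREAD is clear.
 */
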
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
        unsigned int            ifls;
        unsigned int            fifosize;
        unsigned int            lcrh_tx;
        unsigned int            lcrh_rx;
        bool                    oversampling;
        bool                    dma_threshold;
        bool                    cts_event_workaround;
};

static struct vendor_data vendor_arm = {
        .ifls                   = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
        .fifosize               = 16,
        .lcrh_tx                = UART011_LCRH,
        .lcrh_rx                = UART011_LCRH,
        .oversampling           = false,
        .dma_threshold          = false,
        .cts_event_workaround   = false,
};

static struct vendor_data vendor_st = {
        .ifls                   = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
        .fifosize               = 64,
        .lcrh_tx                = ST_UART011_LCRH_TX,
        .lcrh_rx                = ST_UART011_LCRH_RX,
        .oversampling           = true,
        .dma_threshold          = true,
        .cts_event_workaround   = true,
};

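/*
 * A further vendor variant would slot in as another table entry,
 * associated with its peripheral ID via the driver's amba_id table.
 * A purely illustrative sketch - these values describe no real part:
 *
 *	static struct vendor_data vendor_foo = {
 *		.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
 *		.fifosize		= 32,
 *		.lcrh_tx		= UART011_LCRH,
 *		.lcrh_rx		= UART011_LCRH,
 *		.oversampling		= false,
 *		.dma_threshold		= false,
 *		.cts_event_workaround	= false,
 *	};
 */
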
static struct uart_amba_port *amba_ports[UART_NR];

/* Deals with DMA transactions */

struct pl011_sgbuf {
        struct scatterlist sg;
        char *buf;
};

struct pl011_dmarx_data {
        struct dma_chan         *chan;
        struct completion       complete;
        bool                    use_buf_b;
        struct pl011_sgbuf      sgbuf_a;
        struct pl011_sgbuf      sgbuf_b;
        dma_cookie_t            cookie;
        bool                    running;
};

struct pl011_dmatx_data {
        struct dma_chan         *chan;
        struct scatterlist      sg;
        char                    *buf;
        bool                    queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
        struct uart_port        port;
        struct clk              *clk;
        /* Two optional pin states - default & sleep */
        struct pinctrl          *pinctrl;
        struct pinctrl_state    *pins_default;
        struct pinctrl_state    *pins_sleep;
        const struct vendor_data *vendor;
        unsigned int            dmacr;          /* dma control reg */
        unsigned int            im;             /* interrupt mask */
        unsigned int            old_status;
        unsigned int            fifosize;       /* vendor-specific */
        unsigned int            lcrh_tx;        /* vendor-specific */
        unsigned int            lcrh_rx;        /* vendor-specific */
        unsigned int            old_cr;         /* state during shutdown */
        bool                    autorts;
        char                    type[12];
#ifdef CONFIG_DMA_ENGINE
        /* DMA stuff */
        bool                    using_tx_dma;
        bool                    using_rx_dma;
        struct pl011_dmarx_data dmarx;
        struct pl011_dmatx_data dmatx;
#endif
};

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
        u16 status;
        /* ch must be wider than 16 bits: UART_DUMMY_DR_RX is bit 16 */
        unsigned int ch, flag, max_count = 256;
        int fifotaken = 0;

        while (max_count--) {
                status = readw(uap->port.membase + UART01x_FR);
                if (status & UART01x_FR_RXFE)
                        break;

                /* Take chars from the FIFO and update status */
                ch = readw(uap->port.membase + UART01x_DR) |
                        UART_DUMMY_DR_RX;
                flag = TTY_NORMAL;
                uap->port.icount.rx++;
                fifotaken++;

                if (unlikely(ch & UART_DR_ERROR)) {
                        if (ch & UART011_DR_BE) {
                                ch &= ~(UART011_DR_FE | UART011_DR_PE);
                                uap->port.icount.brk++;
                                if (uart_handle_break(&uap->port))
                                        continue;
                        } else if (ch & UART011_DR_PE)
                                uap->port.icount.parity++;
                        else if (ch & UART011_DR_FE)
                                uap->port.icount.frame++;
                        if (ch & UART011_DR_OE)
                                uap->port.icount.overrun++;

                        ch &= uap->port.read_status_mask;

                        if (ch & UART011_DR_BE)
                                flag = TTY_BREAK;
                        else if (ch & UART011_DR_PE)
                                flag = TTY_PARITY;
                        else if (ch & UART011_DR_FE)
                                flag = TTY_FRAME;
                }

                if (uart_handle_sysrq_char(&uap->port, ch & 255))
                        continue;

                uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
        }

        return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
        enum dma_data_direction dir)
{
        sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
        if (!sg->buf)
                return -ENOMEM;

        sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);

        if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
                kfree(sg->buf);
                return -EINVAL;
        }
        return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
        enum dma_data_direction dir)
{
        if (sg->buf) {
                dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
                kfree(sg->buf);
        }
}

static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
{
        /* DMA is the sole user of the platform data right now */
        struct amba_pl011_data *plat = uap->port.dev->platform_data;
        struct dma_slave_config tx_conf = {
                .dst_addr = uap->port.mapbase + UART01x_DR,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                .direction = DMA_MEM_TO_DEV,
                .dst_maxburst = uap->fifosize >> 1,
                .device_fc = false,
        };
        struct dma_chan *chan;
        dma_cap_mask_t mask;

        /* We need platform data */
        if (!plat || !plat->dma_filter) {
                dev_info(uap->port.dev, "no DMA platform data\n");
                return;
        }

        /* Try to acquire a generic DMA engine slave TX channel */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
        if (!chan) {
                dev_err(uap->port.dev, "no TX DMA channel!\n");
                return;
        }

        dmaengine_slave_config(chan, &tx_conf);
        uap->dmatx.chan = chan;

        dev_info(uap->port.dev, "DMA channel TX %s\n",
                 dma_chan_name(uap->dmatx.chan));

        /* Optionally make use of an RX channel as well */
        if (plat->dma_rx_param) {
                struct dma_slave_config rx_conf = {
                        .src_addr = uap->port.mapbase + UART01x_DR,
                        .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                        .direction = DMA_DEV_TO_MEM,
                        .src_maxburst = uap->fifosize >> 1,
                        .device_fc = false,
                };

                chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
                if (!chan) {
                        dev_err(uap->port.dev, "no RX DMA channel!\n");
                        return;
                }

                dmaengine_slave_config(chan, &rx_conf);
                uap->dmarx.chan = chan;

                dev_info(uap->port.dev, "DMA channel RX %s\n",
                         dma_chan_name(uap->dmarx.chan));
        }
}

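/*
 * For reference, board code supplies the filter and channel cookies
 * through struct amba_pl011_data (<linux/amba/serial.h>).  A purely
 * hypothetical sketch - my_dma_filter, uart0_tx_req and uart0_rx_req
 * stand in for whatever the platform's dmaengine driver provides:
 *
 *	static struct amba_pl011_data uart0_plat_data = {
 *		.dma_filter	= my_dma_filter,
 *		.dma_tx_param	= &uart0_tx_req,
 *		.dma_rx_param	= &uart0_rx_req,
 *	};
 */
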
#ifndef MODULE
/*
 * Stack up the UARTs and let the above initcall be done at device
 * initcall time, because the serial driver is called as an arch
 * initcall, and at this time the DMA subsystem is not yet registered.
 * At this point the driver will switch over to using DMA where desired.
 */
struct dma_uap {
        struct list_head node;
        struct uart_amba_port *uap;
};

static LIST_HEAD(pl011_dma_uarts);

static int __init pl011_dma_initcall(void)
{
        struct list_head *node, *tmp;

        list_for_each_safe(node, tmp, &pl011_dma_uarts) {
                struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
                pl011_dma_probe_initcall(dmau->uap);
                list_del(node);
                kfree(dmau);
        }
        return 0;
}

device_initcall(pl011_dma_initcall);

static void pl011_dma_probe(struct uart_amba_port *uap)
{
        struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
        if (dmau) {
                dmau->uap = uap;
                list_add_tail(&dmau->node, &pl011_dma_uarts);
        }
}
#else
static void pl011_dma_probe(struct uart_amba_port *uap)
{
        pl011_dma_probe_initcall(uap);
}
#endif

static void pl011_dma_remove(struct uart_amba_port *uap)
{
        /* TODO: remove the initcall if it has not yet executed */
        if (uap->dmatx.chan)
                dma_release_channel(uap->dmatx.chan);
        if (uap->dmarx.chan)
                dma_release_channel(uap->dmarx.chan);
}

/* Forward declare this for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
        struct uart_amba_port *uap = data;
        struct pl011_dmatx_data *dmatx = &uap->dmatx;
        unsigned long flags;
        u16 dmacr;

        spin_lock_irqsave(&uap->port.lock, flags);
        if (uap->dmatx.queued)
                dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
                             DMA_TO_DEVICE);

        dmacr = uap->dmacr;
        uap->dmacr = dmacr & ~UART011_TXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);

        /*
         * If TX DMA was disabled, it means that we've stopped the DMA for
         * some reason (eg, XOFF received, or we want to send an X-char.)
         *
         * Note: we need to be careful here of a potential race between DMA
         * and the rest of the driver - if the driver disables TX DMA while
         * a TX buffer is completing, we must update the tx queued status to
         * get further refills (hence we check dmacr).
         */
        if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
            uart_circ_empty(&uap->port.state->xmit)) {
                uap->dmatx.queued = false;
                spin_unlock_irqrestore(&uap->port.lock, flags);
                return;
        }

        if (pl011_dma_tx_refill(uap) <= 0) {
                /*
                 * We didn't queue a DMA buffer for some reason, but we
                 * have data pending to be sent.  Re-enable the TX IRQ.
                 */
                uap->im |= UART011_TXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
        }
        spin_unlock_irqrestore(&uap->port.lock, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
        struct pl011_dmatx_data *dmatx = &uap->dmatx;
        struct dma_chan *chan = dmatx->chan;
        struct dma_device *dma_dev = chan->device;
        struct dma_async_tx_descriptor *desc;
        struct circ_buf *xmit = &uap->port.state->xmit;
        unsigned int count;

        /*
         * Try to avoid the overhead involved in using DMA if the
         * transaction fits in the first half of the FIFO, by using
         * the standard interrupt handling.  This ensures that we
         * issue a uart_write_wakeup() at the appropriate time.
         */
        count = uart_circ_chars_pending(xmit);
        if (count < (uap->fifosize >> 1)) {
                uap->dmatx.queued = false;
                return 0;
        }

        /*
         * Bodge: don't send the last character by DMA, as this
         * will prevent XON from notifying us to restart DMA.
         */
        count -= 1;

        /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
        if (count > PL011_DMA_BUFFER_SIZE)
                count = PL011_DMA_BUFFER_SIZE;

        if (xmit->tail < xmit->head)
                memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
        else {
                size_t first = UART_XMIT_SIZE - xmit->tail;
                size_t second = xmit->head;

                memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
                if (second)
                        memcpy(&dmatx->buf[first], &xmit->buf[0], second);
        }

        dmatx->sg.length = count;

        if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
                uap->dmatx.queued = false;
                dev_dbg(uap->port.dev, "unable to map TX DMA\n");
                return -EBUSY;
        }

        desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
                uap->dmatx.queued = false;
                /*
                 * If DMA cannot be used right now, we complete this
                 * transaction via IRQ and let the TTY layer retry.
                 */
                dev_dbg(uap->port.dev, "TX DMA busy\n");
                return -EBUSY;
        }

        /* Some data to go along to the callback */
        desc->callback = pl011_dma_tx_callback;
        desc->callback_param = uap;

        /* All errors should happen at prepare time */
        dmaengine_submit(desc);

        /* Fire the DMA transaction */
        dma_dev->device_issue_pending(chan);

        uap->dmacr |= UART011_TXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmatx.queued = true;

        /*
         * Now we know that DMA will fire, so advance the ring buffer
         * with the stuff we just dispatched.
         */
        xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
        uap->port.icount.tx += count;

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&uap->port);

        return 1;
}

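/*
 * Worked example of the wrap-around copy above, assuming
 * UART_XMIT_SIZE = 4096: with xmit->tail = 4090 and xmit->head = 10,
 * 16 characters are pending and straddle the end of the ring, so
 * first = 4096 - 4090 = 6 bytes are copied from the tail and
 * second = 10 bytes from index 0, linearising them into dmatx->buf.
 * Only count bytes (one less than pending, per the XON bodge above)
 * are then submitted to the DMA engine.
 */
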
/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
        if (!uap->using_tx_dma)
                return false;

        /*
         * If we already have a TX buffer queued, but received a
         * TX interrupt, it will be because we've just sent an X-char.
         * Ensure the TX DMA is enabled and the TX IRQ is disabled.
         */
        if (uap->dmatx.queued) {
                uap->dmacr |= UART011_TXDMAE;
                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
                uap->im &= ~UART011_TXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
                return true;
        }

        /*
         * We don't have a TX buffer queued, so try to queue one.
         * If we successfully queued a buffer, mask the TX IRQ.
         */
        if (pl011_dma_tx_refill(uap) > 0) {
                uap->im &= ~UART011_TXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
                return true;
        }
        return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
        if (uap->dmatx.queued) {
                uap->dmacr &= ~UART011_TXDMAE;
                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        }
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
        u16 dmacr;

        if (!uap->using_tx_dma)
                return false;

        if (!uap->port.x_char) {
                /* no X-char, try to push chars out in DMA mode */
                bool ret = true;

                if (!uap->dmatx.queued) {
                        if (pl011_dma_tx_refill(uap) > 0) {
                                uap->im &= ~UART011_TXIM;
                                ret = true;
                        } else {
                                uap->im |= UART011_TXIM;
                                ret = false;
                        }
                        writew(uap->im, uap->port.membase + UART011_IMSC);
                } else if (!(uap->dmacr & UART011_TXDMAE)) {
                        uap->dmacr |= UART011_TXDMAE;
                        writew(uap->dmacr,
                               uap->port.membase + UART011_DMACR);
                }
                return ret;
        }

        /*
         * We have an X-char to send.  Disable DMA to prevent it loading
         * the TX fifo, and then see if we can stuff it into the FIFO.
         */
        dmacr = uap->dmacr;
        uap->dmacr &= ~UART011_TXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);

        if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
                /*
                 * No space in the FIFO, so enable the transmit interrupt
                 * so we know when there is space.  Note that once we've
                 * loaded the character, we should just re-enable DMA.
                 */
                return false;
        }

        writew(uap->port.x_char, uap->port.membase + UART01x_DR);
        uap->port.icount.tx++;
        uap->port.x_char = 0;

        /* Success - restore the DMA state */
        uap->dmacr = dmacr;
        writew(dmacr, uap->port.membase + UART011_DMACR);

        return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;

        if (!uap->using_tx_dma)
                return;

        /* Avoid deadlock with the DMA engine callback */
        spin_unlock(&uap->port.lock);
        dmaengine_terminate_all(uap->dmatx.chan);
        spin_lock(&uap->port.lock);
        if (uap->dmatx.queued) {
                dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
                             DMA_TO_DEVICE);
                uap->dmatx.queued = false;
                uap->dmacr &= ~UART011_TXDMAE;
                writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        }
}

static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
        struct dma_chan *rxchan = uap->dmarx.chan;
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_async_tx_descriptor *desc;
        struct pl011_sgbuf *sgbuf;

        if (!rxchan)
                return -EIO;

        /* Start the RX DMA job */
        sgbuf = uap->dmarx.use_buf_b ?
                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
        desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
                                       DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        /*
         * If the DMA engine is busy and cannot prepare a
         * channel, no big deal, the driver will fall back
         * to interrupt mode as a result of this error code.
         */
        if (!desc) {
                uap->dmarx.running = false;
                dmaengine_terminate_all(rxchan);
                return -EBUSY;
        }

        /* Some data to go along to the callback */
        desc->callback = pl011_dma_rx_callback;
        desc->callback_param = uap;
        dmarx->cookie = dmaengine_submit(desc);
        dma_async_issue_pending(rxchan);

        uap->dmacr |= UART011_RXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmarx.running = true;

        uap->im &= ~UART011_RXIM;
        writew(uap->im, uap->port.membase + UART011_IMSC);

        return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
                               u32 pending, bool use_buf_b,
                               bool readfifo)
{
        struct tty_port *port = &uap->port.state->port;
        struct pl011_sgbuf *sgbuf = use_buf_b ?
                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
        struct device *dev = uap->dmarx.chan->device->dev;
        int dma_count = 0;
        u32 fifotaken = 0; /* only used for vdbg() */

        /* Pick everything from the DMA first */
        if (pending) {
                /* Sync in buffer */
                dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

                /*
                 * First take all chars in the DMA pipe, then look in the
                 * FIFO.  Note that tty_insert_flip_string() tries to take
                 * as many chars as it can.
                 */
                dma_count = tty_insert_flip_string(port, sgbuf->buf, pending);

                /* Return buffer to device */
                dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

                uap->port.icount.rx += dma_count;
                if (dma_count < pending)
                        dev_warn(uap->port.dev,
                                 "couldn't insert all characters (TTY is full?)\n");
        }

        /*
         * Only continue with trying to read the FIFO if all DMA chars have
         * been taken first.
         */
        if (dma_count == pending && readfifo) {
                /* Clear any error flags */
                writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
                       uap->port.membase + UART011_ICR);

                /*
                 * If we read all the DMA'd characters, and we had an
                 * incomplete buffer, that could be due to an rx error, or
                 * maybe we just timed out. Read any pending chars and check
                 * the error status.
                 *
                 * Error conditions will only occur in the FIFO, these will
                 * trigger an immediate interrupt and stop the DMA job, so we
                 * will always find the error in the FIFO, never in the DMA
                 * buffer.
                 */
                fifotaken = pl011_fifo_to_tty(uap);
        }

        spin_unlock(&uap->port.lock);
        dev_vdbg(uap->port.dev,
                 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
                 dma_count, fifotaken);
        tty_flip_buffer_push(port);
        spin_lock(&uap->port.lock);
}

static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_chan *rxchan = dmarx->chan;
        struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
                &dmarx->sgbuf_b : &dmarx->sgbuf_a;
        size_t pending;
        struct dma_tx_state state;
        enum dma_status dmastat;

        /*
         * Pause the transfer so we can trust the current counter,
         * do this before we pause the PL011 block, else we may
         * overflow the FIFO.
         */
        if (dmaengine_pause(rxchan))
                dev_err(uap->port.dev, "unable to pause DMA transfer\n");
        dmastat = rxchan->device->device_tx_status(rxchan,
                                                   dmarx->cookie, &state);
        if (dmastat != DMA_PAUSED)
                dev_err(uap->port.dev, "unable to pause DMA transfer\n");

        /* Disable RX DMA - incoming data will wait in the FIFO */
        uap->dmacr &= ~UART011_RXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmarx.running = false;

        pending = sgbuf->sg.length - state.residue;
        BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
        /* Then we terminate the transfer - we now know our residue */
        dmaengine_terminate_all(rxchan);

        /*
         * This will take the chars we have so far and insert
         * into the framework.
         */
        pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

        /* Switch buffer & re-trigger DMA job */
        dmarx->use_buf_b = !dmarx->use_buf_b;
        if (pl011_dma_rx_trigger_dma(uap)) {
                dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
                        "fall back to interrupt mode\n");
                uap->im |= UART011_RXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
        }
}

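/*
 * Example of the residue arithmetic above: if the full sgbuf (typically
 * PAGE_SIZE = 4096 bytes) was queued and the paused channel reports
 * state.residue = 3000 bytes still outstanding, then pending =
 * 4096 - 3000 = 1096 characters have already landed in the buffer and
 * are handed to the TTY layer.
 */
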
static void pl011_dma_rx_callback(void *data)
{
        struct uart_amba_port *uap = data;
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_chan *rxchan = dmarx->chan;
        bool lastbuf = dmarx->use_buf_b;
        struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
                &dmarx->sgbuf_b : &dmarx->sgbuf_a;
        size_t pending;
        struct dma_tx_state state;
        int ret;

        /*
         * This completion interrupt occurs typically when the
         * RX buffer is totally stuffed but no timeout has yet
         * occurred. When that happens, we just want the RX
         * routine to flush out the secondary DMA buffer while
         * we immediately trigger the next DMA job.
         */
        spin_lock_irq(&uap->port.lock);
        /*
         * Rx data can be taken by the UART interrupts during
         * the DMA irq handler. So we check the residue here.
         */
        rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
        pending = sgbuf->sg.length - state.residue;
        BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
        /* Then we terminate the transfer - we now know our residue */
        dmaengine_terminate_all(rxchan);

        uap->dmarx.running = false;
        dmarx->use_buf_b = !lastbuf;
        ret = pl011_dma_rx_trigger_dma(uap);

        pl011_dma_rx_chars(uap, pending, lastbuf, false);
        spin_unlock_irq(&uap->port.lock);
        /*
         * Do this check after we picked the DMA chars so we don't
         * get some IRQ immediately from RX.
         */
        if (ret) {
                dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
                        "fall back to interrupt mode\n");
                uap->im |= UART011_RXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
        }
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
        /* FIXME.  Just disable the DMA enable */
        uap->dmacr &= ~UART011_RXDMAE;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}

static void pl011_dma_startup(struct uart_amba_port *uap)
{
        int ret;

        if (!uap->dmatx.chan)
                return;

        uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
        if (!uap->dmatx.buf) {
                dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
                uap->port.fifosize = uap->fifosize;
                return;
        }

        sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

        /* The DMA buffer is now the FIFO the TTY subsystem can use */
        uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
        uap->using_tx_dma = true;

        if (!uap->dmarx.chan)
                goto skip_rx;

        /* Allocate and map DMA RX buffers */
        ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
                               DMA_FROM_DEVICE);
        if (ret) {
                dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
                        "RX buffer A", ret);
                goto skip_rx;
        }

        ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
                               DMA_FROM_DEVICE);
        if (ret) {
                dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
                        "RX buffer B", ret);
                pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
                                 DMA_FROM_DEVICE);
                goto skip_rx;
        }

        uap->using_rx_dma = true;

skip_rx:
        /* Turn on DMA error (RX/TX will be enabled on demand) */
        uap->dmacr |= UART011_DMAONERR;
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);

        /*
         * ST Micro variants have some specific dma burst threshold
         * compensation. Set this to 16 bytes, so burst will only
         * be issued above/below 16 bytes.
         */
        if (uap->vendor->dma_threshold)
                writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
                       uap->port.membase + ST_UART011_DMAWM);

        if (uap->using_rx_dma) {
                if (pl011_dma_rx_trigger_dma(uap))
                        dev_dbg(uap->port.dev, "could not trigger initial "
                                "RX DMA job, fall back to interrupt mode\n");
        }
}

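/*
 * The two sgbufs set up above implement a ping-pong scheme: while one
 * buffer is owned by the DMA controller, the buffer that just completed
 * (or timed out) is drained into the TTY layer, and dmarx.use_buf_b
 * flips on every finished transfer (see pl011_dma_rx_irq() and
 * pl011_dma_rx_callback()).
 */
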
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
        if (!(uap->using_tx_dma || uap->using_rx_dma))
                return;

        /* Disable RX and TX DMA */
        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
                barrier();

        spin_lock_irq(&uap->port.lock);
        uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
        writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        spin_unlock_irq(&uap->port.lock);

        if (uap->using_tx_dma) {
                /* In theory, this should already be done by pl011_dma_flush_buffer */
                dmaengine_terminate_all(uap->dmatx.chan);
                if (uap->dmatx.queued) {
                        dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
                                     DMA_TO_DEVICE);
                        uap->dmatx.queued = false;
                }

                kfree(uap->dmatx.buf);
                uap->using_tx_dma = false;
        }

        if (uap->using_rx_dma) {
                dmaengine_terminate_all(uap->dmarx.chan);
                /* Clean up the RX DMA */
                pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
                pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
                uap->using_rx_dma = false;
        }
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
        return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
        return uap->using_rx_dma && uap->dmarx.running;
}


#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
        return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
        return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
        return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
        return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
        return false;
}

#define pl011_dma_flush_buffer  NULL
#endif

static void pl011_stop_tx(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;

        uap->im &= ~UART011_TXIM;
        writew(uap->im, uap->port.membase + UART011_IMSC);
        pl011_dma_tx_stop(uap);
}

static void pl011_start_tx(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;

        if (!pl011_dma_tx_start(uap)) {
                uap->im |= UART011_TXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
        }
}

static void pl011_stop_rx(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;

        uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
                     UART011_PEIM|UART011_BEIM|UART011_OEIM);
        writew(uap->im, uap->port.membase + UART011_IMSC);

        pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;

        uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
        writew(uap->im, uap->port.membase + UART011_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
{
        pl011_fifo_to_tty(uap);

        spin_unlock(&uap->port.lock);
        tty_flip_buffer_push(&uap->port.state->port);
        /*
         * If we were temporarily out of DMA mode for a while,
         * attempt to switch back to DMA mode again.
         */
        if (pl011_dma_rx_available(uap)) {
                if (pl011_dma_rx_trigger_dma(uap)) {
                        dev_dbg(uap->port.dev, "could not trigger RX DMA job "
                                "fall back to interrupt mode again\n");
                        uap->im |= UART011_RXIM;
                } else
                        uap->im &= ~UART011_RXIM;
                writew(uap->im, uap->port.membase + UART011_IMSC);
        }
        spin_lock(&uap->port.lock);
}

static void pl011_tx_chars(struct uart_amba_port *uap)
{
        struct circ_buf *xmit = &uap->port.state->xmit;
        int count;

        if (uap->port.x_char) {
                writew(uap->port.x_char, uap->port.membase + UART01x_DR);
                uap->port.icount.tx++;
                uap->port.x_char = 0;
                return;
        }
        if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
                pl011_stop_tx(&uap->port);
                return;
        }

        /* If we are using DMA mode, try to send some characters. */
        if (pl011_dma_tx_irq(uap))
                return;

        count = uap->fifosize >> 1;
        do {
                writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                uap->port.icount.tx++;
                if (uart_circ_empty(xmit))
                        break;
        } while (--count > 0);

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&uap->port);

        if (uart_circ_empty(xmit))
                pl011_stop_tx(&uap->port);
}

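/*
 * Note on the PIO loop above: with the IFLS levels programmed in
 * pl011_startup(), the TX interrupt asserts once the FIFO is at most
 * half full, so a TX interrupt guarantees at least fifosize/2 free
 * slots and the loop can write that many characters without polling
 * the flag register in between.
 */
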
static void pl011_modem_status(struct uart_amba_port *uap)
{
        unsigned int status, delta;

        status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

        delta = status ^ uap->old_status;
        uap->old_status = status;

        if (!delta)
                return;

        if (delta & UART01x_FR_DCD)
                uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

        if (delta & UART01x_FR_DSR)
                uap->port.icount.dsr++;

        if (delta & UART01x_FR_CTS)
                uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

        wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static irqreturn_t pl011_int(int irq, void *dev_id)
{
        struct uart_amba_port *uap = dev_id;
        unsigned long flags;
        unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
        int handled = 0;
        unsigned int dummy_read;

        spin_lock_irqsave(&uap->port.lock, flags);

        status = readw(uap->port.membase + UART011_MIS);
        if (status) {
                do {
                        if (uap->vendor->cts_event_workaround) {
                                /* workaround to make sure that all bits are unlocked.. */
                                writew(0x00, uap->port.membase + UART011_ICR);

                                /*
                                 * Workaround: introduce a 26ns (1 UART clock
                                 * cycle) delay before the write-1-to-clear;
                                 * a single APB access incurs 2 pclk
                                 * (133.12MHz) cycles of delay, so add two
                                 * dummy reads.
                                 */
                                dummy_read = readw(uap->port.membase + UART011_ICR);
                                dummy_read = readw(uap->port.membase + UART011_ICR);
                        }

                        writew(status & ~(UART011_TXIS|UART011_RTIS|
                                          UART011_RXIS),
                               uap->port.membase + UART011_ICR);

                        if (status & (UART011_RTIS|UART011_RXIS)) {
                                if (pl011_dma_rx_running(uap))
                                        pl011_dma_rx_irq(uap);
                                else
                                        pl011_rx_chars(uap);
                        }
                        if (status & (UART011_DSRMIS|UART011_DCDMIS|
                                      UART011_CTSMIS|UART011_RIMIS))
                                pl011_modem_status(uap);
                        if (status & UART011_TXIS)
                                pl011_tx_chars(uap);

                        if (pass_counter-- == 0)
                                break;

                        status = readw(uap->port.membase + UART011_MIS);
                } while (status != 0);
                handled = 1;
        }

        spin_unlock_irqrestore(&uap->port.lock, flags);

        return IRQ_RETVAL(handled);
}

static unsigned int pl011_tx_empty(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int status = readw(uap->port.membase + UART01x_FR);
        return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
}

static unsigned int pl011_get_mctrl(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int result = 0;
        unsigned int status = readw(uap->port.membase + UART01x_FR);

#define TIOCMBIT(uartbit, tiocmbit)     \
        if (status & uartbit)           \
                result |= tiocmbit

        TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
        TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
        TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
        TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
#undef TIOCMBIT
        return result;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int cr;

        cr = readw(uap->port.membase + UART011_CR);

#define TIOCMBIT(tiocmbit, uartbit)     \
        if (mctrl & tiocmbit)           \
                cr |= uartbit;          \
        else                            \
                cr &= ~uartbit

        TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
        TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
        TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
        TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
        TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

        if (uap->autorts) {
                /* We need to disable auto-RTS if we want to turn RTS off */
                TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
        }
#undef TIOCMBIT

        writew(cr, uap->port.membase + UART011_CR);
}

static void pl011_break_ctl(struct uart_port *port, int break_state)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned long flags;
        unsigned int lcr_h;

        spin_lock_irqsave(&uap->port.lock, flags);
        lcr_h = readw(uap->port.membase + uap->lcrh_tx);
        if (break_state == -1)
                lcr_h |= UART01x_LCRH_BRK;
        else
                lcr_h &= ~UART01x_LCRH_BRK;
        writew(lcr_h, uap->port.membase + uap->lcrh_tx);
        spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned char __iomem *regs = uap->port.membase;

        writew(readw(regs + UART011_MIS), regs + UART011_ICR);
        /*
         * There is no way to clear TXIM as this is "ready to transmit IRQ", so
         * we simply mask it. start_tx() will unmask it.
         *
         * Note we can race with start_tx(), and if the race happens, the
         * polling user might get another interrupt just after we clear it.
         * But it should be OK and can happen even w/o the race, e.g.
         * controller immediately got some new data and raised the IRQ.
         *
         * And whoever uses polling routines assumes that it manages the device
         * (including tx queue), so we're also fine with start_tx()'s caller
         * side.
         */
        writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int status;

        /*
         * The caller might need IRQs lowered, e.g. if used with KDB NMI
         * debugger.
         */
        pl011_quiesce_irqs(port);

        status = readw(uap->port.membase + UART01x_FR);
        if (status & UART01x_FR_RXFE)
                return NO_POLL_CHAR;

        return readw(uap->port.membase + UART01x_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
                                unsigned char ch)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;

        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
                barrier();

        writew(ch, uap->port.membase + UART01x_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

static int pl011_hwinit(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        int retval;

        /* Optionally enable pins to be muxed in and configured */
        if (!IS_ERR(uap->pins_default)) {
                retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
                if (retval)
                        dev_err(port->dev,
                                "could not set default pins\n");
        }

        /*
         * Try to enable the clock producer.
         */
        retval = clk_prepare_enable(uap->clk);
        if (retval)
                goto out;

        uap->port.uartclk = clk_get_rate(uap->clk);

        /* Clear pending error and receive interrupts */
        writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
               UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);

        /*
         * Save the interrupt enable mask, and enable RX interrupts in
         * case the interrupt is used for NMI entry.
         */
        uap->im = readw(uap->port.membase + UART011_IMSC);
        writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);

        if (uap->port.dev->platform_data) {
                struct amba_pl011_data *plat;

                plat = uap->port.dev->platform_data;
                if (plat->init)
                        plat->init();
        }
        return 0;
 out:
        return retval;
}

static int pl011_startup(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int cr;
        int retval;

        retval = pl011_hwinit(port);
        if (retval)
                return retval;  /* the clock was not enabled on this path */

        writew(uap->im, uap->port.membase + UART011_IMSC);

        /*
         * Allocate the IRQ
         */
        retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
        if (retval)
                goto clk_dis;

        writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);

        /*
         * Provoke TX FIFO interrupt into asserting.
         */
        cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
        writew(cr, uap->port.membase + UART011_CR);
        writew(0, uap->port.membase + UART011_FBRD);
        writew(1, uap->port.membase + UART011_IBRD);
        writew(0, uap->port.membase + uap->lcrh_rx);
        if (uap->lcrh_tx != uap->lcrh_rx) {
                int i;
                /*
                 * Wait 10 PCLKs before writing LCRH_TX register,
                 * to get this delay write read only register 10 times
                 */
                for (i = 0; i < 10; ++i)
                        writew(0xff, uap->port.membase + UART011_MIS);
                writew(0, uap->port.membase + uap->lcrh_tx);
        }
        writew(0, uap->port.membase + UART01x_DR);
        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
                barrier();

        /* restore RTS and DTR */
        cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
        cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
        writew(cr, uap->port.membase + UART011_CR);

        /*
         * initialise the old status of the modem signals
         */
        uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

        /* Startup DMA */
        pl011_dma_startup(uap);

        /*
         * Finally, enable interrupts, only timeouts when using DMA
         * if initial RX DMA job failed, start in interrupt mode
         * as well.
         */
        spin_lock_irq(&uap->port.lock);
        /* Clear out any spuriously appearing RX interrupts */
        writew(UART011_RTIS | UART011_RXIS,
               uap->port.membase + UART011_ICR);
        uap->im = UART011_RTIM;
        if (!pl011_dma_rx_running(uap))
                uap->im |= UART011_RXIM;
        writew(uap->im, uap->port.membase + UART011_IMSC);
        spin_unlock_irq(&uap->port.lock);

        return 0;

 clk_dis:
        clk_disable_unprepare(uap->clk);
        return retval;
}

static void pl011_shutdown_channel(struct uart_amba_port *uap,
                                   unsigned int lcrh)
{
        unsigned long val;

        val = readw(uap->port.membase + lcrh);
        val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
        writew(val, uap->port.membase + lcrh);
}

static void pl011_shutdown(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int cr;
        int retval;

        /*
         * disable all interrupts
         */
        spin_lock_irq(&uap->port.lock);
        uap->im = 0;
        writew(uap->im, uap->port.membase + UART011_IMSC);
        writew(0xffff, uap->port.membase + UART011_ICR);
        spin_unlock_irq(&uap->port.lock);

        pl011_dma_shutdown(uap);

        /*
         * Free the interrupt
         */
        free_irq(uap->port.irq, uap);

        /*
         * disable the port.  It should not disable RTS and DTR.
         * Also RTS and DTR state should be preserved to restore
         * it during startup().
         */
        uap->autorts = false;
        cr = readw(uap->port.membase + UART011_CR);
        uap->old_cr = cr;
        cr &= UART011_CR_RTS | UART011_CR_DTR;
        cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
        writew(cr, uap->port.membase + UART011_CR);

        /*
         * disable break condition and fifos
         */
        pl011_shutdown_channel(uap, uap->lcrh_rx);
        if (uap->lcrh_rx != uap->lcrh_tx)
                pl011_shutdown_channel(uap, uap->lcrh_tx);

        /*
         * Shut down the clock producer
         */
        clk_disable_unprepare(uap->clk);
        /* Optionally let pins go into sleep states */
        if (!IS_ERR(uap->pins_sleep)) {
                retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
                if (retval)
                        dev_err(port->dev,
                                "could not set pins to sleep state\n");
        }

        if (uap->port.dev->platform_data) {
                struct amba_pl011_data *plat;

                plat = uap->port.dev->platform_data;
                if (plat->exit)
                        plat->exit();
        }
}

static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
                  struct ktermios *old)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int lcr_h, old_cr;
        unsigned long flags;
        unsigned int baud, quot, clkdiv;

        if (uap->vendor->oversampling)
                clkdiv = 8;
        else
                clkdiv = 16;

        /*
         * Ask the core to calculate the divisor for us.
         */
        baud = uart_get_baud_rate(port, termios, old, 0,
                                  port->uartclk / clkdiv);

        if (baud > port->uartclk/16)
                quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
        else
                quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

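        /*
         * quot is the baud divisor expressed in 1/64ths: the block
         * divides uartclk by 16 * (IBRD + FBRD/64), so in the standard
         * (non-oversampled) case quot = 64 * uartclk / (16 * baud) =
         * uartclk * 4 / baud.  Worked example: uartclk = 24 MHz and
         * baud = 115200 give quot = DIV_ROUND_CLOSEST(96000000, 115200)
         * = 833, i.e. IBRD = 833 >> 6 = 13 and FBRD = 833 & 0x3f = 1,
         * for an actual rate of 24000000 / (16 * (13 + 1/64)), roughly
         * 115246 baud (about 0.04% fast).
         */
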
        switch (termios->c_cflag & CSIZE) {
        case CS5:
                lcr_h = UART01x_LCRH_WLEN_5;
                break;
        case CS6:
                lcr_h = UART01x_LCRH_WLEN_6;
                break;
        case CS7:
                lcr_h = UART01x_LCRH_WLEN_7;
                break;
        default: /* CS8 */
                lcr_h = UART01x_LCRH_WLEN_8;
                break;
        }
        if (termios->c_cflag & CSTOPB)
                lcr_h |= UART01x_LCRH_STP2;
        if (termios->c_cflag & PARENB) {
                lcr_h |= UART01x_LCRH_PEN;
                if (!(termios->c_cflag & PARODD))
                        lcr_h |= UART01x_LCRH_EPS;
        }
        if (uap->fifosize > 1)
                lcr_h |= UART01x_LCRH_FEN;

        spin_lock_irqsave(&port->lock, flags);

        /*
         * Update the per-port timeout.
         */
        uart_update_timeout(port, termios->c_cflag, baud);

        port->read_status_mask = UART011_DR_OE | 255;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
        if (termios->c_iflag & (BRKINT | PARMRK))
                port->read_status_mask |= UART011_DR_BE;

        /*
         * Characters to ignore
         */
        port->ignore_status_mask = 0;
        if (termios->c_iflag & IGNPAR)
                port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
        if (termios->c_iflag & IGNBRK) {
                port->ignore_status_mask |= UART011_DR_BE;
                /*
                 * If we're ignoring parity and break indicators,
                 * ignore overruns too (for real raw support).
                 */
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |= UART011_DR_OE;
        }

        /*
         * Ignore all characters if CREAD is not set.
         */
        if ((termios->c_cflag & CREAD) == 0)
                port->ignore_status_mask |= UART_DUMMY_DR_RX;

        if (UART_ENABLE_MS(port, termios->c_cflag))
                pl011_enable_ms(port);

        /* first, disable everything */
        old_cr = readw(port->membase + UART011_CR);
        writew(0, port->membase + UART011_CR);

        if (termios->c_cflag & CRTSCTS) {
                if (old_cr & UART011_CR_RTS)
                        old_cr |= UART011_CR_RTSEN;

                old_cr |= UART011_CR_CTSEN;
                uap->autorts = true;
        } else {
                old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
                uap->autorts = false;
        }

        if (uap->vendor->oversampling) {
                if (baud > port->uartclk / 16)
                        old_cr |= ST_UART011_CR_OVSFACT;
                else
                        old_cr &= ~ST_UART011_CR_OVSFACT;
        }

        /*
         * Workaround for the ST Micro oversampling variants to
         * increase the bitrate slightly, by lowering the divisor,
         * to avoid delayed sampling of start bit at high speeds,
         * else we see data corruption.
         */
        if (uap->vendor->oversampling) {
                if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
                        quot -= 1;
                else if ((baud > 3250000) && (quot > 2))
                        quot -= 2;
        }
        /* Set baud rate */
        writew(quot & 0x3f, port->membase + UART011_FBRD);
        writew(quot >> 6, port->membase + UART011_IBRD);

        /*
         * ----------v----------v----------v----------v-----
         * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
         * UART011_FBRD & UART011_IBRD.
         * ----------^----------^----------^----------^-----
         */
        writew(lcr_h, port->membase + uap->lcrh_rx);
        if (uap->lcrh_rx != uap->lcrh_tx) {
                int i;
                /*
                 * Wait 10 PCLKs before writing LCRH_TX register,
                 * to get this delay write read only register 10 times
                 */
                for (i = 0; i < 10; ++i)
                        writew(0xff, uap->port.membase + UART011_MIS);
                writew(lcr_h, port->membase + uap->lcrh_tx);
        }
        writew(old_cr, port->membase + UART011_CR);

        spin_unlock_irqrestore(&port->lock, flags);
}

static const char *pl011_type(struct uart_port *port)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
        release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
        return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
                        != NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
        if (flags & UART_CONFIG_TYPE) {
                port->type = PORT_AMBA;
                pl011_request_port(port);
        }
}

/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
        int ret = 0;
        if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
                ret = -EINVAL;
        if (ser->irq < 0 || ser->irq >= nr_irqs)
                ret = -EINVAL;
        if (ser->baud_base < 9600)
                ret = -EINVAL;
        return ret;
}

static struct uart_ops amba_pl011_pops = {
        .tx_empty       = pl011_tx_empty,
        .set_mctrl      = pl011_set_mctrl,
        .get_mctrl      = pl011_get_mctrl,
        .stop_tx        = pl011_stop_tx,
        .start_tx       = pl011_start_tx,
        .stop_rx        = pl011_stop_rx,
        .enable_ms      = pl011_enable_ms,
        .break_ctl      = pl011_break_ctl,
        .startup        = pl011_startup,
        .shutdown       = pl011_shutdown,
        .flush_buffer   = pl011_dma_flush_buffer,
        .set_termios    = pl011_set_termios,
        .type           = pl011_type,
        .release_port   = pl011_release_port,
        .request_port   = pl011_request_port,
        .config_port    = pl011_config_port,
        .verify_port    = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
        .poll_init      = pl011_hwinit,
        .poll_get_char  = pl011_get_poll_char,
        .poll_put_char  = pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
        struct uart_amba_port *uap = (struct uart_amba_port *)port;

        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
                barrier();
        writew(ch, uap->port.membase + UART01x_DR);
}

static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
        struct uart_amba_port *uap = amba_ports[co->index];
        unsigned int status, old_cr, new_cr;
        unsigned long flags;
        int locked = 1;

        clk_enable(uap->clk);

        local_irq_save(flags);
        if (uap->port.sysrq)
                locked = 0;
        else if (oops_in_progress)
                locked = spin_trylock(&uap->port.lock);
        else
                spin_lock(&uap->port.lock);

        /*
         * First save the CR then disable the interrupts
         */
        old_cr = readw(uap->port.membase + UART011_CR);
        new_cr = old_cr & ~UART011_CR_CTSEN;
        new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
        writew(new_cr, uap->port.membase + UART011_CR);

        uart_console_write(&uap->port, s, count, pl011_console_putchar);

        /*
         * Finally, wait for the transmitter to become empty
         * and restore the CR
         */
        do {
                status = readw(uap->port.membase + UART01x_FR);
        } while (status & UART01x_FR_BUSY);
        writew(old_cr, uap->port.membase + UART011_CR);

        if (locked)
                spin_unlock(&uap->port.lock);
        local_irq_restore(flags);

        clk_disable(uap->clk);
}

1813static void __init
1814pl011_console_get_options(struct uart_amba_port *uap, int *baud,
1815 int *parity, int *bits)
1816{
1817 if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
1818 unsigned int lcr_h, ibrd, fbrd;
1819
ec489aa8 1820 lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1da177e4
LT
1821
1822 *parity = 'n';
1823 if (lcr_h & UART01x_LCRH_PEN) {
1824 if (lcr_h & UART01x_LCRH_EPS)
1825 *parity = 'e';
1826 else
1827 *parity = 'o';
1828 }
1829
1830 if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
1831 *bits = 7;
1832 else
1833 *bits = 8;
1834
1835 ibrd = readw(uap->port.membase + UART011_IBRD);
1836 fbrd = readw(uap->port.membase + UART011_FBRD);
1837
1838 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
ac3e3fb4 1839
c19f12b5 1840 if (uap->vendor->oversampling) {
ac3e3fb4
LW
1841 if (readw(uap->port.membase + UART011_CR)
1842 & ST_UART011_CR_OVSFACT)
1843 *baud *= 2;
1844 }
1da177e4
LT
1845 }
1846}
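
/*
 * Worked example (added for clarity, not in the original source): the
 * PL011 baud divisor is IBRD + FBRD/64 and the baud rate is
 * uartclk / (16 * divisor), so
 *
 *	baud = uartclk / (16 * (ibrd + fbrd / 64))
 *	     = uartclk * 4 / (64 * ibrd + fbrd)
 *
 * which is the integer-only form computed above. E.g. with a 24 MHz
 * uartclk, ibrd = 13 and fbrd = 1 give
 * 24000000 * 4 / 833 = 115246, i.e. roughly 115200 baud.
 */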

static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, fall back to the first port.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	if (!IS_ERR(uap->pins_default)) {
		ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
		if (ret)
			dev_err(uap->port.dev,
				"could not set default pins\n");
	}

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		pl011_console_get_options(uap, &baud, &parity, &bits);

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
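
/*
 * Usage note (illustrative): the console is normally selected on the
 * kernel command line, e.g.
 *
 *	console=ttyAMA0,115200n8
 *
 * in which case uart_parse_options() sees "115200n8". Without options,
 * pl011_console_get_options() reads back whatever settings the boot
 * loader left in the hardware.
 */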

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)
#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= "ttyAMA",
	.dev_name	= "ttyAMA",
	.major		= SERIAL_AMBA_MAJOR,
	.minor		= SERIAL_AMBA_MINOR,
	.nr		= UART_NR,
	.cons		= AMBA_CONSOLE,
};

static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (IS_ERR_VALUE(ret)) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}
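
/*
 * Illustrative device tree fragment (node labels are assumptions):
 * a fixed port number can be requested with a "serial" alias, e.g.
 *
 *	aliases {
 *		serial0 = &uart0;
 *		serial1 = &uart1;
 *	};
 *
 * so the node labelled uart0 becomes ttyAMA0 regardless of probe
 * order; of_alias_get_id() above returns the alias index.
 */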

static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	void __iomem *base;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			break;

	if (i == ARRAY_SIZE(amba_ports)) {
		ret = -EBUSY;
		goto out;
	}

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (uap == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	i = pl011_probe_dt_alias(i, &dev->dev);

	base = devm_ioremap(&dev->dev, dev->res.start,
			    resource_size(&dev->res));
	if (!base) {
		ret = -ENOMEM;
		goto out;
	}

	uap->pinctrl = devm_pinctrl_get(&dev->dev);
	if (IS_ERR(uap->pinctrl)) {
		ret = PTR_ERR(uap->pinctrl);
		goto out;
	}
	uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
						 PINCTRL_STATE_DEFAULT);
	if (IS_ERR(uap->pins_default))
		dev_err(&dev->dev, "could not get default pinstate\n");

	uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
					       PINCTRL_STATE_SLEEP);
	if (IS_ERR(uap->pins_sleep))
		dev_dbg(&dev->dev, "could not get sleep pinstate\n");

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk)) {
		ret = PTR_ERR(uap->clk);
		goto out;
	}

	uap->vendor = vendor;
	uap->lcrh_rx = vendor->lcrh_rx;
	uap->lcrh_tx = vendor->lcrh_tx;
	uap->old_cr = 0;
	uap->fifosize = vendor->fifosize;
	uap->port.dev = &dev->dev;
	uap->port.mapbase = dev->res.start;
	uap->port.membase = base;
	uap->port.iotype = UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.fifosize = uap->fifosize;
	uap->port.ops = &amba_pl011_pops;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = i;
	pl011_dma_probe(uap);

	/* Ensure interrupts from this UART are masked and cleared */
	writew(0, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	amba_ports[i] = uap;

	amba_set_drvdata(dev, uap);
	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret) {
		amba_set_drvdata(dev, NULL);
		amba_ports[i] = NULL;
		pl011_dma_remove(uap);
	}
 out:
	return ret;
}

static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);
	int i;

	amba_set_drvdata(dev, NULL);

	uart_remove_one_port(&amba_reg, &uap->port);

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;

	pl011_dma_remove(uap);
	return 0;
}

#ifdef CONFIG_PM
static int pl011_suspend(struct amba_device *dev, pm_message_t state)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },
};
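
/*
 * Note (added for clarity): the AMBA bus matches these entries against
 * the PrimeCell peripheral ID registers. 0x00041011 is ARM's PL011;
 * its 0x000fffff mask ignores the revision nibble, which pl011_probe()
 * reads separately via amba_rev(). 0x00380802 is the ST-Ericsson
 * derivative handled by vendor_st.
 */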

MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
#ifdef CONFIG_PM
	.suspend	= pl011_suspend,
	.resume		= pl011_resume,
#endif
};

static int __init pl011_init(void)
{
	int ret;

	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	ret = uart_register_driver(&amba_reg);
	if (ret == 0) {
		ret = amba_driver_register(&pl011_driver);
		if (ret)
			uart_unregister_driver(&amba_reg);
	}
	return ret;
}

static void __exit pl011_exit(void)
{
	amba_driver_unregister(&pl011_driver);
	uart_unregister_driver(&amba_reg);
}

/*
 * While this can be a module, if built in it's most likely the console,
 * so let's keep module_exit() but move module_init() to an earlier place.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");