drivers/tty/serial/msm_serial.c
1 /*
2 * Driver for msm7k serial device and console
3 *
4 * Copyright (C) 2007 Google, Inc.
5 * Author: Robert Love <rlove@google.com>
6 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18 #if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
19 # define SUPPORT_SYSRQ
20 #endif
21
22 #include <linux/atomic.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/dmaengine.h>
25 #include <linux/hrtimer.h>
26 #include <linux/module.h>
27 #include <linux/io.h>
28 #include <linux/ioport.h>
29 #include <linux/irq.h>
30 #include <linux/init.h>
31 #include <linux/console.h>
32 #include <linux/tty.h>
33 #include <linux/tty_flip.h>
34 #include <linux/serial_core.h>
35 #include <linux/serial.h>
36 #include <linux/slab.h>
37 #include <linux/clk.h>
38 #include <linux/platform_device.h>
39 #include <linux/delay.h>
40 #include <linux/of.h>
41 #include <linux/of_device.h>
42
43 #include "msm_serial.h"
44
45 #define UARTDM_BURST_SIZE 16 /* in bytes */
46 #define UARTDM_TX_ALIGN(x) ((x) & ~0x3) /* valid for > 1p3 */
47 #define UARTDM_TX_MAX 256 /* in bytes, valid for <= 1p3 */
48 #define UARTDM_RX_SIZE (UART_XMIT_SIZE / 4)
49
50 enum {
51 UARTDM_1P1 = 1,
52 UARTDM_1P2,
53 UARTDM_1P3,
54 UARTDM_1P4,
55 };
56
57 struct msm_dma {
58 struct dma_chan *chan;
59 enum dma_data_direction dir;
60 dma_addr_t phys;
61 unsigned char *virt;
62 dma_cookie_t cookie;
63 u32 enable_bit;
64 unsigned int count;
65 struct dma_async_tx_descriptor *desc;
66 };
67
68 struct msm_port {
69 struct uart_port uart;
70 char name[16];
71 struct clk *clk;
72 struct clk *pclk;
73 unsigned int imr;
74 int is_uartdm;
75 unsigned int old_snap_state;
76 bool break_detected;
77 struct msm_dma tx_dma;
78 struct msm_dma rx_dma;
79 };
80
81 static void msm_handle_tx(struct uart_port *port);
82 static void msm_start_rx_dma(struct msm_port *msm_port);
83
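/*
 * Stop an in-flight DMA transfer: terminate the dmaengine channel,
 * clear the channel's enable bit in UARTDM_DMEN and unmap the DMA
 * buffer if one was mapped.
 */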
84 void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
85 {
86 struct device *dev = port->dev;
87 unsigned int mapped;
88 u32 val;
89
90 mapped = dma->count;
91 dma->count = 0;
92
93 dmaengine_terminate_all(dma->chan);
94
95 /*
96 * A DMA stall can occur if an enqueue and a flush command are issued
97 * concurrently. For example, before changing the baud rate or protocol
98 * configuration and sending a flush command to the ADM, disable the
99 * UARTDM channel. Note: do not reset the receiver here right away, as
100 * doing a disable/reset or reset/disable back to back is not recommended.
101 */
102 val = msm_read(port, UARTDM_DMEN);
103 val &= ~dma->enable_bit;
104 msm_write(port, val, UARTDM_DMEN);
105
106 if (mapped)
107 dma_unmap_single(dev, dma->phys, mapped, dma->dir);
108 }
109
110 static void msm_release_dma(struct msm_port *msm_port)
111 {
112 struct msm_dma *dma;
113
114 dma = &msm_port->tx_dma;
115 if (dma->chan) {
116 msm_stop_dma(&msm_port->uart, dma);
117 dma_release_channel(dma->chan);
118 }
119
120 memset(dma, 0, sizeof(*dma));
121
122 dma = &msm_port->rx_dma;
123 if (dma->chan) {
124 msm_stop_dma(&msm_port->uart, dma);
125 dma_release_channel(dma->chan);
126 kfree(dma->virt);
127 }
128
129 memset(dma, 0, sizeof(*dma));
130 }
131
132 static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
133 {
134 struct device *dev = msm_port->uart.dev;
135 struct dma_slave_config conf;
136 struct msm_dma *dma;
137 u32 crci = 0;
138 int ret;
139
140 dma = &msm_port->tx_dma;
141
142 /* allocate DMA resources, if available */
143 dma->chan = dma_request_slave_channel_reason(dev, "tx");
144 if (IS_ERR(dma->chan))
145 goto no_tx;
146
147 of_property_read_u32(dev->of_node, "qcom,tx-crci", &crci);
148
149 memset(&conf, 0, sizeof(conf));
150 conf.direction = DMA_MEM_TO_DEV;
151 conf.device_fc = true;
152 conf.dst_addr = base + UARTDM_TF;
153 conf.dst_maxburst = UARTDM_BURST_SIZE;
154 conf.slave_id = crci;
155
156 ret = dmaengine_slave_config(dma->chan, &conf);
157 if (ret)
158 goto rel_tx;
159
160 dma->dir = DMA_TO_DEVICE;
161
162 if (msm_port->is_uartdm < UARTDM_1P4)
163 dma->enable_bit = UARTDM_DMEN_TX_DM_ENABLE;
164 else
165 dma->enable_bit = UARTDM_DMEN_TX_BAM_ENABLE;
166
167 return;
168
169 rel_tx:
170 dma_release_channel(dma->chan);
171 no_tx:
172 memset(dma, 0, sizeof(*dma));
173 }
174
175 static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
176 {
177 struct device *dev = msm_port->uart.dev;
178 struct dma_slave_config conf;
179 struct msm_dma *dma;
180 u32 crci = 0;
181 int ret;
182
183 dma = &msm_port->rx_dma;
184
185 /* allocate DMA resources, if available */
186 dma->chan = dma_request_slave_channel_reason(dev, "rx");
187 if (IS_ERR(dma->chan))
188 goto no_rx;
189
190 of_property_read_u32(dev->of_node, "qcom,rx-crci", &crci);
191
192 dma->virt = kzalloc(UARTDM_RX_SIZE, GFP_KERNEL);
193 if (!dma->virt)
194 goto rel_rx;
195
196 memset(&conf, 0, sizeof(conf));
197 conf.direction = DMA_DEV_TO_MEM;
198 conf.device_fc = true;
199 conf.src_addr = base + UARTDM_RF;
200 conf.src_maxburst = UARTDM_BURST_SIZE;
201 conf.slave_id = crci;
202
203 ret = dmaengine_slave_config(dma->chan, &conf);
204 if (ret)
205 goto err;
206
207 dma->dir = DMA_FROM_DEVICE;
208
209 if (msm_port->is_uartdm < UARTDM_1P4)
210 dma->enable_bit = UARTDM_DMEN_RX_DM_ENABLE;
211 else
212 dma->enable_bit = UARTDM_DMEN_RX_BAM_ENABLE;
213
214 return;
215 err:
216 kfree(dma->virt);
217 rel_rx:
218 dma_release_channel(dma->chan);
219 no_rx:
220 memset(dma, 0, sizeof(*dma));
221 }
222
223 static inline void msm_wait_for_xmitr(struct uart_port *port)
224 {
225 while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
226 if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
227 break;
228 udelay(1);
229 }
230 msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
231 }
232
233 static void msm_stop_tx(struct uart_port *port)
234 {
235 struct msm_port *msm_port = UART_TO_MSM(port);
236
237 msm_port->imr &= ~UART_IMR_TXLEV;
238 msm_write(port, msm_port->imr, UART_IMR);
239 }
240
241 static void msm_start_tx(struct uart_port *port)
242 {
243 struct msm_port *msm_port = UART_TO_MSM(port);
244 struct msm_dma *dma = &msm_port->tx_dma;
245
246 /* Already started in DMA mode */
247 if (dma->count)
248 return;
249
250 msm_port->imr |= UART_IMR_TXLEV;
251 msm_write(port, msm_port->imr, UART_IMR);
252 }
253
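/*
 * Wait for the transmitter to drain, then program the character count
 * of the next transfer into UARTDM_NCF_TX.
 */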
254 static void msm_reset_dm_count(struct uart_port *port, int count)
255 {
256 msm_wait_for_xmitr(port);
257 msm_write(port, count, UARTDM_NCF_TX);
258 msm_read(port, UARTDM_NCF_TX);
259 }
260
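/*
 * Tx DMA completion callback: unmap the buffer, account for the
 * transmitted characters, restore the TXLEV interrupt and kick off
 * the next transfer.
 */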
261 static void msm_complete_tx_dma(void *args)
262 {
263 struct msm_port *msm_port = args;
264 struct uart_port *port = &msm_port->uart;
265 struct circ_buf *xmit = &port->state->xmit;
266 struct msm_dma *dma = &msm_port->tx_dma;
267 struct dma_tx_state state;
268 enum dma_status status;
269 unsigned long flags;
270 unsigned int count;
271 u32 val;
272
273 spin_lock_irqsave(&port->lock, flags);
274
275 /* Already stopped */
276 if (!dma->count)
277 goto done;
278
279 status = dmaengine_tx_status(dma->chan, dma->cookie, &state);
280
281 dma_unmap_single(port->dev, dma->phys, dma->count, dma->dir);
282
283 val = msm_read(port, UARTDM_DMEN);
284 val &= ~dma->enable_bit;
285 msm_write(port, val, UARTDM_DMEN);
286
287 if (msm_port->is_uartdm > UARTDM_1P3) {
288 msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
289 msm_write(port, UART_CR_TX_ENABLE, UART_CR);
290 }
291
292 count = dma->count - state.residue;
293 port->icount.tx += count;
294 dma->count = 0;
295
296 xmit->tail += count;
297 xmit->tail &= UART_XMIT_SIZE - 1;
298
299 /* Restore "Tx FIFO below watermark" interrupt */
300 msm_port->imr |= UART_IMR_TXLEV;
301 msm_write(port, msm_port->imr, UART_IMR);
302
303 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
304 uart_write_wakeup(port);
305
306 msm_handle_tx(port);
307 done:
308 spin_unlock_irqrestore(&port->lock, flags);
309 }
310
311 static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
312 {
313 struct circ_buf *xmit = &msm_port->uart.state->xmit;
314 struct uart_port *port = &msm_port->uart;
315 struct msm_dma *dma = &msm_port->tx_dma;
316 void *cpu_addr;
317 int ret;
318 u32 val;
319
320 cpu_addr = &xmit->buf[xmit->tail];
321
322 dma->phys = dma_map_single(port->dev, cpu_addr, count, dma->dir);
323 ret = dma_mapping_error(port->dev, dma->phys);
324 if (ret)
325 return ret;
326
327 dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
328 count, DMA_MEM_TO_DEV,
329 DMA_PREP_INTERRUPT |
330 DMA_PREP_FENCE);
331 if (!dma->desc) {
332 ret = -EIO;
333 goto unmap;
334 }
335
336 dma->desc->callback = msm_complete_tx_dma;
337 dma->desc->callback_param = msm_port;
338
339 dma->cookie = dmaengine_submit(dma->desc);
340 ret = dma_submit_error(dma->cookie);
341 if (ret)
342 goto unmap;
343
344 /*
345 * The DMA completion callback takes care of reloading the Tx FIFO, so
346 * the "Tx FIFO below watermark" interrupt is not needed; disable it
347 */
348 msm_port->imr &= ~UART_IMR_TXLEV;
349 msm_write(port, msm_port->imr, UART_IMR);
350
351 dma->count = count;
352
353 val = msm_read(port, UARTDM_DMEN);
354 val |= dma->enable_bit;
355
356 if (msm_port->is_uartdm < UARTDM_1P4)
357 msm_write(port, val, UARTDM_DMEN);
358
359 msm_reset_dm_count(port, count);
360
361 if (msm_port->is_uartdm > UARTDM_1P3)
362 msm_write(port, val, UARTDM_DMEN);
363
364 dma_async_issue_pending(dma->chan);
365 return 0;
366 unmap:
367 dma_unmap_single(port->dev, dma->phys, count, dma->dir);
368 return ret;
369 }
370
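/*
 * Rx DMA completion callback: push the received characters to the tty
 * layer (handling break and sysrq characters) and restart Rx DMA.
 */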
371 static void msm_complete_rx_dma(void *args)
372 {
373 struct msm_port *msm_port = args;
374 struct uart_port *port = &msm_port->uart;
375 struct tty_port *tport = &port->state->port;
376 struct msm_dma *dma = &msm_port->rx_dma;
377 int count = 0, i, sysrq;
378 unsigned long flags;
379 u32 val;
380
381 spin_lock_irqsave(&port->lock, flags);
382
383 /* Already stopped */
384 if (!dma->count)
385 goto done;
386
387 val = msm_read(port, UARTDM_DMEN);
388 val &= ~dma->enable_bit;
389 msm_write(port, val, UARTDM_DMEN);
390
391 /* Restore interrupts */
392 msm_port->imr |= UART_IMR_RXLEV | UART_IMR_RXSTALE;
393 msm_write(port, msm_port->imr, UART_IMR);
394
395 if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
396 port->icount.overrun++;
397 tty_insert_flip_char(tport, 0, TTY_OVERRUN);
398 msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
399 }
400
401 count = msm_read(port, UARTDM_RX_TOTAL_SNAP);
402
403 port->icount.rx += count;
404
405 dma->count = 0;
406
407 dma_unmap_single(port->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
408
409 for (i = 0; i < count; i++) {
410 char flag = TTY_NORMAL;
411
412 if (msm_port->break_detected && dma->virt[i] == 0) {
413 port->icount.brk++;
414 flag = TTY_BREAK;
415 msm_port->break_detected = false;
416 if (uart_handle_break(port))
417 continue;
418 }
419
420 if (!(port->read_status_mask & UART_SR_RX_BREAK))
421 flag = TTY_NORMAL;
422
423 spin_unlock_irqrestore(&port->lock, flags);
424 sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
425 spin_lock_irqsave(&port->lock, flags);
426 if (!sysrq)
427 tty_insert_flip_char(tport, dma->virt[i], flag);
428 }
429
430 msm_start_rx_dma(msm_port);
431 done:
432 spin_unlock_irqrestore(&port->lock, flags);
433
434 if (count)
435 tty_flip_buffer_push(tport);
436 }
437
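/*
 * Map the Rx bounce buffer, submit a new Rx DMA descriptor and enable
 * the Rx DMA path in UARTDM_DMEN.
 */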
438 static void msm_start_rx_dma(struct msm_port *msm_port)
439 {
440 struct msm_dma *dma = &msm_port->rx_dma;
441 struct uart_port *uart = &msm_port->uart;
442 u32 val;
443 int ret;
444
445 if (!dma->chan)
446 return;
447
448 dma->phys = dma_map_single(uart->dev, dma->virt,
449 UARTDM_RX_SIZE, dma->dir);
450 ret = dma_mapping_error(uart->dev, dma->phys);
451 if (ret)
452 return;
453
454 dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
455 UARTDM_RX_SIZE, DMA_DEV_TO_MEM,
456 DMA_PREP_INTERRUPT);
457 if (!dma->desc)
458 goto unmap;
459
460 dma->desc->callback = msm_complete_rx_dma;
461 dma->desc->callback_param = msm_port;
462
463 dma->cookie = dmaengine_submit(dma->desc);
464 ret = dma_submit_error(dma->cookie);
465 if (ret)
466 goto unmap;
467 /*
468 * DMA off-loads the Rx FIFO, so the "Rx FIFO over watermark" and
469 * "stale" interrupts are not needed; disable them
470 */
471 msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
472
473 /*
474 * When the DMA engine is ADM3 (implied by UARTDM <= v1.3), RXSTALE
475 * is still needed to flush the input DMA FIFO to memory
476 */
477 if (msm_port->is_uartdm < UARTDM_1P4)
478 msm_port->imr |= UART_IMR_RXSTALE;
479
480 msm_write(uart, msm_port->imr, UART_IMR);
481
482 dma->count = UARTDM_RX_SIZE;
483
484 dma_async_issue_pending(dma->chan);
485
486 msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
487 msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
488
489 val = msm_read(uart, UARTDM_DMEN);
490 val |= dma->enable_bit;
491
492 if (msm_port->is_uartdm < UARTDM_1P4)
493 msm_write(uart, val, UARTDM_DMEN);
494
495 msm_write(uart, UARTDM_RX_SIZE, UARTDM_DMRX);
496
497 if (msm_port->is_uartdm > UARTDM_1P3)
498 msm_write(uart, val, UARTDM_DMEN);
499
500 return;
501 unmap:
502 dma_unmap_single(uart->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
503 }
504
505 static void msm_stop_rx(struct uart_port *port)
506 {
507 struct msm_port *msm_port = UART_TO_MSM(port);
508 struct msm_dma *dma = &msm_port->rx_dma;
509
510 msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
511 msm_write(port, msm_port->imr, UART_IMR);
512
513 if (dma->chan)
514 msm_stop_dma(port, dma);
515 }
516
517 static void msm_enable_ms(struct uart_port *port)
518 {
519 struct msm_port *msm_port = UART_TO_MSM(port);
520
521 msm_port->imr |= UART_IMR_DELTA_CTS;
522 msm_write(port, msm_port->imr, UART_IMR);
523 }
524
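/*
 * PIO Rx path for UARTDM: drain the packing FIFO a word at a time,
 * push the characters to the tty layer, then try to switch back to DMA.
 */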
525 static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
526 {
527 struct tty_port *tport = &port->state->port;
528 unsigned int sr;
529 int count = 0;
530 struct msm_port *msm_port = UART_TO_MSM(port);
531
532 if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
533 port->icount.overrun++;
534 tty_insert_flip_char(tport, 0, TTY_OVERRUN);
535 msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
536 }
537
538 if (misr & UART_IMR_RXSTALE) {
539 count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
540 msm_port->old_snap_state;
541 msm_port->old_snap_state = 0;
542 } else {
543 count = 4 * (msm_read(port, UART_RFWR));
544 msm_port->old_snap_state += count;
545 }
546
547 /* TODO: Precise error reporting */
548
549 port->icount.rx += count;
550
551 while (count > 0) {
552 unsigned char buf[4];
553 int sysrq, r_count, i;
554
555 sr = msm_read(port, UART_SR);
556 if ((sr & UART_SR_RX_READY) == 0) {
557 msm_port->old_snap_state -= count;
558 break;
559 }
560
561 ioread32_rep(port->membase + UARTDM_RF, buf, 1);
562 r_count = min_t(int, count, sizeof(buf));
563
564 for (i = 0; i < r_count; i++) {
565 char flag = TTY_NORMAL;
566
567 if (msm_port->break_detected && buf[i] == 0) {
568 port->icount.brk++;
569 flag = TTY_BREAK;
570 msm_port->break_detected = false;
571 if (uart_handle_break(port))
572 continue;
573 }
574
575 if (!(port->read_status_mask & UART_SR_RX_BREAK))
576 flag = TTY_NORMAL;
577
578 spin_unlock(&port->lock);
579 sysrq = uart_handle_sysrq_char(port, buf[i]);
580 spin_lock(&port->lock);
581 if (!sysrq)
582 tty_insert_flip_char(tport, buf[i], flag);
583 }
584 count -= r_count;
585 }
586
587 spin_unlock(&port->lock);
588 tty_flip_buffer_push(tport);
589 spin_lock(&port->lock);
590
591 if (misr & (UART_IMR_RXSTALE))
592 msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
593 msm_write(port, 0xFFFFFF, UARTDM_DMRX);
594 msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
595
596 /* Try to use DMA */
597 msm_start_rx_dma(msm_port);
598 }
599
600 static void msm_handle_rx(struct uart_port *port)
601 {
602 struct tty_port *tport = &port->state->port;
603 unsigned int sr;
604
605 /*
606 * Handle overrun. My understanding of the hardware is that overrun
607 * is not tied to the RX buffer, so we handle the case out of band.
608 */
609 if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
610 port->icount.overrun++;
611 tty_insert_flip_char(tport, 0, TTY_OVERRUN);
612 msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
613 }
614
615 /* and now the main RX loop */
616 while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
617 unsigned int c;
618 char flag = TTY_NORMAL;
619 int sysrq;
620
621 c = msm_read(port, UART_RF);
622
623 if (sr & UART_SR_RX_BREAK) {
624 port->icount.brk++;
625 if (uart_handle_break(port))
626 continue;
627 } else if (sr & UART_SR_PAR_FRAME_ERR) {
628 port->icount.frame++;
629 } else {
630 port->icount.rx++;
631 }
632
633 /* Mask conditions we're ignoring. */
634 sr &= port->read_status_mask;
635
636 if (sr & UART_SR_RX_BREAK)
637 flag = TTY_BREAK;
638 else if (sr & UART_SR_PAR_FRAME_ERR)
639 flag = TTY_FRAME;
640
641 spin_unlock(&port->lock);
642 sysrq = uart_handle_sysrq_char(port, c);
643 spin_lock(&port->lock);
644 if (!sysrq)
645 tty_insert_flip_char(tport, c, flag);
646 }
647
648 spin_unlock(&port->lock);
649 tty_flip_buffer_push(tport);
650 spin_lock(&port->lock);
651 }
652
653 static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
654 {
655 struct circ_buf *xmit = &port->state->xmit;
656 struct msm_port *msm_port = UART_TO_MSM(port);
657 unsigned int num_chars;
658 unsigned int tf_pointer = 0;
659 void __iomem *tf;
660
661 if (msm_port->is_uartdm)
662 tf = port->membase + UARTDM_TF;
663 else
664 tf = port->membase + UART_TF;
665
666 if (tx_count && msm_port->is_uartdm)
667 msm_reset_dm_count(port, tx_count);
668
669 while (tf_pointer < tx_count) {
670 int i;
671 char buf[4] = { 0 };
672
673 if (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
674 break;
675
676 if (msm_port->is_uartdm)
677 num_chars = min(tx_count - tf_pointer,
678 (unsigned int)sizeof(buf));
679 else
680 num_chars = 1;
681
682 for (i = 0; i < num_chars; i++) {
683 buf[i] = xmit->buf[xmit->tail + i];
684 port->icount.tx++;
685 }
686
687 iowrite32_rep(tf, buf, 1);
688 xmit->tail = (xmit->tail + num_chars) & (UART_XMIT_SIZE - 1);
689 tf_pointer += num_chars;
690 }
691
692 /* disable tx interrupts if nothing more to send */
693 if (uart_circ_empty(xmit))
694 msm_stop_tx(port);
695
696 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
697 uart_write_wakeup(port);
698 }
699
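/*
 * Transmit pending characters, preferring DMA for large chunks and
 * falling back to PIO for x_char, short transfers or when no DMA
 * channel is available.
 */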
700 static void msm_handle_tx(struct uart_port *port)
701 {
702 struct msm_port *msm_port = UART_TO_MSM(port);
703 struct circ_buf *xmit = &msm_port->uart.state->xmit;
704 struct msm_dma *dma = &msm_port->tx_dma;
705 unsigned int pio_count, dma_count, dma_min;
706 void __iomem *tf;
707 int err = 0;
708
709 if (port->x_char) {
710 if (msm_port->is_uartdm)
711 tf = port->membase + UARTDM_TF;
712 else
713 tf = port->membase + UART_TF;
714
715 if (msm_port->is_uartdm)
716 msm_reset_dm_count(port, 1);
717
718 iowrite8_rep(tf, &port->x_char, 1);
719 port->icount.tx++;
720 port->x_char = 0;
721 return;
722 }
723
724 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
725 msm_stop_tx(port);
726 return;
727 }
728
729 pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
730 dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
731
732 dma_min = 1; /* Always DMA */
733 if (msm_port->is_uartdm > UARTDM_1P3) {
734 dma_count = UARTDM_TX_ALIGN(dma_count);
735 dma_min = UARTDM_BURST_SIZE;
736 } else {
737 if (dma_count > UARTDM_TX_MAX)
738 dma_count = UARTDM_TX_MAX;
739 }
740
741 if (pio_count > port->fifosize)
742 pio_count = port->fifosize;
743
744 if (!dma->chan || dma_count < dma_min)
745 msm_handle_tx_pio(port, pio_count);
746 else
747 err = msm_handle_tx_dma(msm_port, dma_count);
748
749 if (err) /* fall back to PIO mode */
750 msm_handle_tx_pio(port, pio_count);
751 }
752
753 static void msm_handle_delta_cts(struct uart_port *port)
754 {
755 msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
756 port->icount.cts++;
757 wake_up_interruptible(&port->state->port.delta_msr_wait);
758 }
759
760 static irqreturn_t msm_uart_irq(int irq, void *dev_id)
761 {
762 struct uart_port *port = dev_id;
763 struct msm_port *msm_port = UART_TO_MSM(port);
764 struct msm_dma *dma = &msm_port->rx_dma;
765 unsigned long flags;
766 unsigned int misr;
767 u32 val;
768
769 spin_lock_irqsave(&port->lock, flags);
770 misr = msm_read(port, UART_MISR);
771 msm_write(port, 0, UART_IMR); /* disable interrupt */
772
773 if (misr & UART_IMR_RXBREAK_START) {
774 msm_port->break_detected = true;
775 msm_write(port, UART_CR_CMD_RESET_RXBREAK_START, UART_CR);
776 }
777
778 if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
779 if (dma->count) {
780 val = UART_CR_CMD_STALE_EVENT_DISABLE;
781 msm_write(port, val, UART_CR);
782 val = UART_CR_CMD_RESET_STALE_INT;
783 msm_write(port, val, UART_CR);
784 /*
785 * Flush the DMA input FIFO to memory; this also triggers
786 * the DMA Rx completion callback
787 */
788 dmaengine_terminate_all(dma->chan);
789 } else if (msm_port->is_uartdm) {
790 msm_handle_rx_dm(port, misr);
791 } else {
792 msm_handle_rx(port);
793 }
794 }
795 if (misr & UART_IMR_TXLEV)
796 msm_handle_tx(port);
797 if (misr & UART_IMR_DELTA_CTS)
798 msm_handle_delta_cts(port);
799
800 msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
801 spin_unlock_irqrestore(&port->lock, flags);
802
803 return IRQ_HANDLED;
804 }
805
806 static unsigned int msm_tx_empty(struct uart_port *port)
807 {
808 return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
809 }
810
811 static unsigned int msm_get_mctrl(struct uart_port *port)
812 {
813 return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
814 }
815
816 static void msm_reset(struct uart_port *port)
817 {
818 struct msm_port *msm_port = UART_TO_MSM(port);
819
820 /* reset everything */
821 msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
822 msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
823 msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
824 msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
825 msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
826 msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
827
828 /* Disable DM modes */
829 if (msm_port->is_uartdm)
830 msm_write(port, 0, UARTDM_DMEN);
831 }
832
833 static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
834 {
835 unsigned int mr;
836
837 mr = msm_read(port, UART_MR1);
838
839 if (!(mctrl & TIOCM_RTS)) {
840 mr &= ~UART_MR1_RX_RDY_CTL;
841 msm_write(port, mr, UART_MR1);
842 msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
843 } else {
844 mr |= UART_MR1_RX_RDY_CTL;
845 msm_write(port, mr, UART_MR1);
846 }
847 }
848
849 static void msm_break_ctl(struct uart_port *port, int break_ctl)
850 {
851 if (break_ctl)
852 msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
853 else
854 msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
855 }
856
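/*
 * Map a clock divisor to the corresponding UART_CSR code and the RX
 * stale timeout used to program UART_IPR.
 */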
857 struct msm_baud_map {
858 u16 divisor;
859 u8 code;
860 u8 rxstale;
861 };
862
863 static const struct msm_baud_map *
864 msm_find_best_baud(struct uart_port *port, unsigned int baud)
865 {
866 unsigned int i, divisor;
867 const struct msm_baud_map *entry;
868 static const struct msm_baud_map table[] = {
869 { 1536, 0x00, 1 },
870 { 768, 0x11, 1 },
871 { 384, 0x22, 1 },
872 { 192, 0x33, 1 },
873 { 96, 0x44, 1 },
874 { 48, 0x55, 1 },
875 { 32, 0x66, 1 },
876 { 24, 0x77, 1 },
877 { 16, 0x88, 1 },
878 { 12, 0x99, 6 },
879 { 8, 0xaa, 6 },
880 { 6, 0xbb, 6 },
881 { 4, 0xcc, 6 },
882 { 3, 0xdd, 8 },
883 { 2, 0xee, 16 },
884 { 1, 0xff, 31 },
885 { 0, 0xff, 31 },
886 };
887
888 divisor = uart_get_divisor(port, baud);
889
890 for (i = 0, entry = table; i < ARRAY_SIZE(table); i++, entry++)
891 if (entry->divisor <= divisor)
892 break;
893
894 return entry; /* Default to the smallest divisor */
895 }
896
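/*
 * Program the divisor, stale timeout and FIFO watermarks for the
 * requested baud rate; the port lock is dropped around clk_set_rate().
 */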
897 static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
898 unsigned long *saved_flags)
899 {
900 unsigned int rxstale, watermark, mask;
901 struct msm_port *msm_port = UART_TO_MSM(port);
902 const struct msm_baud_map *entry;
903 unsigned long flags;
904
905 entry = msm_find_best_baud(port, baud);
906
907 msm_write(port, entry->code, UART_CSR);
908
909 if (baud > 460800)
910 port->uartclk = baud * 16;
911
912 flags = *saved_flags;
913 spin_unlock_irqrestore(&port->lock, flags);
914
915 clk_set_rate(msm_port->clk, port->uartclk);
916
917 spin_lock_irqsave(&port->lock, flags);
918 *saved_flags = flags;
919
920 /* RX stale watermark */
921 rxstale = entry->rxstale;
922 watermark = UART_IPR_STALE_LSB & rxstale;
923 if (msm_port->is_uartdm) {
924 mask = UART_DM_IPR_STALE_TIMEOUT_MSB;
925 } else {
926 watermark |= UART_IPR_RXSTALE_LAST;
927 mask = UART_IPR_STALE_TIMEOUT_MSB;
928 }
929
930 watermark |= mask & (rxstale << 2);
931
932 msm_write(port, watermark, UART_IPR);
933
934 /* set RX watermark */
935 watermark = (port->fifosize * 3) / 4;
936 msm_write(port, watermark, UART_RFWR);
937
938 /* set TX watermark */
939 msm_write(port, 10, UART_TFWR);
940
941 msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
942 msm_reset(port);
943
944 /* Enable RX and TX */
945 msm_write(port, UART_CR_TX_ENABLE | UART_CR_RX_ENABLE, UART_CR);
946
947 /* turn on RX and CTS interrupts */
948 msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
949 UART_IMR_CURRENT_CTS | UART_IMR_RXBREAK_START;
950
951 msm_write(port, msm_port->imr, UART_IMR);
952
953 if (msm_port->is_uartdm) {
954 msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
955 msm_write(port, 0xFFFFFF, UARTDM_DMRX);
956 msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
957 }
958
959 return baud;
960 }
961
962 static void msm_init_clock(struct uart_port *port)
963 {
964 struct msm_port *msm_port = UART_TO_MSM(port);
965
966 clk_prepare_enable(msm_port->clk);
967 clk_prepare_enable(msm_port->pclk);
968 msm_serial_set_mnd_regs(port);
969 }
970
971 static int msm_startup(struct uart_port *port)
972 {
973 struct msm_port *msm_port = UART_TO_MSM(port);
974 unsigned int data, rfr_level, mask;
975 int ret;
976
977 snprintf(msm_port->name, sizeof(msm_port->name),
978 "msm_serial%d", port->line);
979
980 ret = request_irq(port->irq, msm_uart_irq, IRQF_TRIGGER_HIGH,
981 msm_port->name, port);
982 if (unlikely(ret))
983 return ret;
984
985 msm_init_clock(port);
986
987 if (likely(port->fifosize > 12))
988 rfr_level = port->fifosize - 12;
989 else
990 rfr_level = port->fifosize;
991
992 /* set automatic RFR level */
993 data = msm_read(port, UART_MR1);
994
995 if (msm_port->is_uartdm)
996 mask = UART_DM_MR1_AUTO_RFR_LEVEL1;
997 else
998 mask = UART_MR1_AUTO_RFR_LEVEL1;
999
1000 data &= ~mask;
1001 data &= ~UART_MR1_AUTO_RFR_LEVEL0;
1002 data |= mask & (rfr_level << 2);
1003 data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
1004 msm_write(port, data, UART_MR1);
1005
1006 if (msm_port->is_uartdm) {
1007 msm_request_tx_dma(msm_port, msm_port->uart.mapbase);
1008 msm_request_rx_dma(msm_port, msm_port->uart.mapbase);
1009 }
1010
1011 return 0;
1012 }
1013
1014 static void msm_shutdown(struct uart_port *port)
1015 {
1016 struct msm_port *msm_port = UART_TO_MSM(port);
1017
1018 msm_port->imr = 0;
1019 msm_write(port, 0, UART_IMR); /* disable interrupts */
1020
1021 if (msm_port->is_uartdm)
1022 msm_release_dma(msm_port);
1023
1024 clk_disable_unprepare(msm_port->clk);
1025
1026 free_irq(port->irq, port);
1027 }
1028
1029 static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
1030 struct ktermios *old)
1031 {
1032 struct msm_port *msm_port = UART_TO_MSM(port);
1033 struct msm_dma *dma = &msm_port->rx_dma;
1034 unsigned long flags;
1035 unsigned int baud, mr;
1036
1037 spin_lock_irqsave(&port->lock, flags);
1038
1039 if (dma->chan) /* Terminate if any */
1040 msm_stop_dma(port, dma);
1041
1042 /* calculate and set baud rate */
1043 baud = uart_get_baud_rate(port, termios, old, 300, 4000000);
1044 baud = msm_set_baud_rate(port, baud, &flags);
1045 if (tty_termios_baud_rate(termios))
1046 tty_termios_encode_baud_rate(termios, baud, baud);
1047
1048 /* calculate parity */
1049 mr = msm_read(port, UART_MR2);
1050 mr &= ~UART_MR2_PARITY_MODE;
1051 if (termios->c_cflag & PARENB) {
1052 if (termios->c_cflag & PARODD)
1053 mr |= UART_MR2_PARITY_MODE_ODD;
1054 else if (termios->c_cflag & CMSPAR)
1055 mr |= UART_MR2_PARITY_MODE_SPACE;
1056 else
1057 mr |= UART_MR2_PARITY_MODE_EVEN;
1058 }
1059
1060 /* calculate bits per char */
1061 mr &= ~UART_MR2_BITS_PER_CHAR;
1062 switch (termios->c_cflag & CSIZE) {
1063 case CS5:
1064 mr |= UART_MR2_BITS_PER_CHAR_5;
1065 break;
1066 case CS6:
1067 mr |= UART_MR2_BITS_PER_CHAR_6;
1068 break;
1069 case CS7:
1070 mr |= UART_MR2_BITS_PER_CHAR_7;
1071 break;
1072 case CS8:
1073 default:
1074 mr |= UART_MR2_BITS_PER_CHAR_8;
1075 break;
1076 }
1077
1078 /* calculate stop bits */
1079 mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
1080 if (termios->c_cflag & CSTOPB)
1081 mr |= UART_MR2_STOP_BIT_LEN_TWO;
1082 else
1083 mr |= UART_MR2_STOP_BIT_LEN_ONE;
1084
1085 /* set parity, bits per char, and stop bit */
1086 msm_write(port, mr, UART_MR2);
1087
1088 /* calculate and set hardware flow control */
1089 mr = msm_read(port, UART_MR1);
1090 mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
1091 if (termios->c_cflag & CRTSCTS) {
1092 mr |= UART_MR1_CTS_CTL;
1093 mr |= UART_MR1_RX_RDY_CTL;
1094 }
1095 msm_write(port, mr, UART_MR1);
1096
1097 /* Configure status bits to ignore based on termio flags. */
1098 port->read_status_mask = 0;
1099 if (termios->c_iflag & INPCK)
1100 port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
1101 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1102 port->read_status_mask |= UART_SR_RX_BREAK;
1103
1104 uart_update_timeout(port, termios->c_cflag, baud);
1105
1106 /* Try to use DMA */
1107 msm_start_rx_dma(msm_port);
1108
1109 spin_unlock_irqrestore(&port->lock, flags);
1110 }
1111
1112 static const char *msm_type(struct uart_port *port)
1113 {
1114 return "MSM";
1115 }
1116
1117 static void msm_release_port(struct uart_port *port)
1118 {
1119 struct platform_device *pdev = to_platform_device(port->dev);
1120 struct resource *uart_resource;
1121 resource_size_t size;
1122
1123 uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1124 if (unlikely(!uart_resource))
1125 return;
1126 size = resource_size(uart_resource);
1127
1128 release_mem_region(port->mapbase, size);
1129 iounmap(port->membase);
1130 port->membase = NULL;
1131 }
1132
1133 static int msm_request_port(struct uart_port *port)
1134 {
1135 struct platform_device *pdev = to_platform_device(port->dev);
1136 struct resource *uart_resource;
1137 resource_size_t size;
1138 int ret;
1139
1140 uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1141 if (unlikely(!uart_resource))
1142 return -ENXIO;
1143
1144 size = resource_size(uart_resource);
1145
1146 if (!request_mem_region(port->mapbase, size, "msm_serial"))
1147 return -EBUSY;
1148
1149 port->membase = ioremap(port->mapbase, size);
1150 if (!port->membase) {
1151 ret = -EBUSY;
1152 goto fail_release_port;
1153 }
1154
1155 return 0;
1156
1157 fail_release_port:
1158 release_mem_region(port->mapbase, size);
1159 return ret;
1160 }
1161
1162 static void msm_config_port(struct uart_port *port, int flags)
1163 {
1164 int ret;
1165
1166 if (flags & UART_CONFIG_TYPE) {
1167 port->type = PORT_MSM;
1168 ret = msm_request_port(port);
1169 if (ret)
1170 return;
1171 }
1172 }
1173
1174 static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
1175 {
1176 if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_MSM))
1177 return -EINVAL;
1178 if (unlikely(port->irq != ser->irq))
1179 return -EINVAL;
1180 return 0;
1181 }
1182
1183 static void msm_power(struct uart_port *port, unsigned int state,
1184 unsigned int oldstate)
1185 {
1186 struct msm_port *msm_port = UART_TO_MSM(port);
1187
1188 switch (state) {
1189 case 0:
1190 clk_prepare_enable(msm_port->clk);
1191 clk_prepare_enable(msm_port->pclk);
1192 break;
1193 case 3:
1194 clk_disable_unprepare(msm_port->clk);
1195 clk_disable_unprepare(msm_port->pclk);
1196 break;
1197 default:
1198 pr_err("msm_serial: Unknown PM state %d\n", state);
1199 }
1200 }
1201
1202 #ifdef CONFIG_CONSOLE_POLL
1203 static int msm_poll_get_char_single(struct uart_port *port)
1204 {
1205 struct msm_port *msm_port = UART_TO_MSM(port);
1206 unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : UART_RF;
1207
1208 if (!(msm_read(port, UART_SR) & UART_SR_RX_READY))
1209 return NO_POLL_CHAR;
1210
1211 return msm_read(port, rf_reg) & 0xff;
1212 }
1213
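/*
 * Polled Rx for UARTDM: FIFO reads are 32 bits wide, so characters
 * beyond the first are stashed in 'slop' and returned on later calls.
 */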
1214 static int msm_poll_get_char_dm(struct uart_port *port)
1215 {
1216 int c;
1217 static u32 slop;
1218 static int count;
1219 unsigned char *sp = (unsigned char *)&slop;
1220
1221 /* Check if a previous read had more than one char */
1222 if (count) {
1223 c = sp[sizeof(slop) - count];
1224 count--;
1225 /* Or if FIFO is empty */
1226 } else if (!(msm_read(port, UART_SR) & UART_SR_RX_READY)) {
1227 /*
1228 * If the RX packing buffer holds less than a full word, force a
1229 * stale event to push its contents into the RX FIFO
1230 */
1231 count = msm_read(port, UARTDM_RXFS);
1232 count = (count >> UARTDM_RXFS_BUF_SHIFT) & UARTDM_RXFS_BUF_MASK;
1233 if (count) {
1234 msm_write(port, UART_CR_CMD_FORCE_STALE, UART_CR);
1235 slop = msm_read(port, UARTDM_RF);
1236 c = sp[0];
1237 count--;
1238 msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
1239 msm_write(port, 0xFFFFFF, UARTDM_DMRX);
1240 msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE,
1241 UART_CR);
1242 } else {
1243 c = NO_POLL_CHAR;
1244 }
1245 /* FIFO has a word */
1246 } else {
1247 slop = msm_read(port, UARTDM_RF);
1248 c = sp[0];
1249 count = sizeof(slop) - 1;
1250 }
1251
1252 return c;
1253 }
1254
1255 static int msm_poll_get_char(struct uart_port *port)
1256 {
1257 u32 imr;
1258 int c;
1259 struct msm_port *msm_port = UART_TO_MSM(port);
1260
1261 /* Disable all interrupts */
1262 imr = msm_read(port, UART_IMR);
1263 msm_write(port, 0, UART_IMR);
1264
1265 if (msm_port->is_uartdm)
1266 c = msm_poll_get_char_dm(port);
1267 else
1268 c = msm_poll_get_char_single(port);
1269
1270 /* Enable interrupts */
1271 msm_write(port, imr, UART_IMR);
1272
1273 return c;
1274 }
1275
1276 static void msm_poll_put_char(struct uart_port *port, unsigned char c)
1277 {
1278 u32 imr;
1279 struct msm_port *msm_port = UART_TO_MSM(port);
1280
1281 /* Disable all interrupts */
1282 imr = msm_read(port, UART_IMR);
1283 msm_write(port, 0, UART_IMR);
1284
1285 if (msm_port->is_uartdm)
1286 msm_reset_dm_count(port, 1);
1287
1288 /* Wait until FIFO is empty */
1289 while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
1290 cpu_relax();
1291
1292 /* Write a character */
1293 msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
1294
1295 /* Wait until FIFO is empty */
1296 while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
1297 cpu_relax();
1298
1299 /* Enable interrupts */
1300 msm_write(port, imr, UART_IMR);
1301 }
1302 #endif
1303
1304 static struct uart_ops msm_uart_pops = {
1305 .tx_empty = msm_tx_empty,
1306 .set_mctrl = msm_set_mctrl,
1307 .get_mctrl = msm_get_mctrl,
1308 .stop_tx = msm_stop_tx,
1309 .start_tx = msm_start_tx,
1310 .stop_rx = msm_stop_rx,
1311 .enable_ms = msm_enable_ms,
1312 .break_ctl = msm_break_ctl,
1313 .startup = msm_startup,
1314 .shutdown = msm_shutdown,
1315 .set_termios = msm_set_termios,
1316 .type = msm_type,
1317 .release_port = msm_release_port,
1318 .request_port = msm_request_port,
1319 .config_port = msm_config_port,
1320 .verify_port = msm_verify_port,
1321 .pm = msm_power,
1322 #ifdef CONFIG_CONSOLE_POLL
1323 .poll_get_char = msm_poll_get_char,
1324 .poll_put_char = msm_poll_put_char,
1325 #endif
1326 };
1327
1328 static struct msm_port msm_uart_ports[] = {
1329 {
1330 .uart = {
1331 .iotype = UPIO_MEM,
1332 .ops = &msm_uart_pops,
1333 .flags = UPF_BOOT_AUTOCONF,
1334 .fifosize = 64,
1335 .line = 0,
1336 },
1337 },
1338 {
1339 .uart = {
1340 .iotype = UPIO_MEM,
1341 .ops = &msm_uart_pops,
1342 .flags = UPF_BOOT_AUTOCONF,
1343 .fifosize = 64,
1344 .line = 1,
1345 },
1346 },
1347 {
1348 .uart = {
1349 .iotype = UPIO_MEM,
1350 .ops = &msm_uart_pops,
1351 .flags = UPF_BOOT_AUTOCONF,
1352 .fifosize = 64,
1353 .line = 2,
1354 },
1355 },
1356 };
1357
1358 #define UART_NR ARRAY_SIZE(msm_uart_ports)
1359
1360 static inline struct uart_port *msm_get_port_from_line(unsigned int line)
1361 {
1362 return &msm_uart_ports[line].uart;
1363 }
1364
1365 #ifdef CONFIG_SERIAL_MSM_CONSOLE
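/*
 * Write a string to the Tx FIFO by polling, expanding '\n' to "\r\n".
 * Shared by the regular console and the earlycon paths.
 */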
1366 static void __msm_console_write(struct uart_port *port, const char *s,
1367 unsigned int count, bool is_uartdm)
1368 {
1369 int i;
1370 int num_newlines = 0;
1371 bool replaced = false;
1372 void __iomem *tf;
1373
1374 if (is_uartdm)
1375 tf = port->membase + UARTDM_TF;
1376 else
1377 tf = port->membase + UART_TF;
1378
1379 /* Account for newlines that will get a carriage return added */
1380 for (i = 0; i < count; i++)
1381 if (s[i] == '\n')
1382 num_newlines++;
1383 count += num_newlines;
1384
1385 spin_lock(&port->lock);
1386 if (is_uartdm)
1387 msm_reset_dm_count(port, count);
1388
1389 i = 0;
1390 while (i < count) {
1391 int j;
1392 unsigned int num_chars;
1393 char buf[4] = { 0 };
1394
1395 if (is_uartdm)
1396 num_chars = min(count - i, (unsigned int)sizeof(buf));
1397 else
1398 num_chars = 1;
1399
1400 for (j = 0; j < num_chars; j++) {
1401 char c = *s;
1402
1403 if (c == '\n' && !replaced) {
1404 buf[j] = '\r';
1405 j++;
1406 replaced = true;
1407 }
1408 if (j < num_chars) {
1409 buf[j] = c;
1410 s++;
1411 replaced = false;
1412 }
1413 }
1414
1415 while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
1416 cpu_relax();
1417
1418 iowrite32_rep(tf, buf, 1);
1419 i += num_chars;
1420 }
1421 spin_unlock(&port->lock);
1422 }
1423
1424 static void msm_console_write(struct console *co, const char *s,
1425 unsigned int count)
1426 {
1427 struct uart_port *port;
1428 struct msm_port *msm_port;
1429
1430 BUG_ON(co->index < 0 || co->index >= UART_NR);
1431
1432 port = msm_get_port_from_line(co->index);
1433 msm_port = UART_TO_MSM(port);
1434
1435 __msm_console_write(port, s, count, msm_port->is_uartdm);
1436 }
1437
1438 static int __init msm_console_setup(struct console *co, char *options)
1439 {
1440 struct uart_port *port;
1441 int baud = 115200;
1442 int bits = 8;
1443 int parity = 'n';
1444 int flow = 'n';
1445
1446 if (unlikely(co->index >= UART_NR || co->index < 0))
1447 return -ENXIO;
1448
1449 port = msm_get_port_from_line(co->index);
1450
1451 if (unlikely(!port->membase))
1452 return -ENXIO;
1453
1454 msm_init_clock(port);
1455
1456 if (options)
1457 uart_parse_options(options, &baud, &parity, &bits, &flow);
1458
1459 pr_info("msm_serial: console setup on port #%d\n", port->line);
1460
1461 return uart_set_options(port, co, baud, parity, bits, flow);
1462 }
1463
1464 static void
1465 msm_serial_early_write(struct console *con, const char *s, unsigned n)
1466 {
1467 struct earlycon_device *dev = con->data;
1468
1469 __msm_console_write(&dev->port, s, n, false);
1470 }
1471
1472 static int __init
1473 msm_serial_early_console_setup(struct earlycon_device *device, const char *opt)
1474 {
1475 if (!device->port.membase)
1476 return -ENODEV;
1477
1478 device->con->write = msm_serial_early_write;
1479 return 0;
1480 }
1481 EARLYCON_DECLARE(msm_serial, msm_serial_early_console_setup);
1482 OF_EARLYCON_DECLARE(msm_serial, "qcom,msm-uart",
1483 msm_serial_early_console_setup);
1484
1485 static void
1486 msm_serial_early_write_dm(struct console *con, const char *s, unsigned n)
1487 {
1488 struct earlycon_device *dev = con->data;
1489
1490 __msm_console_write(&dev->port, s, n, true);
1491 }
1492
1493 static int __init
1494 msm_serial_early_console_setup_dm(struct earlycon_device *device,
1495 const char *opt)
1496 {
1497 if (!device->port.membase)
1498 return -ENODEV;
1499
1500 device->con->write = msm_serial_early_write_dm;
1501 return 0;
1502 }
1503 EARLYCON_DECLARE(msm_serial_dm, msm_serial_early_console_setup_dm);
1504 OF_EARLYCON_DECLARE(msm_serial_dm, "qcom,msm-uartdm",
1505 msm_serial_early_console_setup_dm);
1506
1507 static struct uart_driver msm_uart_driver;
1508
1509 static struct console msm_console = {
1510 .name = "ttyMSM",
1511 .write = msm_console_write,
1512 .device = uart_console_device,
1513 .setup = msm_console_setup,
1514 .flags = CON_PRINTBUFFER,
1515 .index = -1,
1516 .data = &msm_uart_driver,
1517 };
1518
1519 #define MSM_CONSOLE (&msm_console)
1520
1521 #else
1522 #define MSM_CONSOLE NULL
1523 #endif
1524
1525 static struct uart_driver msm_uart_driver = {
1526 .owner = THIS_MODULE,
1527 .driver_name = "msm_serial",
1528 .dev_name = "ttyMSM",
1529 .nr = UART_NR,
1530 .cons = MSM_CONSOLE,
1531 };
1532
1533 static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
1534
1535 static const struct of_device_id msm_uartdm_table[] = {
1536 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
1537 { .compatible = "qcom,msm-uartdm-v1.2", .data = (void *)UARTDM_1P2 },
1538 { .compatible = "qcom,msm-uartdm-v1.3", .data = (void *)UARTDM_1P3 },
1539 { .compatible = "qcom,msm-uartdm-v1.4", .data = (void *)UARTDM_1P4 },
1540 { }
1541 };
1542
1543 static int msm_serial_probe(struct platform_device *pdev)
1544 {
1545 struct msm_port *msm_port;
1546 struct resource *resource;
1547 struct uart_port *port;
1548 const struct of_device_id *id;
1549 int irq, line;
1550
1551 if (pdev->dev.of_node)
1552 line = of_alias_get_id(pdev->dev.of_node, "serial");
1553 else
1554 line = pdev->id;
1555
1556 if (line < 0)
1557 line = atomic_inc_return(&msm_uart_next_id) - 1;
1558
1559 if (unlikely(line < 0 || line >= UART_NR))
1560 return -ENXIO;
1561
1562 dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line);
1563
1564 port = msm_get_port_from_line(line);
1565 port->dev = &pdev->dev;
1566 msm_port = UART_TO_MSM(port);
1567
1568 id = of_match_device(msm_uartdm_table, &pdev->dev);
1569 if (id)
1570 msm_port->is_uartdm = (unsigned long)id->data;
1571 else
1572 msm_port->is_uartdm = 0;
1573
1574 msm_port->clk = devm_clk_get(&pdev->dev, "core");
1575 if (IS_ERR(msm_port->clk))
1576 return PTR_ERR(msm_port->clk);
1577
1578 if (msm_port->is_uartdm) {
1579 msm_port->pclk = devm_clk_get(&pdev->dev, "iface");
1580 if (IS_ERR(msm_port->pclk))
1581 return PTR_ERR(msm_port->pclk);
1582
1583 clk_set_rate(msm_port->clk, 1843200);
1584 }
1585
1586 port->uartclk = clk_get_rate(msm_port->clk);
1587 dev_info(&pdev->dev, "uartclk = %d\n", port->uartclk);
1588
1589 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1590 if (unlikely(!resource))
1591 return -ENXIO;
1592 port->mapbase = resource->start;
1593
1594 irq = platform_get_irq(pdev, 0);
1595 if (unlikely(irq < 0))
1596 return -ENXIO;
1597 port->irq = irq;
1598
1599 platform_set_drvdata(pdev, port);
1600
1601 return uart_add_one_port(&msm_uart_driver, port);
1602 }
1603
1604 static int msm_serial_remove(struct platform_device *pdev)
1605 {
1606 struct uart_port *port = platform_get_drvdata(pdev);
1607
1608 uart_remove_one_port(&msm_uart_driver, port);
1609
1610 return 0;
1611 }
1612
1613 static const struct of_device_id msm_match_table[] = {
1614 { .compatible = "qcom,msm-uart" },
1615 { .compatible = "qcom,msm-uartdm" },
1616 {}
1617 };
1618
1619 static struct platform_driver msm_platform_driver = {
1620 .remove = msm_serial_remove,
1621 .probe = msm_serial_probe,
1622 .driver = {
1623 .name = "msm_serial",
1624 .of_match_table = msm_match_table,
1625 },
1626 };
1627
1628 static int __init msm_serial_init(void)
1629 {
1630 int ret;
1631
1632 ret = uart_register_driver(&msm_uart_driver);
1633 if (unlikely(ret))
1634 return ret;
1635
1636 ret = platform_driver_register(&msm_platform_driver);
1637 if (unlikely(ret))
1638 uart_unregister_driver(&msm_uart_driver);
1639
1640 pr_info("msm_serial: driver initialized\n");
1641
1642 return ret;
1643 }
1644
1645 static void __exit msm_serial_exit(void)
1646 {
1647 platform_driver_unregister(&msm_platform_driver);
1648 uart_unregister_driver(&msm_uart_driver);
1649 }
1650
1651 module_init(msm_serial_init);
1652 module_exit(msm_serial_exit);
1653
1654 MODULE_AUTHOR("Robert Love <rlove@google.com>");
1655 MODULE_DESCRIPTION("Driver for msm7x serial device");
1656 MODULE_LICENSE("GPL");