drivers/spi/spi-omap2-mcspi.c
spi: omap2-mcspi: Convert to let spi core validate transfer speed
1 /*
2 * OMAP2 McSPI controller driver
3 *
4 * Copyright (C) 2005, 2006 Nokia Corporation
5 * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
6 * Juha Yrjölä <juha.yrjola@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/device.h>
29 #include <linux/delay.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/dmaengine.h>
32 #include <linux/omap-dma.h>
33 #include <linux/platform_device.h>
34 #include <linux/err.h>
35 #include <linux/clk.h>
36 #include <linux/io.h>
37 #include <linux/slab.h>
38 #include <linux/pm_runtime.h>
39 #include <linux/of.h>
40 #include <linux/of_device.h>
41 #include <linux/gcd.h>
42
43 #include <linux/spi/spi.h>
44
45 #include <linux/platform_data/spi-omap2-mcspi.h>
46
47 #define OMAP2_MCSPI_MAX_FREQ 48000000
48 #define OMAP2_MCSPI_MAX_DIVIDER 4096
49 #define OMAP2_MCSPI_MAX_FIFODEPTH 64
50 #define OMAP2_MCSPI_MAX_FIFOWCNT 0xFFFF
51 #define SPI_AUTOSUSPEND_TIMEOUT 2000
52
53 #define OMAP2_MCSPI_REVISION 0x00
54 #define OMAP2_MCSPI_SYSSTATUS 0x14
55 #define OMAP2_MCSPI_IRQSTATUS 0x18
56 #define OMAP2_MCSPI_IRQENABLE 0x1c
57 #define OMAP2_MCSPI_WAKEUPENABLE 0x20
58 #define OMAP2_MCSPI_SYST 0x24
59 #define OMAP2_MCSPI_MODULCTRL 0x28
60 #define OMAP2_MCSPI_XFERLEVEL 0x7c
61
62 /* per-channel banks, 0x14 bytes each, first is: */
63 #define OMAP2_MCSPI_CHCONF0 0x2c
64 #define OMAP2_MCSPI_CHSTAT0 0x30
65 #define OMAP2_MCSPI_CHCTRL0 0x34
66 #define OMAP2_MCSPI_TX0 0x38
67 #define OMAP2_MCSPI_RX0 0x3c
68
69 /* per-register bitmasks: */
70 #define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17)
71
72 #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
73 #define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
74 #define OMAP2_MCSPI_MODULCTRL_STEST BIT(3)
75
76 #define OMAP2_MCSPI_CHCONF_PHA BIT(0)
77 #define OMAP2_MCSPI_CHCONF_POL BIT(1)
78 #define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2)
79 #define OMAP2_MCSPI_CHCONF_EPOL BIT(6)
80 #define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7)
81 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY BIT(12)
82 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY BIT(13)
83 #define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12)
84 #define OMAP2_MCSPI_CHCONF_DMAW BIT(14)
85 #define OMAP2_MCSPI_CHCONF_DMAR BIT(15)
86 #define OMAP2_MCSPI_CHCONF_DPE0 BIT(16)
87 #define OMAP2_MCSPI_CHCONF_DPE1 BIT(17)
88 #define OMAP2_MCSPI_CHCONF_IS BIT(18)
89 #define OMAP2_MCSPI_CHCONF_TURBO BIT(19)
90 #define OMAP2_MCSPI_CHCONF_FORCE BIT(20)
91 #define OMAP2_MCSPI_CHCONF_FFET BIT(27)
92 #define OMAP2_MCSPI_CHCONF_FFER BIT(28)
93 #define OMAP2_MCSPI_CHCONF_CLKG BIT(29)
94
95 #define OMAP2_MCSPI_CHSTAT_RXS BIT(0)
96 #define OMAP2_MCSPI_CHSTAT_TXS BIT(1)
97 #define OMAP2_MCSPI_CHSTAT_EOT BIT(2)
98 #define OMAP2_MCSPI_CHSTAT_TXFFE BIT(3)
99
100 #define OMAP2_MCSPI_CHCTRL_EN BIT(0)
101 #define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK (0xff << 8)
102
103 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0)
104
105 /* We have 2 DMA channels per CS, one for RX and one for TX */
106 struct omap2_mcspi_dma {
107 struct dma_chan *dma_tx;
108 struct dma_chan *dma_rx;
109
110 int dma_tx_sync_dev;
111 int dma_rx_sync_dev;
112
113 struct completion dma_tx_completion;
114 struct completion dma_rx_completion;
115
116 char dma_rx_ch_name[14];
117 char dma_tx_ch_name[14];
118 };
119
120 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
121 * cache operations; a better heuristic would also consider word size and bit rate.
122 */
123 #define DMA_MIN_BYTES 160
124
125
126 /*
127 * Used for context save and restore; structure members must be updated
128 * whenever the corresponding registers are modified.
129 */
130 struct omap2_mcspi_regs {
131 u32 modulctrl;
132 u32 wakeupenable;
133 struct list_head cs;
134 };
135
136 struct omap2_mcspi {
137 struct spi_master *master;
138 /* Virtual base address of the controller */
139 void __iomem *base;
140 unsigned long phys;
141 /* SPI1 has 4 channels, while SPI2 has 2 */
142 struct omap2_mcspi_dma *dma_channels;
143 struct device *dev;
144 struct omap2_mcspi_regs ctx;
145 int fifo_depth;
146 unsigned int pin_dir:1;
147 };
148
149 struct omap2_mcspi_cs {
150 void __iomem *base;
151 unsigned long phys;
152 int word_len;
153 struct list_head node;
154 /* Context save and restore shadow register */
155 u32 chconf0, chctrl0;
156 };
157
158 static inline void mcspi_write_reg(struct spi_master *master,
159 int idx, u32 val)
160 {
161 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
162
163 writel_relaxed(val, mcspi->base + idx);
164 }
165
166 static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
167 {
168 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
169
170 return readl_relaxed(mcspi->base + idx);
171 }
172
173 static inline void mcspi_write_cs_reg(const struct spi_device *spi,
174 int idx, u32 val)
175 {
176 struct omap2_mcspi_cs *cs = spi->controller_state;
177
178 writel_relaxed(val, cs->base + idx);
179 }
180
181 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
182 {
183 struct omap2_mcspi_cs *cs = spi->controller_state;
184
185 return readl_relaxed(cs->base + idx);
186 }
187
188 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
189 {
190 struct omap2_mcspi_cs *cs = spi->controller_state;
191
192 return cs->chconf0;
193 }
194
195 static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
196 {
197 struct omap2_mcspi_cs *cs = spi->controller_state;
198
199 cs->chconf0 = val;
200 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
201 mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
202 }
203
204 static inline int mcspi_bytes_per_word(int word_len)
205 {
206 if (word_len <= 8)
207 return 1;
208 else if (word_len <= 16)
209 return 2;
210 else /* word_len <= 32 */
211 return 4;
212 }
213
214 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
215 int is_read, int enable)
216 {
217 u32 l, rw;
218
219 l = mcspi_cached_chconf0(spi);
220
221 if (is_read) /* 1 is read, 0 write */
222 rw = OMAP2_MCSPI_CHCONF_DMAR;
223 else
224 rw = OMAP2_MCSPI_CHCONF_DMAW;
225
226 if (enable)
227 l |= rw;
228 else
229 l &= ~rw;
230
231 mcspi_write_chconf0(spi, l);
232 }
233
234 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
235 {
236 struct omap2_mcspi_cs *cs = spi->controller_state;
237 u32 l;
238
239 l = cs->chctrl0;
240 if (enable)
241 l |= OMAP2_MCSPI_CHCTRL_EN;
242 else
243 l &= ~OMAP2_MCSPI_CHCTRL_EN;
244 cs->chctrl0 = l;
245 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
246 /* Flush posted writes */
247 mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
248 }
249
250 static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
251 {
252 u32 l;
253
254 l = mcspi_cached_chconf0(spi);
255 if (cs_active)
256 l |= OMAP2_MCSPI_CHCONF_FORCE;
257 else
258 l &= ~OMAP2_MCSPI_CHCONF_FORCE;
259
260 mcspi_write_chconf0(spi, l);
261 }
262
263 static void omap2_mcspi_set_master_mode(struct spi_master *master)
264 {
265 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
266 struct omap2_mcspi_regs *ctx = &mcspi->ctx;
267 u32 l;
268
269 /*
270 * Setup when switching from (reset default) slave mode
271 * to single-channel master mode
272 */
273 l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
274 l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
275 l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
276 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
277
278 ctx->modulctrl = l;
279 }
280
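/*
 * Enable or disable the per-channel FIFO for this transfer.  When enabling,
 * pick a trigger level (the GCD of the transfer length and the usable FIFO
 * depth) and program XFERLEVEL with the total word count plus the RX/TX
 * almost-full/almost-empty levels; fall back to non-FIFO operation if no
 * suitable level exists or the word count would overflow WCNT.
 */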
281 static void omap2_mcspi_set_fifo(const struct spi_device *spi,
282 struct spi_transfer *t, int enable)
283 {
284 struct spi_master *master = spi->master;
285 struct omap2_mcspi_cs *cs = spi->controller_state;
286 struct omap2_mcspi *mcspi;
287 unsigned int wcnt;
288 int max_fifo_depth, fifo_depth, bytes_per_word;
289 u32 chconf, xferlevel;
290
291 mcspi = spi_master_get_devdata(master);
292
293 chconf = mcspi_cached_chconf0(spi);
294 if (enable) {
295 bytes_per_word = mcspi_bytes_per_word(cs->word_len);
296 if (t->len % bytes_per_word != 0)
297 goto disable_fifo;
298
299 if (t->rx_buf != NULL && t->tx_buf != NULL)
300 max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
301 else
302 max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
303
304 fifo_depth = gcd(t->len, max_fifo_depth);
305 if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
306 goto disable_fifo;
307
308 wcnt = t->len / bytes_per_word;
309 if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
310 goto disable_fifo;
311
312 xferlevel = wcnt << 16;
313 if (t->rx_buf != NULL) {
314 chconf |= OMAP2_MCSPI_CHCONF_FFER;
315 xferlevel |= (fifo_depth - 1) << 8;
316 }
317 if (t->tx_buf != NULL) {
318 chconf |= OMAP2_MCSPI_CHCONF_FFET;
319 xferlevel |= fifo_depth - 1;
320 }
321
322 mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
323 mcspi_write_chconf0(spi, chconf);
324 mcspi->fifo_depth = fifo_depth;
325
326 return;
327 }
328
329 disable_fifo:
330 if (t->rx_buf != NULL)
331 chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
332 else
333 chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
334
335 mcspi_write_chconf0(spi, chconf);
336 mcspi->fifo_depth = 0;
337 }
338
339 static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
340 {
341 struct spi_master *spi_cntrl = mcspi->master;
342 struct omap2_mcspi_regs *ctx = &mcspi->ctx;
343 struct omap2_mcspi_cs *cs;
344
345 /* McSPI: context restore */
346 mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
347 mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
348
349 list_for_each_entry(cs, &ctx->cs, node)
350 writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
351 }
352
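/* Poll a status register until the given bit is set, giving up with
 * -ETIMEDOUT after one second.
 */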
353 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
354 {
355 unsigned long timeout;
356
357 timeout = jiffies + msecs_to_jiffies(1000);
358 while (!(readl_relaxed(reg) & bit)) {
359 if (time_after(jiffies, timeout)) {
360 if (!(readl_relaxed(reg) & bit))
361 return -ETIMEDOUT;
362 else
363 return 0;
364 }
365 cpu_relax();
366 }
367 return 0;
368 }
369
370 static void omap2_mcspi_rx_callback(void *data)
371 {
372 struct spi_device *spi = data;
373 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
374 struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
375
376 /* We must disable the DMA RX request */
377 omap2_mcspi_set_dma_req(spi, 1, 0);
378
379 complete(&mcspi_dma->dma_rx_completion);
380 }
381
382 static void omap2_mcspi_tx_callback(void *data)
383 {
384 struct spi_device *spi = data;
385 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
386 struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
387
388 /* We must disable the DMA TX request */
389 omap2_mcspi_set_dma_req(spi, 0, 0);
390
391 complete(&mcspi_dma->dma_tx_completion);
392 }
393
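/*
 * Start a DMA transmit: wrap the pre-mapped TX buffer in a one-entry
 * scatterlist, submit it on the TX channel and raise the McSPI TX DMA
 * request.  Completion is signalled from omap2_mcspi_tx_callback().
 */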
394 static void omap2_mcspi_tx_dma(struct spi_device *spi,
395 struct spi_transfer *xfer,
396 struct dma_slave_config cfg)
397 {
398 struct omap2_mcspi *mcspi;
399 struct omap2_mcspi_dma *mcspi_dma;
400 unsigned int count;
401
402 mcspi = spi_master_get_devdata(spi->master);
403 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
404 count = xfer->len;
405
406 if (mcspi_dma->dma_tx) {
407 struct dma_async_tx_descriptor *tx;
408 struct scatterlist sg;
409
410 dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
411
412 sg_init_table(&sg, 1);
413 sg_dma_address(&sg) = xfer->tx_dma;
414 sg_dma_len(&sg) = xfer->len;
415
416 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
417 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
418 if (tx) {
419 tx->callback = omap2_mcspi_tx_callback;
420 tx->callback_param = spi;
421 dmaengine_submit(tx);
422 } else {
423 /* FIXME: fall back to PIO? */
424 }
425 }
426 dma_async_issue_pending(mcspi_dma->dma_tx);
427 omap2_mcspi_set_dma_req(spi, 0, 1);
428
429 }
430
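/*
 * Receive a transfer via DMA.  When no FIFO is used, the DMA request covers
 * all but the last word (the last two in TURBO mode); after the DMA
 * completes those remaining words are drained from RX0 by PIO, and the
 * returned byte count is trimmed if they never arrived.
 */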
431 static unsigned
432 omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
433 struct dma_slave_config cfg,
434 unsigned es)
435 {
436 struct omap2_mcspi *mcspi;
437 struct omap2_mcspi_dma *mcspi_dma;
438 unsigned int count, dma_count;
439 u32 l;
440 int elements = 0;
441 int word_len, element_count;
442 struct omap2_mcspi_cs *cs = spi->controller_state;
443 mcspi = spi_master_get_devdata(spi->master);
444 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
445 count = xfer->len;
446 dma_count = xfer->len;
447
448 if (mcspi->fifo_depth == 0)
449 dma_count -= es;
450
451 word_len = cs->word_len;
452 l = mcspi_cached_chconf0(spi);
453
454 if (word_len <= 8)
455 element_count = count;
456 else if (word_len <= 16)
457 element_count = count >> 1;
458 else /* word_len <= 32 */
459 element_count = count >> 2;
460
461 if (mcspi_dma->dma_rx) {
462 struct dma_async_tx_descriptor *tx;
463 struct scatterlist sg;
464
465 dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
466
467 if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
468 dma_count -= es;
469
470 sg_init_table(&sg, 1);
471 sg_dma_address(&sg) = xfer->rx_dma;
472 sg_dma_len(&sg) = dma_count;
473
474 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
475 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
476 DMA_CTRL_ACK);
477 if (tx) {
478 tx->callback = omap2_mcspi_rx_callback;
479 tx->callback_param = spi;
480 dmaengine_submit(tx);
481 } else {
482 /* FIXME: fall back to PIO? */
483 }
484 }
485
486 dma_async_issue_pending(mcspi_dma->dma_rx);
487 omap2_mcspi_set_dma_req(spi, 1, 1);
488
489 wait_for_completion(&mcspi_dma->dma_rx_completion);
490 dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
491 DMA_FROM_DEVICE);
492
493 if (mcspi->fifo_depth > 0)
494 return count;
495
496 omap2_mcspi_set_enable(spi, 0);
497
498 elements = element_count - 1;
499
500 if (l & OMAP2_MCSPI_CHCONF_TURBO) {
501 elements--;
502
503 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
504 & OMAP2_MCSPI_CHSTAT_RXS)) {
505 u32 w;
506
507 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
508 if (word_len <= 8)
509 ((u8 *)xfer->rx_buf)[elements++] = w;
510 else if (word_len <= 16)
511 ((u16 *)xfer->rx_buf)[elements++] = w;
512 else /* word_len <= 32 */
513 ((u32 *)xfer->rx_buf)[elements++] = w;
514 } else {
515 int bytes_per_word = mcspi_bytes_per_word(word_len);
516 dev_err(&spi->dev, "DMA RX penultimate word empty\n");
517 count -= (bytes_per_word << 1);
518 omap2_mcspi_set_enable(spi, 1);
519 return count;
520 }
521 }
522 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
523 & OMAP2_MCSPI_CHSTAT_RXS)) {
524 u32 w;
525
526 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
527 if (word_len <= 8)
528 ((u8 *)xfer->rx_buf)[elements] = w;
529 else if (word_len <= 16)
530 ((u16 *)xfer->rx_buf)[elements] = w;
531 else /* word_len <= 32 */
532 ((u32 *)xfer->rx_buf)[elements] = w;
533 } else {
534 dev_err(&spi->dev, "DMA RX last word empty\n");
535 count -= mcspi_bytes_per_word(word_len);
536 }
537 omap2_mcspi_set_enable(spi, 1);
538 return count;
539 }
540
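/*
 * Run one transfer using DMA.  Build a dma_slave_config from the word length
 * and FIFO depth, kick off TX and/or RX DMA as needed, wait for completion
 * (including the end-of-word-count interrupt when the FIFO is used) and, for
 * TX-only transfers, make sure TXFFE/TXS and EOT report an empty shifter.
 */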
541 static unsigned
542 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
543 {
544 struct omap2_mcspi *mcspi;
545 struct omap2_mcspi_cs *cs = spi->controller_state;
546 struct omap2_mcspi_dma *mcspi_dma;
547 unsigned int count;
548 u32 l;
549 u8 *rx;
550 const u8 *tx;
551 struct dma_slave_config cfg;
552 enum dma_slave_buswidth width;
553 unsigned es;
554 u32 burst;
555 void __iomem *chstat_reg;
556 void __iomem *irqstat_reg;
557 int wait_res;
558
559 mcspi = spi_master_get_devdata(spi->master);
560 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
561 l = mcspi_cached_chconf0(spi);
562
563
564 if (cs->word_len <= 8) {
565 width = DMA_SLAVE_BUSWIDTH_1_BYTE;
566 es = 1;
567 } else if (cs->word_len <= 16) {
568 width = DMA_SLAVE_BUSWIDTH_2_BYTES;
569 es = 2;
570 } else {
571 width = DMA_SLAVE_BUSWIDTH_4_BYTES;
572 es = 4;
573 }
574
575 count = xfer->len;
576 burst = 1;
577
578 if (mcspi->fifo_depth > 0) {
579 if (count > mcspi->fifo_depth)
580 burst = mcspi->fifo_depth / es;
581 else
582 burst = count / es;
583 }
584
585 memset(&cfg, 0, sizeof(cfg));
586 cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
587 cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
588 cfg.src_addr_width = width;
589 cfg.dst_addr_width = width;
590 cfg.src_maxburst = burst;
591 cfg.dst_maxburst = burst;
592
593 rx = xfer->rx_buf;
594 tx = xfer->tx_buf;
595
596 if (tx != NULL)
597 omap2_mcspi_tx_dma(spi, xfer, cfg);
598
599 if (rx != NULL)
600 count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
601
602 if (tx != NULL) {
603 wait_for_completion(&mcspi_dma->dma_tx_completion);
604 dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
605 DMA_TO_DEVICE);
606
607 if (mcspi->fifo_depth > 0) {
608 irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
609
610 if (mcspi_wait_for_reg_bit(irqstat_reg,
611 OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
612 dev_err(&spi->dev, "EOW timed out\n");
613
614 mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
615 OMAP2_MCSPI_IRQSTATUS_EOW);
616 }
617
618 /* for TX_ONLY mode, be sure all words have shifted out */
619 if (rx == NULL) {
620 chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
621 if (mcspi->fifo_depth > 0) {
622 wait_res = mcspi_wait_for_reg_bit(chstat_reg,
623 OMAP2_MCSPI_CHSTAT_TXFFE);
624 if (wait_res < 0)
625 dev_err(&spi->dev, "TXFFE timed out\n");
626 } else {
627 wait_res = mcspi_wait_for_reg_bit(chstat_reg,
628 OMAP2_MCSPI_CHSTAT_TXS);
629 if (wait_res < 0)
630 dev_err(&spi->dev, "TXS timed out\n");
631 }
632 if (wait_res >= 0 &&
633 (mcspi_wait_for_reg_bit(chstat_reg,
634 OMAP2_MCSPI_CHSTAT_EOT) < 0))
635 dev_err(&spi->dev, "EOT timed out\n");
636 }
637 }
638 return count;
639 }
640
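/*
 * Run one transfer word by word under PIO, waiting on the TXS/RXS status
 * bits before each register access.  For RX-only transfers the channel is
 * disabled before reading the final word(s) so the controller does not
 * clock in data beyond the requested length.
 */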
641 static unsigned
642 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
643 {
644 struct omap2_mcspi *mcspi;
645 struct omap2_mcspi_cs *cs = spi->controller_state;
646 unsigned int count, c;
647 u32 l;
648 void __iomem *base = cs->base;
649 void __iomem *tx_reg;
650 void __iomem *rx_reg;
651 void __iomem *chstat_reg;
652 int word_len;
653
654 mcspi = spi_master_get_devdata(spi->master);
655 count = xfer->len;
656 c = count;
657 word_len = cs->word_len;
658
659 l = mcspi_cached_chconf0(spi);
660
661 /* We store the pre-calculated register addresses on stack to speed
662 * up the transfer loop. */
663 tx_reg = base + OMAP2_MCSPI_TX0;
664 rx_reg = base + OMAP2_MCSPI_RX0;
665 chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
666
667 if (c < (word_len>>3))
668 return 0;
669
670 if (word_len <= 8) {
671 u8 *rx;
672 const u8 *tx;
673
674 rx = xfer->rx_buf;
675 tx = xfer->tx_buf;
676
677 do {
678 c -= 1;
679 if (tx != NULL) {
680 if (mcspi_wait_for_reg_bit(chstat_reg,
681 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
682 dev_err(&spi->dev, "TXS timed out\n");
683 goto out;
684 }
685 dev_vdbg(&spi->dev, "write-%d %02x\n",
686 word_len, *tx);
687 writel_relaxed(*tx++, tx_reg);
688 }
689 if (rx != NULL) {
690 if (mcspi_wait_for_reg_bit(chstat_reg,
691 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
692 dev_err(&spi->dev, "RXS timed out\n");
693 goto out;
694 }
695
696 if (c == 1 && tx == NULL &&
697 (l & OMAP2_MCSPI_CHCONF_TURBO)) {
698 omap2_mcspi_set_enable(spi, 0);
699 *rx++ = readl_relaxed(rx_reg);
700 dev_vdbg(&spi->dev, "read-%d %02x\n",
701 word_len, *(rx - 1));
702 if (mcspi_wait_for_reg_bit(chstat_reg,
703 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
704 dev_err(&spi->dev,
705 "RXS timed out\n");
706 goto out;
707 }
708 c = 0;
709 } else if (c == 0 && tx == NULL) {
710 omap2_mcspi_set_enable(spi, 0);
711 }
712
713 *rx++ = readl_relaxed(rx_reg);
714 dev_vdbg(&spi->dev, "read-%d %02x\n",
715 word_len, *(rx - 1));
716 }
717 } while (c);
718 } else if (word_len <= 16) {
719 u16 *rx;
720 const u16 *tx;
721
722 rx = xfer->rx_buf;
723 tx = xfer->tx_buf;
724 do {
725 c -= 2;
726 if (tx != NULL) {
727 if (mcspi_wait_for_reg_bit(chstat_reg,
728 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
729 dev_err(&spi->dev, "TXS timed out\n");
730 goto out;
731 }
732 dev_vdbg(&spi->dev, "write-%d %04x\n",
733 word_len, *tx);
734 writel_relaxed(*tx++, tx_reg);
735 }
736 if (rx != NULL) {
737 if (mcspi_wait_for_reg_bit(chstat_reg,
738 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
739 dev_err(&spi->dev, "RXS timed out\n");
740 goto out;
741 }
742
743 if (c == 2 && tx == NULL &&
744 (l & OMAP2_MCSPI_CHCONF_TURBO)) {
745 omap2_mcspi_set_enable(spi, 0);
746 *rx++ = readl_relaxed(rx_reg);
747 dev_vdbg(&spi->dev, "read-%d %04x\n",
748 word_len, *(rx - 1));
749 if (mcspi_wait_for_reg_bit(chstat_reg,
750 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
751 dev_err(&spi->dev,
752 "RXS timed out\n");
753 goto out;
754 }
755 c = 0;
756 } else if (c == 0 && tx == NULL) {
757 omap2_mcspi_set_enable(spi, 0);
758 }
759
760 *rx++ = readl_relaxed(rx_reg);
761 dev_vdbg(&spi->dev, "read-%d %04x\n",
762 word_len, *(rx - 1));
763 }
764 } while (c >= 2);
765 } else if (word_len <= 32) {
766 u32 *rx;
767 const u32 *tx;
768
769 rx = xfer->rx_buf;
770 tx = xfer->tx_buf;
771 do {
772 c -= 4;
773 if (tx != NULL) {
774 if (mcspi_wait_for_reg_bit(chstat_reg,
775 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
776 dev_err(&spi->dev, "TXS timed out\n");
777 goto out;
778 }
779 dev_vdbg(&spi->dev, "write-%d %08x\n",
780 word_len, *tx);
781 writel_relaxed(*tx++, tx_reg);
782 }
783 if (rx != NULL) {
784 if (mcspi_wait_for_reg_bit(chstat_reg,
785 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
786 dev_err(&spi->dev, "RXS timed out\n");
787 goto out;
788 }
789
790 if (c == 4 && tx == NULL &&
791 (l & OMAP2_MCSPI_CHCONF_TURBO)) {
792 omap2_mcspi_set_enable(spi, 0);
793 *rx++ = readl_relaxed(rx_reg);
794 dev_vdbg(&spi->dev, "read-%d %08x\n",
795 word_len, *(rx - 1));
796 if (mcspi_wait_for_reg_bit(chstat_reg,
797 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
798 dev_err(&spi->dev,
799 "RXS timed out\n");
800 goto out;
801 }
802 c = 0;
803 } else if (c == 0 && tx == NULL) {
804 omap2_mcspi_set_enable(spi, 0);
805 }
806
807 *rx++ = readl_relaxed(rx_reg);
808 dev_vdbg(&spi->dev, "read-%d %08x\n",
809 word_len, *(rx - 1));
810 }
811 } while (c >= 4);
812 }
813
814 /* for TX_ONLY mode, be sure all words have shifted out */
815 if (xfer->rx_buf == NULL) {
816 if (mcspi_wait_for_reg_bit(chstat_reg,
817 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
818 dev_err(&spi->dev, "TXS timed out\n");
819 } else if (mcspi_wait_for_reg_bit(chstat_reg,
820 OMAP2_MCSPI_CHSTAT_EOT) < 0)
821 dev_err(&spi->dev, "EOT timed out\n");
822
823 /* disable the channel to purge RX data received during the TX_ONLY
824 * transfer; otherwise this stale data would corrupt the immediately
825 * following RX_ONLY transfer.
826 */
827 omap2_mcspi_set_enable(spi, 0);
828 }
829 out:
830 omap2_mcspi_set_enable(spi, 1);
831 return count - c;
832 }
833
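/*
 * Return the smallest power-of-two clock divider (expressed as a shift of
 * 0..15) that does not make the bus run faster than the requested speed.
 */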
834 static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
835 {
836 u32 div;
837
838 for (div = 0; div < 15; div++)
839 if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
840 return div;
841
842 return 15;
843 }
844
845 /* called only when no transfer is active on this device */
846 static int omap2_mcspi_setup_transfer(struct spi_device *spi,
847 struct spi_transfer *t)
848 {
849 struct omap2_mcspi_cs *cs = spi->controller_state;
850 struct omap2_mcspi *mcspi;
851 struct spi_master *spi_cntrl;
852 u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
853 u8 word_len = spi->bits_per_word;
854 u32 speed_hz = spi->max_speed_hz;
855
856 mcspi = spi_master_get_devdata(spi->master);
857 spi_cntrl = mcspi->master;
858
859 if (t != NULL && t->bits_per_word)
860 word_len = t->bits_per_word;
861
862 cs->word_len = word_len;
863
864 if (t && t->speed_hz)
865 speed_hz = t->speed_hz;
866
867 speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
868 if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
869 clkd = omap2_mcspi_calc_divisor(speed_hz);
870 speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
871 clkg = 0;
872 } else {
873 div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
874 speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
875 clkd = (div - 1) & 0xf;
876 extclk = (div - 1) >> 4;
877 clkg = OMAP2_MCSPI_CHCONF_CLKG;
878 }
879
880 l = mcspi_cached_chconf0(spi);
881
882 /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
883 * REVISIT: this controller could support SPI_3WIRE mode.
884 */
885 if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
886 l &= ~OMAP2_MCSPI_CHCONF_IS;
887 l &= ~OMAP2_MCSPI_CHCONF_DPE1;
888 l |= OMAP2_MCSPI_CHCONF_DPE0;
889 } else {
890 l |= OMAP2_MCSPI_CHCONF_IS;
891 l |= OMAP2_MCSPI_CHCONF_DPE1;
892 l &= ~OMAP2_MCSPI_CHCONF_DPE0;
893 }
894
895 /* wordlength */
896 l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
897 l |= (word_len - 1) << 7;
898
899 /* set chipselect polarity; manage with FORCE */
900 if (!(spi->mode & SPI_CS_HIGH))
901 l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */
902 else
903 l &= ~OMAP2_MCSPI_CHCONF_EPOL;
904
905 /* set clock divisor */
906 l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
907 l |= clkd << 2;
908
909 /* set clock granularity */
910 l &= ~OMAP2_MCSPI_CHCONF_CLKG;
911 l |= clkg;
912 if (clkg) {
913 cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
914 cs->chctrl0 |= extclk << 8;
915 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
916 }
917
918 /* set SPI mode 0..3 */
919 if (spi->mode & SPI_CPOL)
920 l |= OMAP2_MCSPI_CHCONF_POL;
921 else
922 l &= ~OMAP2_MCSPI_CHCONF_POL;
923 if (spi->mode & SPI_CPHA)
924 l |= OMAP2_MCSPI_CHCONF_PHA;
925 else
926 l &= ~OMAP2_MCSPI_CHCONF_PHA;
927
928 mcspi_write_chconf0(spi, l);
929
930 dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
931 speed_hz,
932 (spi->mode & SPI_CPHA) ? "trailing" : "leading",
933 (spi->mode & SPI_CPOL) ? "inverted" : "normal");
934
935 return 0;
936 }
937
938 /*
939 * Note that we currently allow DMA only if we get a channel
940 * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
941 */
942 static int omap2_mcspi_request_dma(struct spi_device *spi)
943 {
944 struct spi_master *master = spi->master;
945 struct omap2_mcspi *mcspi;
946 struct omap2_mcspi_dma *mcspi_dma;
947 dma_cap_mask_t mask;
948 unsigned sig;
949
950 mcspi = spi_master_get_devdata(master);
951 mcspi_dma = mcspi->dma_channels + spi->chip_select;
952
953 init_completion(&mcspi_dma->dma_rx_completion);
954 init_completion(&mcspi_dma->dma_tx_completion);
955
956 dma_cap_zero(mask);
957 dma_cap_set(DMA_SLAVE, mask);
958 sig = mcspi_dma->dma_rx_sync_dev;
959
960 mcspi_dma->dma_rx =
961 dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
962 &sig, &master->dev,
963 mcspi_dma->dma_rx_ch_name);
964 if (!mcspi_dma->dma_rx)
965 goto no_dma;
966
967 sig = mcspi_dma->dma_tx_sync_dev;
968 mcspi_dma->dma_tx =
969 dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
970 &sig, &master->dev,
971 mcspi_dma->dma_tx_ch_name);
972
973 if (!mcspi_dma->dma_tx) {
974 dma_release_channel(mcspi_dma->dma_rx);
975 mcspi_dma->dma_rx = NULL;
976 goto no_dma;
977 }
978
979 return 0;
980
981 no_dma:
982 dev_warn(&spi->dev, "not using DMA for McSPI\n");
983 return -EAGAIN;
984 }
985
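/*
 * Per-device setup: allocate the per-chipselect state on first use and link
 * it into the context-restore list, request DMA channels if they are not
 * held yet, then program the channel with the device's default parameters.
 */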
986 static int omap2_mcspi_setup(struct spi_device *spi)
987 {
988 int ret;
989 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
990 struct omap2_mcspi_regs *ctx = &mcspi->ctx;
991 struct omap2_mcspi_dma *mcspi_dma;
992 struct omap2_mcspi_cs *cs = spi->controller_state;
993
994 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
995
996 if (!cs) {
997 cs = kzalloc(sizeof *cs, GFP_KERNEL);
998 if (!cs)
999 return -ENOMEM;
1000 cs->base = mcspi->base + spi->chip_select * 0x14;
1001 cs->phys = mcspi->phys + spi->chip_select * 0x14;
1002 cs->chconf0 = 0;
1003 cs->chctrl0 = 0;
1004 spi->controller_state = cs;
1005 /* Link this to context save list */
1006 list_add_tail(&cs->node, &ctx->cs);
1007 }
1008
1009 if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
1010 ret = omap2_mcspi_request_dma(spi);
1011 if (ret < 0 && ret != -EAGAIN)
1012 return ret;
1013 }
1014
1015 ret = pm_runtime_get_sync(mcspi->dev);
1016 if (ret < 0)
1017 return ret;
1018
1019 ret = omap2_mcspi_setup_transfer(spi, NULL);
1020 pm_runtime_mark_last_busy(mcspi->dev);
1021 pm_runtime_put_autosuspend(mcspi->dev);
1022
1023 return ret;
1024 }
1025
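/*
 * Undo omap2_mcspi_setup(): unlink and free the per-chipselect state and
 * release any DMA channels held for this device.
 */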
1026 static void omap2_mcspi_cleanup(struct spi_device *spi)
1027 {
1028 struct omap2_mcspi *mcspi;
1029 struct omap2_mcspi_dma *mcspi_dma;
1030 struct omap2_mcspi_cs *cs;
1031
1032 mcspi = spi_master_get_devdata(spi->master);
1033
1034 if (spi->controller_state) {
1035 /* Unlink controller state from context save list */
1036 cs = spi->controller_state;
1037 list_del(&cs->node);
1038
1039 kfree(cs);
1040 }
1041
1042 if (spi->chip_select < spi->master->num_chipselect) {
1043 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
1044
1045 if (mcspi_dma->dma_rx) {
1046 dma_release_channel(mcspi_dma->dma_rx);
1047 mcspi_dma->dma_rx = NULL;
1048 }
1049 if (mcspi_dma->dma_tx) {
1050 dma_release_channel(mcspi_dma->dma_tx);
1051 mcspi_dma->dma_tx = NULL;
1052 }
1053 }
1054 }
1055
1056 static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
1057 {
1058
1059 /* We only enable one channel at a time -- the one whose message is
1060 * at the head of the queue -- although this controller would gladly
1061 * arbitrate among multiple channels. This corresponds to "single
1062 * channel" master mode. As a side effect, we need to manage the
1063 * chipselect with the FORCE bit ... CS != channel enable.
1064 */
1065
1066 struct spi_device *spi;
1067 struct spi_transfer *t = NULL;
1068 struct spi_master *master;
1069 struct omap2_mcspi_dma *mcspi_dma;
1070 int cs_active = 0;
1071 struct omap2_mcspi_cs *cs;
1072 struct omap2_mcspi_device_config *cd;
1073 int par_override = 0;
1074 int status = 0;
1075 u32 chconf;
1076
1077 spi = m->spi;
1078 master = spi->master;
1079 mcspi_dma = mcspi->dma_channels + spi->chip_select;
1080 cs = spi->controller_state;
1081 cd = spi->controller_data;
1082
1083 omap2_mcspi_set_enable(spi, 0);
1084 list_for_each_entry(t, &m->transfers, transfer_list) {
1085 if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
1086 status = -EINVAL;
1087 break;
1088 }
1089 if (par_override ||
1090 (t->speed_hz != spi->max_speed_hz) ||
1091 (t->bits_per_word != spi->bits_per_word)) {
1092 par_override = 1;
1093 status = omap2_mcspi_setup_transfer(spi, t);
1094 if (status < 0)
1095 break;
1096 if (t->speed_hz == spi->max_speed_hz &&
1097 t->bits_per_word == spi->bits_per_word)
1098 par_override = 0;
1099 }
1100 if (cd && cd->cs_per_word) {
1101 chconf = mcspi->ctx.modulctrl;
1102 chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
1103 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1104 mcspi->ctx.modulctrl =
1105 mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1106 }
1107
1108
1109 if (!cs_active) {
1110 omap2_mcspi_force_cs(spi, 1);
1111 cs_active = 1;
1112 }
1113
1114 chconf = mcspi_cached_chconf0(spi);
1115 chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
1116 chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
1117
1118 if (t->tx_buf == NULL)
1119 chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
1120 else if (t->rx_buf == NULL)
1121 chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
1122
1123 if (cd && cd->turbo_mode && t->tx_buf == NULL) {
1124 /* Turbo mode is for more than one word */
1125 if (t->len > ((cs->word_len + 7) >> 3))
1126 chconf |= OMAP2_MCSPI_CHCONF_TURBO;
1127 }
1128
1129 mcspi_write_chconf0(spi, chconf);
1130
1131 if (t->len) {
1132 unsigned count;
1133
1134 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1135 (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
1136 omap2_mcspi_set_fifo(spi, t, 1);
1137
1138 omap2_mcspi_set_enable(spi, 1);
1139
1140 /* RX_ONLY mode needs dummy data in TX reg */
1141 if (t->tx_buf == NULL)
1142 writel_relaxed(0, cs->base
1143 + OMAP2_MCSPI_TX0);
1144
1145 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1146 (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
1147 count = omap2_mcspi_txrx_dma(spi, t);
1148 else
1149 count = omap2_mcspi_txrx_pio(spi, t);
1150 m->actual_length += count;
1151
1152 if (count != t->len) {
1153 status = -EIO;
1154 break;
1155 }
1156 }
1157
1158 if (t->delay_usecs)
1159 udelay(t->delay_usecs);
1160
1161 /* ignore the "leave it on after last xfer" hint */
1162 if (t->cs_change) {
1163 omap2_mcspi_force_cs(spi, 0);
1164 cs_active = 0;
1165 }
1166
1167 omap2_mcspi_set_enable(spi, 0);
1168
1169 if (mcspi->fifo_depth > 0)
1170 omap2_mcspi_set_fifo(spi, t, 0);
1171 }
1172 /* Restore defaults if they were overridden */
1173 if (par_override) {
1174 par_override = 0;
1175 status = omap2_mcspi_setup_transfer(spi, NULL);
1176 }
1177
1178 if (cs_active)
1179 omap2_mcspi_force_cs(spi, 0);
1180
1181 if (cd && cd->cs_per_word) {
1182 chconf = mcspi->ctx.modulctrl;
1183 chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
1184 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1185 mcspi->ctx.modulctrl =
1186 mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1187 }
1188
1189 omap2_mcspi_set_enable(spi, 0);
1190
1191 if (mcspi->fifo_depth > 0 && t)
1192 omap2_mcspi_set_fifo(spi, t, 0);
1193
1194 m->status = status;
1195 }
1196
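/*
 * Validate every transfer in the message and DMA-map its buffers when they
 * are large enough to be worth DMA, then process the whole message in
 * omap2_mcspi_work() and finalize it.
 */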
1197 static int omap2_mcspi_transfer_one_message(struct spi_master *master,
1198 struct spi_message *m)
1199 {
1200 struct spi_device *spi;
1201 struct omap2_mcspi *mcspi;
1202 struct omap2_mcspi_dma *mcspi_dma;
1203 struct spi_transfer *t;
1204
1205 spi = m->spi;
1206 mcspi = spi_master_get_devdata(master);
1207 mcspi_dma = mcspi->dma_channels + spi->chip_select;
1208 m->actual_length = 0;
1209 m->status = 0;
1210
1211 list_for_each_entry(t, &m->transfers, transfer_list) {
1212 const void *tx_buf = t->tx_buf;
1213 void *rx_buf = t->rx_buf;
1214 unsigned len = t->len;
1215
1216 if ((len && !(rx_buf || tx_buf))) {
1217 dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1218 t->speed_hz,
1219 len,
1220 tx_buf ? "tx" : "",
1221 rx_buf ? "rx" : "",
1222 t->bits_per_word);
1223 return -EINVAL;
1224 }
1225
1226 if (m->is_dma_mapped || len < DMA_MIN_BYTES)
1227 continue;
1228
1229 if (mcspi_dma->dma_tx && tx_buf != NULL) {
1230 t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
1231 len, DMA_TO_DEVICE);
1232 if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1233 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1234 'T', len);
1235 return -EINVAL;
1236 }
1237 }
1238 if (mcspi_dma->dma_rx && rx_buf != NULL) {
1239 t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
1240 DMA_FROM_DEVICE);
1241 if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
1242 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1243 'R', len);
1244 if (tx_buf != NULL)
1245 dma_unmap_single(mcspi->dev, t->tx_dma,
1246 len, DMA_TO_DEVICE);
1247 return -EINVAL;
1248 }
1249 }
1250 }
1251
1252 omap2_mcspi_work(mcspi, m);
1253 spi_finalize_current_message(master);
1254 return 0;
1255 }
1256
1257 static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
1258 {
1259 struct spi_master *master = mcspi->master;
1260 struct omap2_mcspi_regs *ctx = &mcspi->ctx;
1261 int ret = 0;
1262
1263 ret = pm_runtime_get_sync(mcspi->dev);
1264 if (ret < 0)
1265 return ret;
1266
1267 mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
1268 OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1269 ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1270
1271 omap2_mcspi_set_master_mode(master);
1272 pm_runtime_mark_last_busy(mcspi->dev);
1273 pm_runtime_put_autosuspend(mcspi->dev);
1274 return 0;
1275 }
1276
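/*
 * Runtime-resume hook: the module may have lost context while idle, so
 * rewrite MODULCTRL, WAKEUPENABLE and each channel's CHCONF0 from the
 * shadow copies.
 */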
1277 static int omap_mcspi_runtime_resume(struct device *dev)
1278 {
1279 struct omap2_mcspi *mcspi;
1280 struct spi_master *master;
1281
1282 master = dev_get_drvdata(dev);
1283 mcspi = spi_master_get_devdata(master);
1284 omap2_mcspi_restore_ctx(mcspi);
1285
1286 return 0;
1287 }
1288
1289 static struct omap2_mcspi_platform_config omap2_pdata = {
1290 .regs_offset = 0,
1291 };
1292
1293 static struct omap2_mcspi_platform_config omap4_pdata = {
1294 .regs_offset = OMAP4_MCSPI_REG_OFFSET,
1295 };
1296
1297 static const struct of_device_id omap_mcspi_of_match[] = {
1298 {
1299 .compatible = "ti,omap2-mcspi",
1300 .data = &omap2_pdata,
1301 },
1302 {
1303 .compatible = "ti,omap4-mcspi",
1304 .data = &omap4_pdata,
1305 },
1306 { },
1307 };
1308 MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
1309
1310 static int omap2_mcspi_probe(struct platform_device *pdev)
1311 {
1312 struct spi_master *master;
1313 const struct omap2_mcspi_platform_config *pdata;
1314 struct omap2_mcspi *mcspi;
1315 struct resource *r;
1316 int status = 0, i;
1317 u32 regs_offset = 0;
1318 static int bus_num = 1;
1319 struct device_node *node = pdev->dev.of_node;
1320 const struct of_device_id *match;
1321
1322 master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
1323 if (master == NULL) {
1324 dev_dbg(&pdev->dev, "master allocation failed\n");
1325 return -ENOMEM;
1326 }
1327
1328 /* the spi->mode bits understood by this driver: */
1329 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1330 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1331 master->setup = omap2_mcspi_setup;
1332 master->auto_runtime_pm = true;
1333 master->transfer_one_message = omap2_mcspi_transfer_one_message;
1334 master->cleanup = omap2_mcspi_cleanup;
1335 master->dev.of_node = node;
1336 master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
1337 master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
1338
1339 platform_set_drvdata(pdev, master);
1340
1341 mcspi = spi_master_get_devdata(master);
1342 mcspi->master = master;
1343
1344 match = of_match_device(omap_mcspi_of_match, &pdev->dev);
1345 if (match) {
1346 u32 num_cs = 1; /* default number of chipselect */
1347 pdata = match->data;
1348
1349 of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
1350 master->num_chipselect = num_cs;
1351 master->bus_num = bus_num++;
1352 if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
1353 mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
1354 } else {
1355 pdata = dev_get_platdata(&pdev->dev);
1356 master->num_chipselect = pdata->num_cs;
1357 if (pdev->id != -1)
1358 master->bus_num = pdev->id;
1359 mcspi->pin_dir = pdata->pin_dir;
1360 }
1361 regs_offset = pdata->regs_offset;
1362
1363 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1364 if (r == NULL) {
1365 status = -ENODEV;
1366 goto free_master;
1367 }
1368
1369 r->start += regs_offset;
1370 r->end += regs_offset;
1371 mcspi->phys = r->start;
1372
1373 mcspi->base = devm_ioremap_resource(&pdev->dev, r);
1374 if (IS_ERR(mcspi->base)) {
1375 status = PTR_ERR(mcspi->base);
1376 goto free_master;
1377 }
1378
1379 mcspi->dev = &pdev->dev;
1380
1381 INIT_LIST_HEAD(&mcspi->ctx.cs);
1382
1383 mcspi->dma_channels = kcalloc(master->num_chipselect,
1384 sizeof(struct omap2_mcspi_dma),
1385 GFP_KERNEL);
1386
1387 if (mcspi->dma_channels == NULL)
1388 goto free_master;
1389
1390 for (i = 0; i < master->num_chipselect; i++) {
1391 char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
1392 char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
1393 struct resource *dma_res;
1394
1395 sprintf(dma_rx_ch_name, "rx%d", i);
1396 if (!pdev->dev.of_node) {
1397 dma_res =
1398 platform_get_resource_byname(pdev,
1399 IORESOURCE_DMA,
1400 dma_rx_ch_name);
1401 if (!dma_res) {
1402 dev_dbg(&pdev->dev,
1403 "cannot get DMA RX channel\n");
1404 status = -ENODEV;
1405 break;
1406 }
1407
1408 mcspi->dma_channels[i].dma_rx_sync_dev =
1409 dma_res->start;
1410 }
1411 sprintf(dma_tx_ch_name, "tx%d", i);
1412 if (!pdev->dev.of_node) {
1413 dma_res =
1414 platform_get_resource_byname(pdev,
1415 IORESOURCE_DMA,
1416 dma_tx_ch_name);
1417 if (!dma_res) {
1418 dev_dbg(&pdev->dev,
1419 "cannot get DMA TX channel\n");
1420 status = -ENODEV;
1421 break;
1422 }
1423
1424 mcspi->dma_channels[i].dma_tx_sync_dev =
1425 dma_res->start;
1426 }
1427 }
1428
1429 if (status < 0)
1430 goto dma_chnl_free;
1431
1432 pm_runtime_use_autosuspend(&pdev->dev);
1433 pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
1434 pm_runtime_enable(&pdev->dev);
1435
1436 status = omap2_mcspi_master_setup(mcspi);
1437 if (status < 0)
1438 goto disable_pm;
1439
1440 status = devm_spi_register_master(&pdev->dev, master);
1441 if (status < 0)
1442 goto disable_pm;
1443
1444 return status;
1445
1446 disable_pm:
1447 pm_runtime_disable(&pdev->dev);
1448 dma_chnl_free:
1449 kfree(mcspi->dma_channels);
1450 free_master:
1451 spi_master_put(master);
1452 return status;
1453 }
1454
1455 static int omap2_mcspi_remove(struct platform_device *pdev)
1456 {
1457 struct spi_master *master;
1458 struct omap2_mcspi *mcspi;
1459 struct omap2_mcspi_dma *dma_channels;
1460
1461 master = platform_get_drvdata(pdev);
1462 mcspi = spi_master_get_devdata(master);
1463 dma_channels = mcspi->dma_channels;
1464
1465 pm_runtime_put_sync(mcspi->dev);
1466 pm_runtime_disable(&pdev->dev);
1467
1468 kfree(dma_channels);
1469
1470 return 0;
1471 }
1472
1473 /* work with hotplug and coldplug */
1474 MODULE_ALIAS("platform:omap2_mcspi");
1475
1476 #ifdef CONFIG_SUSPEND
1477 /*
1478 * When the SPI controller wakes up from off-mode, CS is in the active state.
1479 * If it was inactive when the driver was suspended, force it back to the
1480 * inactive state at wake-up.
1481 */
1482 static int omap2_mcspi_resume(struct device *dev)
1483 {
1484 struct spi_master *master = dev_get_drvdata(dev);
1485 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1486 struct omap2_mcspi_regs *ctx = &mcspi->ctx;
1487 struct omap2_mcspi_cs *cs;
1488
1489 pm_runtime_get_sync(mcspi->dev);
1490 list_for_each_entry(cs, &ctx->cs, node) {
1491 if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
1492 /*
1493 * We need to toggle the CS state for the OMAP to take this
1494 * change into account.
1495 */
1496 cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
1497 writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1498 cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
1499 writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1500 }
1501 }
1502 pm_runtime_mark_last_busy(mcspi->dev);
1503 pm_runtime_put_autosuspend(mcspi->dev);
1504 return 0;
1505 }
1506 #else
1507 #define omap2_mcspi_resume NULL
1508 #endif
1509
1510 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1511 .resume = omap2_mcspi_resume,
1512 .runtime_resume = omap_mcspi_runtime_resume,
1513 };
1514
1515 static struct platform_driver omap2_mcspi_driver = {
1516 .driver = {
1517 .name = "omap2_mcspi",
1518 .owner = THIS_MODULE,
1519 .pm = &omap2_mcspi_pm_ops,
1520 .of_match_table = omap_mcspi_of_match,
1521 },
1522 .probe = omap2_mcspi_probe,
1523 .remove = omap2_mcspi_remove,
1524 };
1525
1526 module_platform_driver(omap2_mcspi_driver);
1527 MODULE_LICENSE("GPL");