1 /*
2 * Copyright (C) 2009 Texas Instruments.
3 * Copyright (C) 2010 EF Johnson Technologies
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/gpio.h>
23 #include <linux/module.h>
24 #include <linux/delay.h>
25 #include <linux/platform_device.h>
26 #include <linux/err.h>
27 #include <linux/clk.h>
28 #include <linux/dmaengine.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/edma.h>
31 #include <linux/of.h>
32 #include <linux/of_device.h>
33 #include <linux/of_gpio.h>
34 #include <linux/spi/spi.h>
35 #include <linux/spi/spi_bitbang.h>
36 #include <linux/slab.h>
37
38 #include <linux/platform_data/spi-davinci.h>
39
40 #define SPI_NO_RESOURCE ((resource_size_t)-1)
41
42 #define CS_DEFAULT 0xFF
43
44 #define SPIFMT_PHASE_MASK BIT(16)
45 #define SPIFMT_POLARITY_MASK BIT(17)
46 #define SPIFMT_DISTIMER_MASK BIT(18)
47 #define SPIFMT_SHIFTDIR_MASK BIT(20)
48 #define SPIFMT_WAITENA_MASK BIT(21)
49 #define SPIFMT_PARITYENA_MASK BIT(22)
50 #define SPIFMT_ODD_PARITY_MASK BIT(23)
51 #define SPIFMT_WDELAY_MASK 0x3f000000u
52 #define SPIFMT_WDELAY_SHIFT 24
53 #define SPIFMT_PRESCALE_SHIFT 8
54
55 /* SPIPC0 */
56 #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
57 #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
58 #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
59 #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
60
61 #define SPIINT_MASKALL 0x0101035F
62 #define SPIINT_MASKINT 0x0000015F
63 #define SPI_INTLVL_1 0x000001FF
64 #define SPI_INTLVL_0 0x00000000
65
66 /* SPIDAT1 (upper 16 bit defines) */
67 #define SPIDAT1_CSHOLD_MASK BIT(12)
68 #define SPIDAT1_WDEL BIT(10)
69
70 /* SPIGCR1 */
71 #define SPIGCR1_CLKMOD_MASK BIT(1)
72 #define SPIGCR1_MASTER_MASK BIT(0)
73 #define SPIGCR1_POWERDOWN_MASK BIT(8)
74 #define SPIGCR1_LOOPBACK_MASK BIT(16)
75 #define SPIGCR1_SPIENA_MASK BIT(24)
76
77 /* SPIBUF */
78 #define SPIBUF_TXFULL_MASK BIT(29)
79 #define SPIBUF_RXEMPTY_MASK BIT(31)
80
81 /* SPIDELAY */
82 #define SPIDELAY_C2TDELAY_SHIFT 24
83 #define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT)
84 #define SPIDELAY_T2CDELAY_SHIFT 16
85 #define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT)
86 #define SPIDELAY_T2EDELAY_SHIFT 8
87 #define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT)
88 #define SPIDELAY_C2EDELAY_SHIFT 0
89 #define SPIDELAY_C2EDELAY_MASK 0xFF
90
91 /* Error Masks */
92 #define SPIFLG_DLEN_ERR_MASK BIT(0)
93 #define SPIFLG_TIMEOUT_MASK BIT(1)
94 #define SPIFLG_PARERR_MASK BIT(2)
95 #define SPIFLG_DESYNC_MASK BIT(3)
96 #define SPIFLG_BITERR_MASK BIT(4)
97 #define SPIFLG_OVRRUN_MASK BIT(6)
98 #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
99 #define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \
100 | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
101 | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
102 | SPIFLG_OVRRUN_MASK)
103
104 #define SPIINT_DMA_REQ_EN BIT(16)
105
106 /* SPI Controller registers */
107 #define SPIGCR0 0x00
108 #define SPIGCR1 0x04
109 #define SPIINT 0x08
110 #define SPILVL 0x0c
111 #define SPIFLG 0x10
112 #define SPIPC0 0x14
113 #define SPIDAT1 0x3c
114 #define SPIBUF 0x40
115 #define SPIDELAY 0x48
116 #define SPIDEF 0x4c
117 #define SPIFMT0 0x50
118
119 /* SPI Controller driver's private data. */
120 struct davinci_spi {
121 struct spi_bitbang bitbang;
122 struct clk *clk;
123
124 u8 version;
125 resource_size_t pbase;
126 void __iomem *base;
127 u32 irq;
128 struct completion done;
129
130 const void *tx;
131 void *rx;
132 int rcount;
133 int wcount;
134
135 struct dma_chan *dma_rx;
136 struct dma_chan *dma_tx;
137 int dma_rx_chnum;
138 int dma_tx_chnum;
139
140 struct davinci_spi_platform_data pdata;
141
142 void (*get_rx)(u32 rx_data, struct davinci_spi *);
143 u32 (*get_tx)(struct davinci_spi *);
144
145 u8 *bytes_per_word;
146 };
147
148 static struct davinci_spi_config davinci_spi_default_cfg;
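/*
 * Illustrative sketch only (the device name and field values are made up): a
 * board file can supply a per-device configuration by pointing
 * spi_board_info.controller_data at a struct davinci_spi_config, e.g.
 *
 *	static struct davinci_spi_config my_flash_spi_cfg = {
 *		.io_type = SPI_IO_TYPE_POLL,
 *		.wdelay  = 8,
 *	};
 *
 * Devices that do not provide controller_data fall back to the all-zero
 * davinci_spi_default_cfg above.
 */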
149
150 static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
151 {
152 if (dspi->rx) {
153 u8 *rx = dspi->rx;
154 *rx++ = (u8)data;
155 dspi->rx = rx;
156 }
157 }
158
159 static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
160 {
161 if (dspi->rx) {
162 u16 *rx = dspi->rx;
163 *rx++ = (u16)data;
164 dspi->rx = rx;
165 }
166 }
167
168 static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
169 {
170 u32 data = 0;
171
172 if (dspi->tx) {
173 const u8 *tx = dspi->tx;
174
175 data = *tx++;
176 dspi->tx = tx;
177 }
178 return data;
179 }
180
181 static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
182 {
183 u32 data = 0;
184
185 if (dspi->tx) {
186 const u16 *tx = dspi->tx;
187
188 data = *tx++;
189 dspi->tx = tx;
190 }
191 return data;
192 }
193
194 static inline void set_io_bits(void __iomem *addr, u32 bits)
195 {
196 u32 v = ioread32(addr);
197
198 v |= bits;
199 iowrite32(v, addr);
200 }
201
202 static inline void clear_io_bits(void __iomem *addr, u32 bits)
203 {
204 u32 v = ioread32(addr);
205
206 v &= ~bits;
207 iowrite32(v, addr);
208 }
209
210 /*
211 * Interface to control the chip select signal
212 */
213 static void davinci_spi_chipselect(struct spi_device *spi, int value)
214 {
215 struct davinci_spi *dspi;
216 struct davinci_spi_platform_data *pdata;
217 struct davinci_spi_config *spicfg = spi->controller_data;
218 u8 chip_sel = spi->chip_select;
219 u16 spidat1 = CS_DEFAULT;
220 bool gpio_chipsel = false;
221 int gpio;
222
223 dspi = spi_master_get_devdata(spi->master);
224 pdata = &dspi->pdata;
225
226 if (spi->cs_gpio >= 0) {
227 		/* the SPI core parses cs-gpios and sets spi->cs_gpio */
228 gpio_chipsel = true;
229 gpio = spi->cs_gpio;
230 }
231
232 	/* program the word delay between transfers if wdelay is non-zero */
233 if (spicfg->wdelay)
234 spidat1 |= SPIDAT1_WDEL;
235
236 /*
237 * Board specific chip select logic decides the polarity and cs
238 * line for the controller
239 */
240 if (gpio_chipsel) {
241 if (value == BITBANG_CS_ACTIVE)
242 gpio_set_value(gpio, spi->mode & SPI_CS_HIGH);
243 else
244 gpio_set_value(gpio, !(spi->mode & SPI_CS_HIGH));
245 } else {
246 if (value == BITBANG_CS_ACTIVE) {
247 spidat1 |= SPIDAT1_CSHOLD_MASK;
248 spidat1 &= ~(0x1 << chip_sel);
249 }
250 }
251
252 iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
253 }
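/*
 * Worked example for the write above (values are illustrative): with
 * chip_select = 1, an internal (non-GPIO) chipselect, wdelay configured and
 * the CS being activated, the upper half of SPIDAT1 ends up as:
 *
 *	spidat1  = 0x00FF		CS_DEFAULT
 *	spidat1 |= 0x0400		SPIDAT1_WDEL
 *	spidat1 |= 0x1000		SPIDAT1_CSHOLD_MASK
 *	spidat1 &= ~0x0002		assert chipselect 1
 *
 * so 0x14FD is written to SPIDAT1 + 2.
 */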
254
255 /**
256  * davinci_spi_get_prescale - Calculates the correct prescale value
257  * @dspi: the controller data
258  * @max_speed_hz: the maximum rate the SPI clock can run at
259  *
260  * This function calculates the prescale value that generates a clock rate
261  * less than or equal to the specified maximum.
262  * Returns: the calculated prescale minus 1, for easy programming into the
263  * SPI registers, or a negative error number if no valid prescale exists.
264  */
265 static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
266 u32 max_speed_hz)
267 {
268 int ret;
269
270 ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);
271
272 if (ret < 3 || ret > 256)
273 return -EINVAL;
274
275 return ret - 1;
276 }
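/*
 * Worked example (the clock rate is illustrative): with a 150 MHz SPI module
 * clock and max_speed_hz = 10 MHz, DIV_ROUND_UP(150000000, 10000000) = 15,
 * so this returns 14 and SPIFMTn.PRESCALE is programmed with 14, giving an
 * effective SPI clock of 150 MHz / 15 = 10 MHz.
 */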
277
278 /**
279  * davinci_spi_setup_transfer - This function determines the transfer method
280  * @spi: spi device on which the data transfer is to be done
281  * @t: spi transfer in which the transfer info is filled
282  *
283  * This function determines the data transfer method (8-bit or 16-bit
284  * transfer). It also programs the SPIFMT0 register (including the clock
285  * prescaler) according to the SPI slave device's frequency.
286 */
287 static int davinci_spi_setup_transfer(struct spi_device *spi,
288 struct spi_transfer *t)
289 {
290
291 struct davinci_spi *dspi;
292 struct davinci_spi_config *spicfg;
293 u8 bits_per_word = 0;
294 u32 hz = 0, spifmt = 0;
295 int prescale;
296
297 dspi = spi_master_get_devdata(spi->master);
298 spicfg = spi->controller_data;
299 if (!spicfg)
300 spicfg = &davinci_spi_default_cfg;
301
302 if (t) {
303 bits_per_word = t->bits_per_word;
304 hz = t->speed_hz;
305 }
306
307 	/* if bits_per_word is not set then use the default */
308 if (!bits_per_word)
309 bits_per_word = spi->bits_per_word;
310
311 	/*
312 	 * Assign function pointers for the appropriate transfer width:
313 	 * 8-bit or 16-bit transfer
314 	 */
315 if (bits_per_word <= 8) {
316 dspi->get_rx = davinci_spi_rx_buf_u8;
317 dspi->get_tx = davinci_spi_tx_buf_u8;
318 dspi->bytes_per_word[spi->chip_select] = 1;
319 } else {
320 dspi->get_rx = davinci_spi_rx_buf_u16;
321 dspi->get_tx = davinci_spi_tx_buf_u16;
322 dspi->bytes_per_word[spi->chip_select] = 2;
323 }
324
325 if (!hz)
326 hz = spi->max_speed_hz;
327
328 /* Set up SPIFMTn register, unique to this chipselect. */
329
330 prescale = davinci_spi_get_prescale(dspi, hz);
331 if (prescale < 0)
332 return prescale;
333
334 spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);
335
336 if (spi->mode & SPI_LSB_FIRST)
337 spifmt |= SPIFMT_SHIFTDIR_MASK;
338
339 if (spi->mode & SPI_CPOL)
340 spifmt |= SPIFMT_POLARITY_MASK;
341
342 if (!(spi->mode & SPI_CPHA))
343 spifmt |= SPIFMT_PHASE_MASK;
344
345 	/*
346 	 * Assume wdelay is used only on SPI peripherals that have this field
347 	 * in the SPIFMTn register and when it's configured from a board file or DT.
348 	 */
349 if (spicfg->wdelay)
350 spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
351 & SPIFMT_WDELAY_MASK);
352
353 /*
354 * Version 1 hardware supports two basic SPI modes:
355 * - Standard SPI mode uses 4 pins, with chipselect
356 * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
357 * (distinct from SPI_3WIRE, with just one data wire;
358 * or similar variants without MOSI or without MISO)
359 *
360 * Version 2 hardware supports an optional handshaking signal,
361 * so it can support two more modes:
362 * - 5 pin SPI variant is standard SPI plus SPI_READY
363 * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
364 */
365
366 if (dspi->version == SPI_VERSION_2) {
367
368 u32 delay = 0;
369
370 if (spicfg->odd_parity)
371 spifmt |= SPIFMT_ODD_PARITY_MASK;
372
373 if (spicfg->parity_enable)
374 spifmt |= SPIFMT_PARITYENA_MASK;
375
376 if (spicfg->timer_disable) {
377 spifmt |= SPIFMT_DISTIMER_MASK;
378 } else {
379 delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
380 & SPIDELAY_C2TDELAY_MASK;
381 delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
382 & SPIDELAY_T2CDELAY_MASK;
383 }
384
385 if (spi->mode & SPI_READY) {
386 spifmt |= SPIFMT_WAITENA_MASK;
387 delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
388 & SPIDELAY_T2EDELAY_MASK;
389 delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
390 & SPIDELAY_C2EDELAY_MASK;
391 }
392
393 iowrite32(delay, dspi->base + SPIDELAY);
394 }
395
396 iowrite32(spifmt, dspi->base + SPIFMT0);
397
398 return 0;
399 }
400
401 static int davinci_spi_of_setup(struct spi_device *spi)
402 {
403 struct davinci_spi_config *spicfg = spi->controller_data;
404 struct device_node *np = spi->dev.of_node;
405 u32 prop;
406
407 if (spicfg == NULL && np) {
408 spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL);
409 if (!spicfg)
410 return -ENOMEM;
411 *spicfg = davinci_spi_default_cfg;
412 /* override with dt configured values */
413 if (!of_property_read_u32(np, "ti,spi-wdelay", &prop))
414 spicfg->wdelay = (u8)prop;
415 spi->controller_data = spicfg;
416 }
417
418 return 0;
419 }
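/*
 * Illustrative device tree fragment for a slave node (the node, reg and
 * frequency values are made up); only "ti,spi-wdelay" is parsed above, the
 * rest is the standard SPI slave binding:
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <30000000>;
 *		ti,spi-wdelay = <8>;
 *	};
 */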
420
421 /**
422  * davinci_spi_setup - This function sets the default transfer method
423  * @spi: spi device on which the data transfer is to be done
424  *
425  * This function sets the default transfer method.
426 */
427 static int davinci_spi_setup(struct spi_device *spi)
428 {
429 int retval = 0;
430 struct davinci_spi *dspi;
431 struct davinci_spi_platform_data *pdata;
432 struct spi_master *master = spi->master;
433 struct device_node *np = spi->dev.of_node;
434 bool internal_cs = true;
435
436 dspi = spi_master_get_devdata(spi->master);
437 pdata = &dspi->pdata;
438
439 if (!(spi->mode & SPI_NO_CS)) {
440 if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
441 retval = gpio_direction_output(
442 spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
443 internal_cs = false;
444 } else if (pdata->chip_sel &&
445 spi->chip_select < pdata->num_chipselect &&
446 pdata->chip_sel[spi->chip_select] != SPI_INTERN_CS) {
447 spi->cs_gpio = pdata->chip_sel[spi->chip_select];
448 retval = gpio_direction_output(
449 spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
450 internal_cs = false;
451 }
452
453 if (retval) {
454 dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
455 spi->cs_gpio, retval);
456 return retval;
457 }
458
459 if (internal_cs)
460 set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
461 }
462
463 if (spi->mode & SPI_READY)
464 set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);
465
466 if (spi->mode & SPI_LOOP)
467 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
468 else
469 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
470
471 return davinci_spi_of_setup(spi);
472 }
473
474 static void davinci_spi_cleanup(struct spi_device *spi)
475 {
476 struct davinci_spi_config *spicfg = spi->controller_data;
477
478 spi->controller_data = NULL;
479 if (spi->dev.of_node)
480 kfree(spicfg);
481 }
482
483 static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
484 {
485 struct device *sdev = dspi->bitbang.master->dev.parent;
486
487 if (int_status & SPIFLG_TIMEOUT_MASK) {
488 dev_dbg(sdev, "SPI Time-out Error\n");
489 return -ETIMEDOUT;
490 }
491 if (int_status & SPIFLG_DESYNC_MASK) {
492 dev_dbg(sdev, "SPI Desynchronization Error\n");
493 return -EIO;
494 }
495 if (int_status & SPIFLG_BITERR_MASK) {
496 dev_dbg(sdev, "SPI Bit error\n");
497 return -EIO;
498 }
499
500 if (dspi->version == SPI_VERSION_2) {
501 if (int_status & SPIFLG_DLEN_ERR_MASK) {
502 dev_dbg(sdev, "SPI Data Length Error\n");
503 return -EIO;
504 }
505 if (int_status & SPIFLG_PARERR_MASK) {
506 dev_dbg(sdev, "SPI Parity Error\n");
507 return -EIO;
508 }
509 if (int_status & SPIFLG_OVRRUN_MASK) {
510 dev_dbg(sdev, "SPI Data Overrun error\n");
511 return -EIO;
512 }
513 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
514 dev_dbg(sdev, "SPI Buffer Init Active\n");
515 return -EBUSY;
516 }
517 }
518
519 return 0;
520 }
521
522 /**
523 * davinci_spi_process_events - check for and handle any SPI controller events
524 * @dspi: the controller data
525 *
526  * This function will check the SPIFLG register and handle any events that
527  * are detected there.
528 */
529 static int davinci_spi_process_events(struct davinci_spi *dspi)
530 {
531 u32 buf, status, errors = 0, spidat1;
532
533 buf = ioread32(dspi->base + SPIBUF);
534
535 if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
536 dspi->get_rx(buf & 0xFFFF, dspi);
537 dspi->rcount--;
538 }
539
540 status = ioread32(dspi->base + SPIFLG);
541
542 if (unlikely(status & SPIFLG_ERROR_MASK)) {
543 errors = status & SPIFLG_ERROR_MASK;
544 goto out;
545 }
546
547 if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
548 spidat1 = ioread32(dspi->base + SPIDAT1);
549 dspi->wcount--;
550 spidat1 &= ~0xFFFF;
551 spidat1 |= 0xFFFF & dspi->get_tx(dspi);
552 iowrite32(spidat1, dspi->base + SPIDAT1);
553 }
554
555 out:
556 return errors;
557 }
558
559 static void davinci_spi_dma_rx_callback(void *data)
560 {
561 struct davinci_spi *dspi = (struct davinci_spi *)data;
562
563 dspi->rcount = 0;
564
565 if (!dspi->wcount && !dspi->rcount)
566 complete(&dspi->done);
567 }
568
569 static void davinci_spi_dma_tx_callback(void *data)
570 {
571 struct davinci_spi *dspi = (struct davinci_spi *)data;
572
573 dspi->wcount = 0;
574
575 if (!dspi->wcount && !dspi->rcount)
576 complete(&dspi->done);
577 }
578
579 /**
580  * davinci_spi_bufs - function which handles the data transfer
581  * @spi: spi device on which the data transfer is to be done
582  * @t: spi transfer in which the transfer info is filled
583  *
584  * This function puts the data to be transferred into the data register
585  * of the SPI controller and then waits until completion is signalled by
586  * the IRQ handler, the DMA callbacks, or polling.
587 */
588 static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
589 {
590 struct davinci_spi *dspi;
591 int data_type, ret = -ENOMEM;
592 u32 tx_data, spidat1;
593 u32 errors = 0;
594 struct davinci_spi_config *spicfg;
595 struct davinci_spi_platform_data *pdata;
596 unsigned uninitialized_var(rx_buf_count);
597 void *dummy_buf = NULL;
598 struct scatterlist sg_rx, sg_tx;
599
600 dspi = spi_master_get_devdata(spi->master);
601 pdata = &dspi->pdata;
602 spicfg = (struct davinci_spi_config *)spi->controller_data;
603 if (!spicfg)
604 spicfg = &davinci_spi_default_cfg;
605
606 /* convert len to words based on bits_per_word */
607 data_type = dspi->bytes_per_word[spi->chip_select];
608
609 dspi->tx = t->tx_buf;
610 dspi->rx = t->rx_buf;
611 dspi->wcount = t->len / data_type;
612 dspi->rcount = dspi->wcount;
613
614 spidat1 = ioread32(dspi->base + SPIDAT1);
615
616 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
617 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
618
619 reinit_completion(&dspi->done);
620
621 if (spicfg->io_type == SPI_IO_TYPE_INTR)
622 set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
623
624 if (spicfg->io_type != SPI_IO_TYPE_DMA) {
625 /* start the transfer */
626 dspi->wcount--;
627 tx_data = dspi->get_tx(dspi);
628 spidat1 &= 0xFFFF0000;
629 spidat1 |= tx_data & 0xFFFF;
630 iowrite32(spidat1, dspi->base + SPIDAT1);
631 } else {
632 struct dma_slave_config dma_rx_conf = {
633 .direction = DMA_DEV_TO_MEM,
634 .src_addr = (unsigned long)dspi->pbase + SPIBUF,
635 .src_addr_width = data_type,
636 .src_maxburst = 1,
637 };
638 struct dma_slave_config dma_tx_conf = {
639 .direction = DMA_MEM_TO_DEV,
640 .dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
641 .dst_addr_width = data_type,
642 .dst_maxburst = 1,
643 };
644 struct dma_async_tx_descriptor *rxdesc;
645 struct dma_async_tx_descriptor *txdesc;
646 void *buf;
647
648 dummy_buf = kzalloc(t->len, GFP_KERNEL);
649 if (!dummy_buf)
650 goto err_alloc_dummy_buf;
651
652 dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
653 dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);
654
655 sg_init_table(&sg_rx, 1);
656 if (!t->rx_buf)
657 buf = dummy_buf;
658 else
659 buf = t->rx_buf;
660 t->rx_dma = dma_map_single(&spi->dev, buf,
661 t->len, DMA_FROM_DEVICE);
662 		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
663 ret = -EFAULT;
664 goto err_rx_map;
665 }
666 sg_dma_address(&sg_rx) = t->rx_dma;
667 sg_dma_len(&sg_rx) = t->len;
668
669 sg_init_table(&sg_tx, 1);
670 if (!t->tx_buf)
671 buf = dummy_buf;
672 else
673 buf = (void *)t->tx_buf;
674 t->tx_dma = dma_map_single(&spi->dev, buf,
675 t->len, DMA_TO_DEVICE);
676 		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
677 ret = -EFAULT;
678 goto err_tx_map;
679 }
680 sg_dma_address(&sg_tx) = t->tx_dma;
681 sg_dma_len(&sg_tx) = t->len;
682
683 rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
684 &sg_rx, 1, DMA_DEV_TO_MEM,
685 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
686 if (!rxdesc)
687 goto err_desc;
688
689 txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
690 &sg_tx, 1, DMA_MEM_TO_DEV,
691 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
692 if (!txdesc)
693 goto err_desc;
694
695 rxdesc->callback = davinci_spi_dma_rx_callback;
696 rxdesc->callback_param = (void *)dspi;
697 txdesc->callback = davinci_spi_dma_tx_callback;
698 txdesc->callback_param = (void *)dspi;
699
700 if (pdata->cshold_bug)
701 iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
702
703 dmaengine_submit(rxdesc);
704 dmaengine_submit(txdesc);
705
706 dma_async_issue_pending(dspi->dma_rx);
707 dma_async_issue_pending(dspi->dma_tx);
708
709 set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
710 }
711
712 /* Wait for the transfer to complete */
713 if (spicfg->io_type != SPI_IO_TYPE_POLL) {
714 wait_for_completion_interruptible(&(dspi->done));
715 } else {
716 while (dspi->rcount > 0 || dspi->wcount > 0) {
717 errors = davinci_spi_process_events(dspi);
718 if (errors)
719 break;
720 cpu_relax();
721 }
722 }
723
724 clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
725 if (spicfg->io_type == SPI_IO_TYPE_DMA) {
726 clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
727
728 dma_unmap_single(&spi->dev, t->rx_dma,
729 t->len, DMA_FROM_DEVICE);
730 dma_unmap_single(&spi->dev, t->tx_dma,
731 t->len, DMA_TO_DEVICE);
732 kfree(dummy_buf);
733 }
734
735 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
736 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
737
738 /*
739 	 * Check for bit error, desync error, parity error, timeout error and
740 	 * receive overflow errors
741 */
742 if (errors) {
743 ret = davinci_spi_check_error(dspi, errors);
744 WARN(!ret, "%s: error reported but no error found!\n",
745 dev_name(&spi->dev));
746 return ret;
747 }
748
749 if (dspi->rcount != 0 || dspi->wcount != 0) {
750 dev_err(&spi->dev, "SPI data transfer error\n");
751 return -EIO;
752 }
753
754 return t->len;
755
756 err_desc:
757 dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
758 err_tx_map:
759 dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
760 err_rx_map:
761 kfree(dummy_buf);
762 err_alloc_dummy_buf:
763 return ret;
764 }
765
766 /**
767 * dummy_thread_fn - dummy thread function
768 * @irq: IRQ number for this SPI Master
769  * @data: structure for SPI Master controller davinci_spi
770 *
771 * This is to satisfy the request_threaded_irq() API so that the irq
772 * handler is called in interrupt context.
773 */
774 static irqreturn_t dummy_thread_fn(s32 irq, void *data)
775 {
776 return IRQ_HANDLED;
777 }
778
779 /**
780 * davinci_spi_irq - Interrupt handler for SPI Master Controller
781 * @irq: IRQ number for this SPI Master
782  * @data: structure for SPI Master controller davinci_spi
783  *
784  * The ISR determines whether the interrupt is for a read or a write and
785  * acts accordingly. It checks the remaining transfer length and, if it is
786  * not zero, dispatches the next transfer word. If the transfer length is
787  * zero (or an error occurred), it signals completion so that the
788  * davinci_spi_bufs() function can proceed.
789 */
790 static irqreturn_t davinci_spi_irq(s32 irq, void *data)
791 {
792 struct davinci_spi *dspi = data;
793 int status;
794
795 status = davinci_spi_process_events(dspi);
796 if (unlikely(status != 0))
797 clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
798
799 if ((!dspi->rcount && !dspi->wcount) || status)
800 complete(&dspi->done);
801
802 return IRQ_HANDLED;
803 }
804
805 static int davinci_spi_request_dma(struct davinci_spi *dspi)
806 {
807 dma_cap_mask_t mask;
808 struct device *sdev = dspi->bitbang.master->dev.parent;
809 int r;
810
811 dma_cap_zero(mask);
812 dma_cap_set(DMA_SLAVE, mask);
813
814 dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
815 &dspi->dma_rx_chnum);
816 if (!dspi->dma_rx) {
817 dev_err(sdev, "request RX DMA channel failed\n");
818 r = -ENODEV;
819 goto rx_dma_failed;
820 }
821
822 dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
823 &dspi->dma_tx_chnum);
824 if (!dspi->dma_tx) {
825 dev_err(sdev, "request TX DMA channel failed\n");
826 r = -ENODEV;
827 goto tx_dma_failed;
828 }
829
830 return 0;
831
832 tx_dma_failed:
833 dma_release_channel(dspi->dma_rx);
834 rx_dma_failed:
835 return r;
836 }
837
838 #if defined(CONFIG_OF)
839 static const struct of_device_id davinci_spi_of_match[] = {
840 {
841 .compatible = "ti,dm6441-spi",
842 },
843 {
844 .compatible = "ti,da830-spi",
845 .data = (void *)SPI_VERSION_2,
846 },
847 { },
848 };
849 MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
850
851 /**
852 * spi_davinci_get_pdata - Get platform data from DTS binding
853  * @pdev: ptr to the platform device
854 * @dspi: ptr to driver data
855 *
856 * Parses and populates pdata in dspi from device tree bindings.
857 *
858 * NOTE: Not all platform data params are supported currently.
859 */
860 static int spi_davinci_get_pdata(struct platform_device *pdev,
861 struct davinci_spi *dspi)
862 {
863 struct device_node *node = pdev->dev.of_node;
864 struct davinci_spi_platform_data *pdata;
865 unsigned int num_cs, intr_line = 0;
866 const struct of_device_id *match;
867
868 pdata = &dspi->pdata;
869
870 pdata->version = SPI_VERSION_1;
871 match = of_match_device(davinci_spi_of_match, &pdev->dev);
872 if (!match)
873 return -ENODEV;
874
875 /* match data has the SPI version number for SPI_VERSION_2 */
876 if (match->data == (void *)SPI_VERSION_2)
877 pdata->version = SPI_VERSION_2;
878
879 	/*
880 	 * The default num_cs is 1, and all chipselects are internal to the
881 	 * chip, indicated by chip_sel being NULL or by cs_gpios being NULL
882 	 * or set to -ENOENT. The "num-cs" property counts internal as well
883 	 * as GPIO chipselects; GPIO chipselect lines themselves are picked
884 	 * up from the "cs-gpios" property in davinci_spi_probe().
885 	 */
886 num_cs = 1;
887 of_property_read_u32(node, "num-cs", &num_cs);
888 pdata->num_chipselect = num_cs;
889 of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
890 pdata->intr_line = intr_line;
891 return 0;
892 }
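/*
 * Illustrative controller node (the addresses, interrupt number and values
 * are made up); "num-cs" and "ti,davinci-spi-intr-line" are the only
 * properties parsed above, the compatible strings come from
 * davinci_spi_of_match:
 *
 *	spi1: spi@1f0e000 {
 *		compatible = "ti,da830-spi";
 *		reg = <0x1f0e000 0x1000>;
 *		interrupts = <56>;
 *		num-cs = <4>;
 *		ti,davinci-spi-intr-line = <1>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */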
893 #else
894 static int
895 spi_davinci_get_pdata(struct platform_device *pdev,
896 			struct davinci_spi *dspi)
897 {
898 	return -ENODEV;
899 }
900 #endif
901
902 /**
903 * davinci_spi_probe - probe function for SPI Master Controller
904  * @pdev: platform_device structure which contains platform specific data
905  *
906  * According to the Linux Device Model, this function is invoked by the core
907  * with a platform_device struct which contains the device specific info.
908  * This function maps the SPI controller's memory, registers the IRQ,
909  * resets the SPI controller and sets its registers to default values.
910  * It then invokes spi_bitbang_start() to register the master with the SPI
911  * core so that client drivers can queue transfers against it.
912 */
913 static int davinci_spi_probe(struct platform_device *pdev)
914 {
915 struct spi_master *master;
916 struct davinci_spi *dspi;
917 struct davinci_spi_platform_data *pdata;
918 struct resource *r;
919 resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
920 resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
921 int ret = 0;
922 u32 spipc0;
923
924 master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
925 if (master == NULL) {
926 ret = -ENOMEM;
927 goto err;
928 }
929
930 platform_set_drvdata(pdev, master);
931
932 dspi = spi_master_get_devdata(master);
933
934 if (dev_get_platdata(&pdev->dev)) {
935 pdata = dev_get_platdata(&pdev->dev);
936 dspi->pdata = *pdata;
937 } else {
938 /* update dspi pdata with that from the DT */
939 ret = spi_davinci_get_pdata(pdev, dspi);
940 if (ret < 0)
941 goto free_master;
942 }
943
944 	/* pdata in dspi is now updated; point pdata at it */
945 pdata = &dspi->pdata;
946
947 dspi->bytes_per_word = devm_kzalloc(&pdev->dev,
948 sizeof(*dspi->bytes_per_word) *
949 pdata->num_chipselect, GFP_KERNEL);
950 if (dspi->bytes_per_word == NULL) {
951 ret = -ENOMEM;
952 goto free_master;
953 }
954
955 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
956 if (r == NULL) {
957 ret = -ENOENT;
958 goto free_master;
959 }
960
961 dspi->pbase = r->start;
962
963 dspi->base = devm_ioremap_resource(&pdev->dev, r);
964 if (IS_ERR(dspi->base)) {
965 ret = PTR_ERR(dspi->base);
966 goto free_master;
967 }
968
969 dspi->irq = platform_get_irq(pdev, 0);
970 if (dspi->irq <= 0) {
971 ret = -EINVAL;
972 goto free_master;
973 }
974
975 ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
976 dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
977 if (ret)
978 goto free_master;
979
980 dspi->bitbang.master = master;
981
982 dspi->clk = devm_clk_get(&pdev->dev, NULL);
983 if (IS_ERR(dspi->clk)) {
984 ret = -ENODEV;
985 goto free_master;
986 }
987 clk_prepare_enable(dspi->clk);
988
989 master->dev.of_node = pdev->dev.of_node;
990 master->bus_num = pdev->id;
991 master->num_chipselect = pdata->num_chipselect;
992 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
993 master->setup = davinci_spi_setup;
994 master->cleanup = davinci_spi_cleanup;
995
996 dspi->bitbang.chipselect = davinci_spi_chipselect;
997 dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
998
999 dspi->version = pdata->version;
1000
1001 dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1002 if (dspi->version == SPI_VERSION_2)
1003 dspi->bitbang.flags |= SPI_READY;
1004
1005 if (pdev->dev.of_node) {
1006 int i;
1007
1008 for (i = 0; i < pdata->num_chipselect; i++) {
1009 int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
1010 "cs-gpios", i);
1011
1012 if (cs_gpio == -EPROBE_DEFER) {
1013 ret = cs_gpio;
1014 goto free_clk;
1015 }
1016
1017 if (gpio_is_valid(cs_gpio)) {
1018 ret = devm_gpio_request(&pdev->dev, cs_gpio,
1019 dev_name(&pdev->dev));
1020 if (ret)
1021 goto free_clk;
1022 }
1023 }
1024 }
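/*
 * Illustrative "cs-gpios" property for the controller node (the GPIO
 * specifier is made up); a <0> entry leaves that chipselect on the internal
 * CS logic, while a valid specifier is requested here and driven in
 * davinci_spi_chipselect():
 *
 *	cs-gpios = <0>, <&gpio0 5 GPIO_ACTIVE_LOW>;
 */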
1025
1026 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1027 if (r)
1028 dma_rx_chan = r->start;
1029 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1030 if (r)
1031 dma_tx_chan = r->start;
1032
1033 dspi->bitbang.txrx_bufs = davinci_spi_bufs;
1034 if (dma_rx_chan != SPI_NO_RESOURCE &&
1035 dma_tx_chan != SPI_NO_RESOURCE) {
1036 dspi->dma_rx_chnum = dma_rx_chan;
1037 dspi->dma_tx_chnum = dma_tx_chan;
1038
1039 ret = davinci_spi_request_dma(dspi);
1040 if (ret)
1041 goto free_clk;
1042
1043 dev_info(&pdev->dev, "DMA: supported\n");
1044 dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, event queue: %d\n",
1045 &dma_rx_chan, &dma_tx_chan,
1046 pdata->dma_event_q);
1047 }
1048
1049 dspi->get_rx = davinci_spi_rx_buf_u8;
1050 dspi->get_tx = davinci_spi_tx_buf_u8;
1051
1052 init_completion(&dspi->done);
1053
1054 /* Reset In/OUT SPI module */
1055 iowrite32(0, dspi->base + SPIGCR0);
1056 udelay(100);
1057 iowrite32(1, dspi->base + SPIGCR0);
1058
1059 /* Set up SPIPC0. CS and ENA init is done in davinci_spi_setup */
1060 spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
1061 iowrite32(spipc0, dspi->base + SPIPC0);
1062
1063 if (pdata->intr_line)
1064 iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
1065 else
1066 iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);
1067
1068 iowrite32(CS_DEFAULT, dspi->base + SPIDEF);
1069
1070 /* master mode default */
1071 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
1072 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
1073 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
1074
1075 ret = spi_bitbang_start(&dspi->bitbang);
1076 if (ret)
1077 goto free_dma;
1078
1079 dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);
1080
1081 return ret;
1082
1083 free_dma:
1084 dma_release_channel(dspi->dma_rx);
1085 dma_release_channel(dspi->dma_tx);
1086 free_clk:
1087 clk_disable_unprepare(dspi->clk);
1088 free_master:
1089 spi_master_put(master);
1090 err:
1091 return ret;
1092 }
1093
1094 /**
1095 * davinci_spi_remove - remove function for SPI Master Controller
1096  * @pdev: platform_device structure which contains platform specific data
1097  *
1098  * This function does the reverse of davinci_spi_probe(): it frees the IRQ
1099  * and the SPI controller's memory region, and calls spi_bitbang_stop()
1100  * to undo the registration that was done by
1101  * spi_bitbang_start().
1102 */
1103 static int davinci_spi_remove(struct platform_device *pdev)
1104 {
1105 struct davinci_spi *dspi;
1106 struct spi_master *master;
1107
1108 master = platform_get_drvdata(pdev);
1109 dspi = spi_master_get_devdata(master);
1110
1111 spi_bitbang_stop(&dspi->bitbang);
1112
1113 clk_disable_unprepare(dspi->clk);
1114 spi_master_put(master);
1115
1116 return 0;
1117 }
1118
1119 static struct platform_driver davinci_spi_driver = {
1120 .driver = {
1121 .name = "spi_davinci",
1122 .owner = THIS_MODULE,
1123 .of_match_table = of_match_ptr(davinci_spi_of_match),
1124 },
1125 .probe = davinci_spi_probe,
1126 .remove = davinci_spi_remove,
1127 };
1128 module_platform_driver(davinci_spi_driver);
1129
1130 MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1131 MODULE_LICENSE("GPL");