]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/spi/spi-lantiq-ssc.c
Merge branches 'for-4.11/upstream-fixes', 'for-4.12/accutouch', 'for-4.12/cp2112...
[mirror_ubuntu-artful-kernel.git] / drivers / spi / spi-lantiq-ssc.c
1 /*
2 * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
3 * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
4 *
5 * This program is free software; you can distribute it and/or modify it
6 * under the terms of the GNU General Public License (Version 2) as
7 * published by the Free Software Foundation.
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/of_device.h>
13 #include <linux/clk.h>
14 #include <linux/io.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/sched.h>
18 #include <linux/completion.h>
19 #include <linux/spinlock.h>
20 #include <linux/err.h>
21 #include <linux/gpio.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/spi/spi.h>
24
25 #ifdef CONFIG_LANTIQ
26 #include <lantiq_soc.h>
27 #endif
28
29 #define SPI_RX_IRQ_NAME "spi_rx"
30 #define SPI_TX_IRQ_NAME "spi_tx"
31 #define SPI_ERR_IRQ_NAME "spi_err"
32 #define SPI_FRM_IRQ_NAME "spi_frm"
33
34 #define SPI_CLC 0x00
35 #define SPI_PISEL 0x04
36 #define SPI_ID 0x08
37 #define SPI_CON 0x10
38 #define SPI_STAT 0x14
39 #define SPI_WHBSTATE 0x18
40 #define SPI_TB 0x20
41 #define SPI_RB 0x24
42 #define SPI_RXFCON 0x30
43 #define SPI_TXFCON 0x34
44 #define SPI_FSTAT 0x38
45 #define SPI_BRT 0x40
46 #define SPI_BRSTAT 0x44
47 #define SPI_SFCON 0x60
48 #define SPI_SFSTAT 0x64
49 #define SPI_GPOCON 0x70
50 #define SPI_GPOSTAT 0x74
51 #define SPI_FPGO 0x78
52 #define SPI_RXREQ 0x80
53 #define SPI_RXCNT 0x84
54 #define SPI_DMACON 0xec
55 #define SPI_IRNEN 0xf4
56 #define SPI_IRNICR 0xf8
57 #define SPI_IRNCR 0xfc
58
59 #define SPI_CLC_SMC_S 16 /* Clock divider for sleep mode */
60 #define SPI_CLC_SMC_M (0xFF << SPI_CLC_SMC_S)
61 #define SPI_CLC_RMC_S 8 /* Clock divider for normal run mode */
62 #define SPI_CLC_RMC_M (0xFF << SPI_CLC_RMC_S)
63 #define SPI_CLC_DISS BIT(1) /* Disable status bit */
64 #define SPI_CLC_DISR BIT(0) /* Disable request bit */
65
66 #define SPI_ID_TXFS_S 24 /* Implemented TX FIFO size */
67 #define SPI_ID_TXFS_M (0x3F << SPI_ID_TXFS_S)
68 #define SPI_ID_RXFS_S 16 /* Implemented RX FIFO size */
69 #define SPI_ID_RXFS_M (0x3F << SPI_ID_RXFS_S)
70 #define SPI_ID_MOD_S 8 /* Module ID */
71 #define SPI_ID_MOD_M (0xff << SPI_ID_MOD_S)
72 #define SPI_ID_CFG_S 5 /* DMA interface support */
73 #define SPI_ID_CFG_M (1 << SPI_ID_CFG_S)
74 #define SPI_ID_REV_M 0x1F /* Hardware revision number */
75
76 #define SPI_CON_BM_S 16 /* Data width selection */
77 #define SPI_CON_BM_M (0x1F << SPI_CON_BM_S)
78 #define SPI_CON_EM BIT(24) /* Echo mode */
79 #define SPI_CON_IDLE BIT(23) /* Idle bit value */
80 #define SPI_CON_ENBV BIT(22) /* Enable byte valid control */
81 #define SPI_CON_RUEN BIT(12) /* Receive underflow error enable */
82 #define SPI_CON_TUEN BIT(11) /* Transmit underflow error enable */
83 #define SPI_CON_AEN BIT(10) /* Abort error enable */
84 #define SPI_CON_REN BIT(9) /* Receive overflow error enable */
85 #define SPI_CON_TEN BIT(8) /* Transmit overflow error enable */
86 #define SPI_CON_LB BIT(7) /* Loopback control */
87 #define SPI_CON_PO BIT(6) /* Clock polarity control */
88 #define SPI_CON_PH BIT(5) /* Clock phase control */
89 #define SPI_CON_HB BIT(4) /* Heading control */
90 #define SPI_CON_RXOFF BIT(1) /* Switch receiver off */
91 #define SPI_CON_TXOFF BIT(0) /* Switch transmitter off */
92
93 #define SPI_STAT_RXBV_S 28
94 #define SPI_STAT_RXBV_M (0x7 << SPI_STAT_RXBV_S)
95 #define SPI_STAT_BSY BIT(13) /* Busy flag */
96 #define SPI_STAT_RUE BIT(12) /* Receive underflow error flag */
97 #define SPI_STAT_TUE BIT(11) /* Transmit underflow error flag */
98 #define SPI_STAT_AE BIT(10) /* Abort error flag */
99 #define SPI_STAT_RE BIT(9) /* Receive error flag */
100 #define SPI_STAT_TE BIT(8) /* Transmit error flag */
101 #define SPI_STAT_ME BIT(7) /* Mode error flag */
102 #define SPI_STAT_MS BIT(1) /* Master/slave select bit */
103 #define SPI_STAT_EN BIT(0) /* Enable bit */
104 #define SPI_STAT_ERRORS (SPI_STAT_ME | SPI_STAT_TE | SPI_STAT_RE | \
105 SPI_STAT_AE | SPI_STAT_TUE | SPI_STAT_RUE)
106
107 #define SPI_WHBSTATE_SETTUE BIT(15) /* Set transmit underflow error flag */
108 #define SPI_WHBSTATE_SETAE BIT(14) /* Set abort error flag */
109 #define SPI_WHBSTATE_SETRE BIT(13) /* Set receive error flag */
110 #define SPI_WHBSTATE_SETTE BIT(12) /* Set transmit error flag */
111 #define SPI_WHBSTATE_CLRTUE BIT(11) /* Clear transmit underflow error flag */
112 #define SPI_WHBSTATE_CLRAE BIT(10) /* Clear abort error flag */
113 #define SPI_WHBSTATE_CLRRE BIT(9) /* Clear receive error flag */
114 #define SPI_WHBSTATE_CLRTE BIT(8) /* Clear transmit error flag */
115 #define SPI_WHBSTATE_SETME BIT(7) /* Set mode error flag */
116 #define SPI_WHBSTATE_CLRME BIT(6) /* Clear mode error flag */
117 #define SPI_WHBSTATE_SETRUE BIT(5) /* Set receive underflow error flag */
118 #define SPI_WHBSTATE_CLRRUE BIT(4) /* Clear receive underflow error flag */
119 #define SPI_WHBSTATE_SETMS BIT(3) /* Set master select bit */
120 #define SPI_WHBSTATE_CLRMS BIT(2) /* Clear master select bit */
121 #define SPI_WHBSTATE_SETEN BIT(1) /* Set enable bit (operational mode) */
122 #define SPI_WHBSTATE_CLREN BIT(0) /* Clear enable bit (config mode */
123 #define SPI_WHBSTATE_CLR_ERRORS (SPI_WHBSTATE_CLRRUE | SPI_WHBSTATE_CLRME | \
124 SPI_WHBSTATE_CLRTE | SPI_WHBSTATE_CLRRE | \
125 SPI_WHBSTATE_CLRAE | SPI_WHBSTATE_CLRTUE)
126
127 #define SPI_RXFCON_RXFITL_S 8 /* FIFO interrupt trigger level */
128 #define SPI_RXFCON_RXFITL_M (0x3F << SPI_RXFCON_RXFITL_S)
129 #define SPI_RXFCON_RXFLU BIT(1) /* FIFO flush */
130 #define SPI_RXFCON_RXFEN BIT(0) /* FIFO enable */
131
132 #define SPI_TXFCON_TXFITL_S 8 /* FIFO interrupt trigger level */
133 #define SPI_TXFCON_TXFITL_M (0x3F << SPI_TXFCON_TXFITL_S)
134 #define SPI_TXFCON_TXFLU BIT(1) /* FIFO flush */
135 #define SPI_TXFCON_TXFEN BIT(0) /* FIFO enable */
136
137 #define SPI_FSTAT_RXFFL_S 0
138 #define SPI_FSTAT_RXFFL_M (0x3f << SPI_FSTAT_RXFFL_S)
139 #define SPI_FSTAT_TXFFL_S 8
140 #define SPI_FSTAT_TXFFL_M (0x3f << SPI_FSTAT_TXFFL_S)
141
142 #define SPI_GPOCON_ISCSBN_S 8
143 #define SPI_GPOCON_INVOUTN_S 0
144
145 #define SPI_FGPO_SETOUTN_S 8
146 #define SPI_FGPO_CLROUTN_S 0
147
148 #define SPI_RXREQ_RXCNT_M 0xFFFF /* Receive count value */
149 #define SPI_RXCNT_TODO_M 0xFFFF /* Recevie to-do value */
150
151 #define SPI_IRNEN_TFI BIT(4) /* TX finished interrupt */
152 #define SPI_IRNEN_F BIT(3) /* Frame end interrupt request */
153 #define SPI_IRNEN_E BIT(2) /* Error end interrupt request */
154 #define SPI_IRNEN_T_XWAY BIT(1) /* Transmit end interrupt request */
155 #define SPI_IRNEN_R_XWAY BIT(0) /* Receive end interrupt request */
156 #define SPI_IRNEN_R_XRX BIT(1) /* Transmit end interrupt request */
157 #define SPI_IRNEN_T_XRX BIT(0) /* Receive end interrupt request */
158 #define SPI_IRNEN_ALL 0x1F
159
/*
 * SoC-specific IRNEN interrupt-enable bit positions; the XWAY and
 * XRX variants swap the receive/transmit bits (see SPI_IRNEN_*_XWAY
 * vs. SPI_IRNEN_*_XRX above).
 */
struct lantiq_ssc_hwcfg {
	unsigned int irnen_r;	/* receive-end interrupt enable bit */
	unsigned int irnen_t;	/* transmit-end interrupt enable bit */
};
164
/* Per-controller driver state, stored as the spi_master's drvdata. */
struct lantiq_ssc_spi {
	struct spi_master *master;
	struct device *dev;
	void __iomem *regbase;		/* mapped SSC register window */
	struct clk *spi_clk;		/* "gate" clock, enabled in probe */
	struct clk *fpi_clk;		/* FPI bus clock, baud rate reference */
	const struct lantiq_ssc_hwcfg *hwcfg;	/* per-SoC IRNEN bit layout */

	spinlock_t lock;		/* protects the transfer cursors below */
	struct workqueue_struct *wq;	/* runs lantiq_ssc_bussy_work */
	struct work_struct work;

	const u8 *tx;			/* current TX position, NULL if RX-only */
	u8 *rx;				/* current RX position, NULL if TX-only */
	unsigned int tx_todo;		/* bytes left to push into the TX FIFO */
	unsigned int rx_todo;		/* bytes left to drain from the RX FIFO */
	unsigned int bits_per_word;	/* currently programmed word size */
	unsigned int speed_hz;		/* currently programmed bit clock */
	unsigned int tx_fifo_size;	/* HW TX FIFO depth (from SPI_ID) */
	unsigned int rx_fifo_size;	/* HW RX FIFO depth (from SPI_ID) */
	unsigned int base_cs;		/* first usable internal chipselect */
};
187
188 static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
189 {
190 return __raw_readl(spi->regbase + reg);
191 }
192
193 static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
194 u32 reg)
195 {
196 __raw_writel(val, spi->regbase + reg);
197 }
198
199 static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
200 u32 set, u32 reg)
201 {
202 u32 val = __raw_readl(spi->regbase + reg);
203
204 val &= ~clr;
205 val |= set;
206 __raw_writel(val, spi->regbase + reg);
207 }
208
209 static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
210 {
211 u32 fstat = lantiq_ssc_readl(spi, SPI_FSTAT);
212
213 return (fstat & SPI_FSTAT_TXFFL_M) >> SPI_FSTAT_TXFFL_S;
214 }
215
216 static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
217 {
218 u32 fstat = lantiq_ssc_readl(spi, SPI_FSTAT);
219
220 return fstat & SPI_FSTAT_RXFFL_M;
221 }
222
223 static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
224 {
225 return spi->tx_fifo_size - tx_fifo_level(spi);
226 }
227
228 static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
229 {
230 u32 val = spi->rx_fifo_size << SPI_RXFCON_RXFITL_S;
231
232 val |= SPI_RXFCON_RXFEN | SPI_RXFCON_RXFLU;
233 lantiq_ssc_writel(spi, val, SPI_RXFCON);
234 }
235
236 static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
237 {
238 u32 val = 1 << SPI_TXFCON_TXFITL_S;
239
240 val |= SPI_TXFCON_TXFEN | SPI_TXFCON_TXFLU;
241 lantiq_ssc_writel(spi, val, SPI_TXFCON);
242 }
243
244 static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
245 {
246 lantiq_ssc_maskl(spi, 0, SPI_RXFCON_RXFLU, SPI_RXFCON);
247 }
248
249 static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
250 {
251 lantiq_ssc_maskl(spi, 0, SPI_TXFCON_TXFLU, SPI_TXFCON);
252 }
253
/* Clear STAT.EN via the write-HW-bits register: controller enters config mode. */
static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
{
	lantiq_ssc_writel(spi, SPI_WHBSTATE_CLREN, SPI_WHBSTATE);
}
258
/* Set STAT.EN via the write-HW-bits register: controller enters operational mode. */
static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
{
	lantiq_ssc_writel(spi, SPI_WHBSTATE_SETEN, SPI_WHBSTATE);
}
263
264 static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
265 unsigned int max_speed_hz)
266 {
267 u32 spi_clk, brt;
268
269 /*
270 * SPI module clock is derived from FPI bus clock dependent on
271 * divider value in CLC.RMS which is always set to 1.
272 *
273 * f_SPI
274 * baudrate = --------------
275 * 2 * (BR + 1)
276 */
277 spi_clk = clk_get_rate(spi->fpi_clk) / 2;
278
279 if (max_speed_hz > spi_clk)
280 brt = 0;
281 else
282 brt = spi_clk / max_speed_hz - 1;
283
284 if (brt > 0xFFFF)
285 brt = 0xFFFF;
286
287 dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
288 spi_clk, max_speed_hz, brt);
289
290 lantiq_ssc_writel(spi, brt, SPI_BRT);
291 }
292
293 static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
294 unsigned int bits_per_word)
295 {
296 u32 bm;
297
298 /* CON.BM value = bits_per_word - 1 */
299 bm = (bits_per_word - 1) << SPI_CON_BM_S;
300
301 lantiq_ssc_maskl(spi, SPI_CON_BM_M, bm, SPI_CON);
302 }
303
304 static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
305 unsigned int mode)
306 {
307 u32 con_set = 0, con_clr = 0;
308
309 /*
310 * SPI mode mapping in CON register:
311 * Mode CPOL CPHA CON.PO CON.PH
312 * 0 0 0 0 1
313 * 1 0 1 0 0
314 * 2 1 0 1 1
315 * 3 1 1 1 0
316 */
317 if (mode & SPI_CPHA)
318 con_clr |= SPI_CON_PH;
319 else
320 con_set |= SPI_CON_PH;
321
322 if (mode & SPI_CPOL)
323 con_set |= SPI_CON_PO | SPI_CON_IDLE;
324 else
325 con_clr |= SPI_CON_PO | SPI_CON_IDLE;
326
327 /* Set heading control */
328 if (mode & SPI_LSB_FIRST)
329 con_clr |= SPI_CON_HB;
330 else
331 con_set |= SPI_CON_HB;
332
333 /* Set loopback mode */
334 if (mode & SPI_LOOP)
335 con_set |= SPI_CON_LB;
336 else
337 con_clr |= SPI_CON_LB;
338
339 lantiq_ssc_maskl(spi, con_clr, con_set, SPI_CON);
340 }
341
/*
 * Bring the controller into a known state: config mode, error flags
 * cleared, master mode selected, FIFOs enabled and flushed, and the
 * RX/TX/error interrupts enabled. The controller is left in config
 * mode (hw_enter_active_mode is called per message).
 */
static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
{
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;

	/*
	 * Set clock divider for run mode to 1 to
	 * run at same frequency as FPI bus
	 */
	lantiq_ssc_writel(spi, 1 << SPI_CLC_RMC_S, SPI_CLC);

	/* Put controller into config mode */
	hw_enter_config_mode(spi);

	/* Clear error flags */
	lantiq_ssc_maskl(spi, 0, SPI_WHBSTATE_CLR_ERRORS, SPI_WHBSTATE);

	/* Enable error checking, disable TX/RX */
	lantiq_ssc_writel(spi, SPI_CON_RUEN | SPI_CON_AEN | SPI_CON_TEN |
		SPI_CON_REN | SPI_CON_TXOFF | SPI_CON_RXOFF, SPI_CON);

	/* Setup default SPI mode */
	hw_setup_bits_per_word(spi, spi->bits_per_word);
	hw_setup_clock_mode(spi, SPI_MODE_0);

	/* Enable master mode and clear error flags */
	lantiq_ssc_writel(spi, SPI_WHBSTATE_SETMS | SPI_WHBSTATE_CLR_ERRORS,
			       SPI_WHBSTATE);

	/* Reset GPIO/CS registers: all CS outputs deasserted via FPGO set-byte */
	lantiq_ssc_writel(spi, 0, SPI_GPOCON);
	lantiq_ssc_writel(spi, 0xFF00, SPI_FPGO);

	/* Enable and flush FIFOs */
	rx_fifo_reset(spi);
	tx_fifo_reset(spi);

	/* Enable interrupts (frame interrupt stays disabled) */
	lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r | SPI_IRNEN_E,
			  SPI_IRNEN);
}
382
383 static int lantiq_ssc_setup(struct spi_device *spidev)
384 {
385 struct spi_master *master = spidev->master;
386 struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
387 unsigned int cs = spidev->chip_select;
388 u32 gpocon;
389
390 /* GPIOs are used for CS */
391 if (gpio_is_valid(spidev->cs_gpio))
392 return 0;
393
394 dev_dbg(spi->dev, "using internal chipselect %u\n", cs);
395
396 if (cs < spi->base_cs) {
397 dev_err(spi->dev,
398 "chipselect %i too small (min %i)\n", cs, spi->base_cs);
399 return -EINVAL;
400 }
401
402 /* set GPO pin to CS mode */
403 gpocon = 1 << ((cs - spi->base_cs) + SPI_GPOCON_ISCSBN_S);
404
405 /* invert GPO pin */
406 if (spidev->mode & SPI_CS_HIGH)
407 gpocon |= 1 << (cs - spi->base_cs);
408
409 lantiq_ssc_maskl(spi, 0, gpocon, SPI_GPOCON);
410
411 return 0;
412 }
413
414 static int lantiq_ssc_prepare_message(struct spi_master *master,
415 struct spi_message *message)
416 {
417 struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
418
419 hw_enter_config_mode(spi);
420 hw_setup_clock_mode(spi, message->spi->mode);
421 hw_enter_active_mode(spi);
422
423 return 0;
424 }
425
426 static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
427 struct spi_device *spidev, struct spi_transfer *t)
428 {
429 unsigned int speed_hz = t->speed_hz;
430 unsigned int bits_per_word = t->bits_per_word;
431 u32 con;
432
433 if (bits_per_word != spi->bits_per_word ||
434 speed_hz != spi->speed_hz) {
435 hw_enter_config_mode(spi);
436 hw_setup_speed_hz(spi, speed_hz);
437 hw_setup_bits_per_word(spi, bits_per_word);
438 hw_enter_active_mode(spi);
439
440 spi->speed_hz = speed_hz;
441 spi->bits_per_word = bits_per_word;
442 }
443
444 /* Configure transmitter and receiver */
445 con = lantiq_ssc_readl(spi, SPI_CON);
446 if (t->tx_buf)
447 con &= ~SPI_CON_TXOFF;
448 else
449 con |= SPI_CON_TXOFF;
450
451 if (t->rx_buf)
452 con &= ~SPI_CON_RXOFF;
453 else
454 con |= SPI_CON_RXOFF;
455
456 lantiq_ssc_writel(spi, con, SPI_CON);
457 }
458
/*
 * Message teardown: wait for the busy-wait worker to finalize the last
 * transfer before disabling the data paths.
 */
static int lantiq_ssc_unprepare_message(struct spi_master *master,
					struct spi_message *message)
{
	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);

	/* Let a queued lantiq_ssc_bussy_work finish first */
	flush_workqueue(spi->wq);

	/* Disable transmitter and receiver while idle */
	lantiq_ssc_maskl(spi, 0, SPI_CON_TXOFF | SPI_CON_RXOFF, SPI_CON);

	return 0;
}
471
/*
 * Fill the TX FIFO from spi->tx with as many frames as currently fit.
 *
 * Each FIFO entry carries one frame; the buffer is consumed in 1, 2 or
 * 4 byte steps depending on the configured word size. Called from
 * transfer_start() (under spi->lock) and from the TX interrupt handler.
 */
static void tx_fifo_write(struct lantiq_ssc_spi *spi)
{
	const u8 *tx8;
	const u16 *tx16;
	const u32 *tx32;
	u32 data;
	unsigned int tx_free = tx_fifo_free(spi);

	while (spi->tx_todo && tx_free) {
		switch (spi->bits_per_word) {
		case 2 ... 8:
			tx8 = spi->tx;
			data = *tx8;
			spi->tx_todo--;
			spi->tx++;
			break;
		case 16:
			/* NOTE(review): assumes the buffer is suitably
			 * aligned for 16/32-bit loads - TODO confirm
			 * for all callers. */
			tx16 = (u16 *) spi->tx;
			data = *tx16;
			spi->tx_todo -= 2;
			spi->tx += 2;
			break;
		case 32:
			tx32 = (u32 *) spi->tx;
			data = *tx32;
			spi->tx_todo -= 4;
			spi->tx += 4;
			break;
		default:
			/* bits_per_word_mask should make this unreachable */
			WARN_ON(1);
			data = 0;
			break;
		}

		lantiq_ssc_writel(spi, data, SPI_TB);
		tx_free--;
	}
}
510
/*
 * Drain the RX FIFO into spi->rx during a full-duplex transfer.
 *
 * The RX word size mirrors the TX side here, so the buffer advances in
 * 1, 2 or 4 byte steps according to bits_per_word (contrast with the
 * half-duplex variant below, where the HW always delivers 32 bits).
 */
static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
{
	u8 *rx8;
	u16 *rx16;
	u32 *rx32;
	u32 data;
	unsigned int rx_fill = rx_fifo_level(spi);

	while (rx_fill) {
		data = lantiq_ssc_readl(spi, SPI_RB);

		switch (spi->bits_per_word) {
		case 2 ... 8:
			rx8 = spi->rx;
			*rx8 = data;
			spi->rx_todo--;
			spi->rx++;
			break;
		case 16:
			/* NOTE(review): assumes the buffer is suitably
			 * aligned for 16/32-bit stores - TODO confirm. */
			rx16 = (u16 *) spi->rx;
			*rx16 = data;
			spi->rx_todo -= 2;
			spi->rx += 2;
			break;
		case 32:
			rx32 = (u32 *) spi->rx;
			*rx32 = data;
			spi->rx_todo -= 4;
			spi->rx += 4;
			break;
		default:
			WARN_ON(1);
			break;
		}

		rx_fill--;
	}
}
549
/* Drain the RX FIFO into spi->rx during an RX-only (half-duplex) transfer. */
static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
{
	u32 data, *rx32;
	u8 *rx8;
	unsigned int rxbv, shift;
	unsigned int rx_fill = rx_fifo_level(spi);

	/*
	 * In RX-only mode the bits per word value is ignored by HW. A value
	 * of 32 is used instead. Thus all 4 bytes per FIFO must be read.
	 * If remaining RX bytes are less than 4, the FIFO must be read
	 * differently. The amount of received and valid bytes is indicated
	 * by STAT.RXBV register value.
	 */
	while (rx_fill) {
		if (spi->rx_todo < 4)  {
			rxbv = (lantiq_ssc_readl(spi, SPI_STAT) &
				SPI_STAT_RXBV_M) >> SPI_STAT_RXBV_S;
			data = lantiq_ssc_readl(spi, SPI_RB);

			/* The valid bytes sit in the high end of the word;
			 * unpack them most-significant byte first.
			 * NOTE(review): assumes rxbv <= spi->rx_todo, i.e.
			 * the HW never reports more valid bytes than were
			 * requested via SPI_RXREQ - TODO confirm. */
			shift = (rxbv - 1) * 8;
			rx8 = spi->rx;

			while (rxbv) {
				*rx8++ = (data >> shift) & 0xFF;
				rxbv--;
				shift -= 8;
				spi->rx_todo--;
				spi->rx++;
			}
		} else {
			data = lantiq_ssc_readl(spi, SPI_RB);
			rx32 = (u32 *) spi->rx;

			*rx32++ = data;
			spi->rx_todo -= 4;
			spi->rx += 4;
		}
		rx_fill--;
	}
}
591
592 static void rx_request(struct lantiq_ssc_spi *spi)
593 {
594 unsigned int rxreq, rxreq_max;
595
596 /*
597 * To avoid receive overflows at high clocks it is better to request
598 * only the amount of bytes that fits into all FIFOs. This value
599 * depends on the FIFO size implemented in hardware.
600 */
601 rxreq = spi->rx_todo;
602 rxreq_max = spi->rx_fifo_size * 4;
603 if (rxreq > rxreq_max)
604 rxreq = rxreq_max;
605
606 lantiq_ssc_writel(spi, rxreq, SPI_RXREQ);
607 }
608
/*
 * Shared handler for the RX and TX interrupts.
 *
 * TX or full-duplex path: read back any received words first, then
 * refill the TX FIFO; once tx_todo is 0 and the TX FIFO has drained,
 * the transfer is complete. RX-only path: drain the FIFO and request
 * the next chunk until rx_todo reaches 0.
 *
 * Completion queues the busy-wait worker, which waits for STAT.BSY to
 * clear before finalizing the transfer (see lantiq_ssc_bussy_work).
 */
static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
{
	struct lantiq_ssc_spi *spi = data;

	if (spi->tx) {
		if (spi->rx && spi->rx_todo)
			rx_fifo_read_full_duplex(spi);

		if (spi->tx_todo)
			tx_fifo_write(spi);
		else if (!tx_fifo_level(spi))
			goto completed;
	} else if (spi->rx) {
		if (spi->rx_todo) {
			rx_fifo_read_half_duplex(spi);

			if (spi->rx_todo)
				rx_request(spi);
			else
				goto completed;
		} else {
			goto completed;
		}
	}

	return IRQ_HANDLED;

completed:
	queue_work(spi->wq, &spi->work);

	return IRQ_HANDLED;
}
641
642 static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
643 {
644 struct lantiq_ssc_spi *spi = data;
645 u32 stat = lantiq_ssc_readl(spi, SPI_STAT);
646
647 if (!(stat & SPI_STAT_ERRORS))
648 return IRQ_NONE;
649
650 if (stat & SPI_STAT_RUE)
651 dev_err(spi->dev, "receive underflow error\n");
652 if (stat & SPI_STAT_TUE)
653 dev_err(spi->dev, "transmit underflow error\n");
654 if (stat & SPI_STAT_AE)
655 dev_err(spi->dev, "abort error\n");
656 if (stat & SPI_STAT_RE)
657 dev_err(spi->dev, "receive overflow error\n");
658 if (stat & SPI_STAT_TE)
659 dev_err(spi->dev, "transmit overflow error\n");
660 if (stat & SPI_STAT_ME)
661 dev_err(spi->dev, "mode error\n");
662
663 /* Clear error flags */
664 lantiq_ssc_maskl(spi, 0, SPI_WHBSTATE_CLR_ERRORS, SPI_WHBSTATE);
665
666 /* set bad status so it can be retried */
667 if (spi->master->cur_msg)
668 spi->master->cur_msg->status = -EIO;
669 queue_work(spi->wq, &spi->work);
670
671 return IRQ_HANDLED;
672 }
673
674 static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
675 struct spi_transfer *t)
676 {
677 unsigned long flags;
678
679 spin_lock_irqsave(&spi->lock, flags);
680
681 spi->tx = t->tx_buf;
682 spi->rx = t->rx_buf;
683
684 if (t->tx_buf) {
685 spi->tx_todo = t->len;
686
687 /* initially fill TX FIFO */
688 tx_fifo_write(spi);
689 }
690
691 if (spi->rx) {
692 spi->rx_todo = t->len;
693
694 /* start shift clock in RX-only mode */
695 if (!spi->tx)
696 rx_request(spi);
697 }
698
699 spin_unlock_irqrestore(&spi->lock, flags);
700
701 return t->len;
702 }
703
/*
 * The driver only gets an interrupt when the FIFO is empty, but there
 * is an additional shift register from which the data is written to
 * the wire. We get the last interrupt when the controller starts to
 * write the last word to the wire, not when it is finished. Do busy
 * waiting till it finishes.
 */
static void lantiq_ssc_bussy_work(struct work_struct *work)
{
	struct lantiq_ssc_spi *spi;
	/* time for 8 bits at the current bit clock, in milliseconds */
	unsigned long long timeout = 8LL * 1000LL;
	unsigned long end;

	spi = container_of(work, typeof(*spi), work);

	do_div(timeout, spi->speed_hz);
	timeout += timeout + 100; /* some tolerance */

	end = jiffies + msecs_to_jiffies(timeout);
	do {
		u32 stat = lantiq_ssc_readl(spi, SPI_STAT);

		/* Controller idle: the last word has left the shift register */
		if (!(stat & SPI_STAT_BSY)) {
			spi_finalize_current_transfer(spi->master);
			return;
		}

		cond_resched();
	} while (!time_after_eq(jiffies, end));

	/* Still busy after the deadline: flag the message as failed */
	if (spi->master->cur_msg)
		spi->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(spi->master);
}
738
/* Core error callback (e.g. on timeout): drop stale data from both FIFOs. */
static void lantiq_ssc_handle_err(struct spi_master *master,
				  struct spi_message *message)
{
	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);

	tx_fifo_flush(spi);
	rx_fifo_flush(spi);
}
748
749 static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
750 {
751 struct lantiq_ssc_spi *spi = spi_master_get_devdata(spidev->master);
752 unsigned int cs = spidev->chip_select;
753 u32 fgpo;
754
755 if (!!(spidev->mode & SPI_CS_HIGH) == enable)
756 fgpo = (1 << (cs - spi->base_cs));
757 else
758 fgpo = (1 << (cs - spi->base_cs + SPI_FGPO_SETOUTN_S));
759
760 lantiq_ssc_writel(spi, fgpo, SPI_FPGO);
761 }
762
/* Core transfer_one callback: program the HW for @t, then start it. */
static int lantiq_ssc_transfer_one(struct spi_master *master,
				   struct spi_device *spidev,
				   struct spi_transfer *t)
{
	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);

	hw_setup_transfer(spi, spidev, t);
	return transfer_start(spi, spidev, t);
}
773
/* XWAY SoCs: transmit-end interrupt on bit 1, receive-end on bit 0 */
static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
	.irnen_r = SPI_IRNEN_R_XWAY,
	.irnen_t = SPI_IRNEN_T_XWAY,
};

/* XRX/Falcon SoCs: the R/T interrupt bits are swapped vs. XWAY */
static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
	.irnen_r = SPI_IRNEN_R_XRX,
	.irnen_t = SPI_IRNEN_T_XRX,
};

static const struct of_device_id lantiq_ssc_match[] = {
	{ .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
	{ .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
	{ .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
	{},
};
MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
791
/*
 * Probe: map registers, request the three mandatory interrupts, enable
 * clocks, read the FIFO geometry from the ID register, initialize the
 * hardware and register the SPI master. Resources acquired without a
 * devm_* helper (workqueue, fpi_clk, spi_clk enable) are released in
 * reverse order through the goto unwind at the bottom.
 */
static int lantiq_ssc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct resource *res;
	struct lantiq_ssc_spi *spi;
	const struct lantiq_ssc_hwcfg *hwcfg;
	const struct of_device_id *match;
	int err, rx_irq, tx_irq, err_irq;
	u32 id, supports_dma, revision;
	unsigned int num_cs;

	/* Pick the SoC-specific interrupt bit layout from the match table */
	match = of_match_device(lantiq_ssc_match, dev);
	if (!match) {
		dev_err(dev, "no device match\n");
		return -EINVAL;
	}
	hwcfg = match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to get resources\n");
		return -ENXIO;
	}

	/* RX, TX and error interrupts are all required (frame irq unused) */
	rx_irq = platform_get_irq_byname(pdev, SPI_RX_IRQ_NAME);
	if (rx_irq < 0) {
		dev_err(dev, "failed to get %s\n", SPI_RX_IRQ_NAME);
		return -ENXIO;
	}

	tx_irq = platform_get_irq_byname(pdev, SPI_TX_IRQ_NAME);
	if (tx_irq < 0) {
		dev_err(dev, "failed to get %s\n", SPI_TX_IRQ_NAME);
		return -ENXIO;
	}

	err_irq = platform_get_irq_byname(pdev, SPI_ERR_IRQ_NAME);
	if (err_irq < 0) {
		dev_err(dev, "failed to get %s\n", SPI_ERR_IRQ_NAME);
		return -ENXIO;
	}

	master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
	if (!master)
		return -ENOMEM;

	spi = spi_master_get_devdata(master);
	spi->master = master;
	spi->dev = dev;
	spi->hwcfg = hwcfg;
	platform_set_drvdata(pdev, spi);

	spi->regbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(spi->regbase)) {
		err = PTR_ERR(spi->regbase);
		goto err_master_put;
	}

	/* RX and TX share the same handler; the error irq has its own */
	err = devm_request_irq(dev, rx_irq, lantiq_ssc_xmit_interrupt,
			       0, SPI_RX_IRQ_NAME, spi);
	if (err)
		goto err_master_put;

	err = devm_request_irq(dev, tx_irq, lantiq_ssc_xmit_interrupt,
			       0, SPI_TX_IRQ_NAME, spi);
	if (err)
		goto err_master_put;

	err = devm_request_irq(dev, err_irq, lantiq_ssc_err_interrupt,
			       0, SPI_ERR_IRQ_NAME, spi);
	if (err)
		goto err_master_put;

	spi->spi_clk = devm_clk_get(dev, "gate");
	if (IS_ERR(spi->spi_clk)) {
		err = PTR_ERR(spi->spi_clk);
		goto err_master_put;
	}
	err = clk_prepare_enable(spi->spi_clk);
	if (err)
		goto err_master_put;

	/*
	 * Use the old clk_get_fpi() function on Lantiq platform, till it
	 * supports common clk.
	 */
#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
	spi->fpi_clk = clk_get_fpi();
#else
	spi->fpi_clk = clk_get(dev, "freq");
#endif
	if (IS_ERR(spi->fpi_clk)) {
		err = PTR_ERR(spi->fpi_clk);
		goto err_clk_disable;
	}

	/* Optional DT properties with built-in defaults */
	num_cs = 8;
	of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);

	spi->base_cs = 1;
	of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);

	spin_lock_init(&spi->lock);
	spi->bits_per_word = 8;
	spi->speed_hz = 0;

	master->dev.of_node = pdev->dev.of_node;
	master->num_chipselect = num_cs;
	master->setup = lantiq_ssc_setup;
	master->set_cs = lantiq_ssc_set_cs;
	master->handle_err = lantiq_ssc_handle_err;
	master->prepare_message = lantiq_ssc_prepare_message;
	master->unprepare_message = lantiq_ssc_unprepare_message;
	master->transfer_one = lantiq_ssc_transfer_one;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
				SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
				     SPI_BPW_MASK(16) | SPI_BPW_MASK(32);

	/* Ordered workqueue for the post-transfer busy-wait worker */
	spi->wq = alloc_ordered_workqueue(dev_name(dev), 0);
	if (!spi->wq) {
		err = -ENOMEM;
		goto err_clk_put;
	}
	INIT_WORK(&spi->work, lantiq_ssc_bussy_work);

	/* FIFO sizes, DMA capability and revision come from the ID register */
	id = lantiq_ssc_readl(spi, SPI_ID);
	spi->tx_fifo_size = (id & SPI_ID_TXFS_M) >> SPI_ID_TXFS_S;
	spi->rx_fifo_size = (id & SPI_ID_RXFS_M) >> SPI_ID_RXFS_S;
	supports_dma = (id & SPI_ID_CFG_M) >> SPI_ID_CFG_S;
	revision = id & SPI_ID_REV_M;

	lantiq_ssc_hw_init(spi);

	dev_info(dev,
		"Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
		revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);

	err = devm_spi_register_master(dev, master);
	if (err) {
		dev_err(dev, "failed to register spi_master\n");
		goto err_wq_destroy;
	}

	return 0;

err_wq_destroy:
	destroy_workqueue(spi->wq);
err_clk_put:
	clk_put(spi->fpi_clk);
err_clk_disable:
	clk_disable_unprepare(spi->spi_clk);
err_master_put:
	spi_master_put(master);

	return err;
}
950
/*
 * Remove: quiesce the hardware first (mask interrupts, gate the module
 * clock control, flush FIFOs, drop to config mode), then release the
 * resources probe acquired without devm helpers.
 */
static int lantiq_ssc_remove(struct platform_device *pdev)
{
	struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);

	lantiq_ssc_writel(spi, 0, SPI_IRNEN);
	lantiq_ssc_writel(spi, 0, SPI_CLC);
	rx_fifo_flush(spi);
	tx_fifo_flush(spi);
	hw_enter_config_mode(spi);

	destroy_workqueue(spi->wq);
	clk_disable_unprepare(spi->spi_clk);
	clk_put(spi->fpi_clk);

	return 0;
}
967
968 static struct platform_driver lantiq_ssc_driver = {
969 .probe = lantiq_ssc_probe,
970 .remove = lantiq_ssc_remove,
971 .driver = {
972 .name = "spi-lantiq-ssc",
973 .owner = THIS_MODULE,
974 .of_match_table = lantiq_ssc_match,
975 },
976 };
977 module_platform_driver(lantiq_ssc_driver);
978
979 MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
980 MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
981 MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
982 MODULE_LICENSE("GPL");
983 MODULE_ALIAS("platform:spi-lantiq-ssc");