1 // SPDX-License-Identifier: GPL-2.0+
3 // Copyright (c) 2009 Samsung Electronics Co., Ltd.
4 // Jaswinder Singh <jassi.brar@samsung.com>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/interrupt.h>
9 #include <linux/delay.h>
10 #include <linux/clk.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/spi/spi.h>
16 #include <linux/gpio.h>
18 #include <linux/of_device.h>
19 #include <linux/of_gpio.h>
21 #include <linux/platform_data/spi-s3c64xx.h>
23 #define MAX_SPI_PORTS 6
24 #define S3C64XX_SPI_QUIRK_POLL (1 << 0)
25 #define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
26 #define AUTOSUSPEND_TIMEOUT 2000
28 /* Registers and bit-fields */
30 #define S3C64XX_SPI_CH_CFG 0x00
31 #define S3C64XX_SPI_CLK_CFG 0x04
32 #define S3C64XX_SPI_MODE_CFG 0x08
33 #define S3C64XX_SPI_CS_REG 0x0C
34 #define S3C64XX_SPI_INT_EN 0x10
35 #define S3C64XX_SPI_STATUS 0x14
36 #define S3C64XX_SPI_TX_DATA 0x18
37 #define S3C64XX_SPI_RX_DATA 0x1C
38 #define S3C64XX_SPI_PACKET_CNT 0x20
39 #define S3C64XX_SPI_PENDING_CLR 0x24
40 #define S3C64XX_SPI_SWAP_CFG 0x28
41 #define S3C64XX_SPI_FB_CLK 0x2C
43 #define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
44 #define S3C64XX_SPI_CH_SW_RST (1<<5)
45 #define S3C64XX_SPI_CH_SLAVE (1<<4)
46 #define S3C64XX_SPI_CPOL_L (1<<3)
47 #define S3C64XX_SPI_CPHA_B (1<<2)
48 #define S3C64XX_SPI_CH_RXCH_ON (1<<1)
49 #define S3C64XX_SPI_CH_TXCH_ON (1<<0)
51 #define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
52 #define S3C64XX_SPI_CLKSEL_SRCSHFT 9
53 #define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
54 #define S3C64XX_SPI_PSR_MASK 0xff
56 #define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
57 #define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
58 #define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
59 #define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
60 #define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
61 #define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
62 #define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
63 #define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
64 #define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
65 #define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
66 #define S3C64XX_SPI_MODE_4BURST (1<<0)
68 #define S3C64XX_SPI_CS_NSC_CNT_2 (2<<4)
69 #define S3C64XX_SPI_CS_AUTO (1<<1)
70 #define S3C64XX_SPI_CS_SIG_INACT (1<<0)
72 #define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
73 #define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
74 #define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
75 #define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
76 #define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
77 #define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
78 #define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
80 #define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
81 #define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
82 #define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
83 #define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
84 #define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
85 #define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
87 #define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
89 #define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
90 #define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
91 #define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
92 #define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
93 #define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
95 #define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
96 #define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
97 #define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
98 #define S3C64XX_SPI_SWAP_RX_EN (1<<4)
99 #define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
100 #define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
101 #define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
102 #define S3C64XX_SPI_SWAP_TX_EN (1<<0)
104 #define S3C64XX_SPI_FBCLK_MSK (3<<0)
/*
 * Per-port FIFO geometry helpers. The FIFO level mask and the bit
 * positions of RX_FIFO_LVL/TX_DONE vary per SoC, so they come from
 * the port_conf attached to the driver data.
 */
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
					FIFO_LVL_MASK(i))

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

/* Rough busy-wait loop count for 't' milliseconds */
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
121 #define RXBUSY (1<<2)
122 #define TXBUSY (1<<3)
124 struct s3c64xx_spi_dma_data
{
127 enum dma_transfer_direction direction
;
131 * struct s3c64xx_spi_port_config - SPI Controller hardware info
132 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
133 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS regiter.
134 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS regiter.
135 * @quirks: Bitmask of known quirks
136 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
137 * @clk_from_cmu: True, if the controller does not include a clock mux and
139 * @clk_ioclk: True if clock is present on this device
141 * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
142 * differ in some aspects such as the size of the fifo and spi bus clock
143 * setup. Such differences are specified to the driver using this structure
144 * which is provided as driver data to the driver.
146 struct s3c64xx_spi_port_config
{
147 int fifo_lvl_mask
[MAX_SPI_PORTS
];
157 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
158 * @clk: Pointer to the spi clock.
159 * @src_clk: Pointer to the clock used to generate SPI signals.
160 * @ioclk: Pointer to the i/o clock between master and slave
161 * @pdev: Pointer to device's platform device data
162 * @master: Pointer to the SPI Protocol master.
163 * @cntrlr_info: Platform specific data for the controller this driver manages.
164 * @lock: Controller specific lock.
165 * @state: Set of FLAGS to indicate status.
166 * @sfr_start: BUS address of SPI controller regs.
167 * @regs: Pointer to ioremap'ed controller registers.
168 * @xfer_completion: To indicate completion of xfer task.
169 * @cur_mode: Stores the active configuration of the controller.
170 * @cur_bpw: Stores the active bits per word settings.
171 * @cur_speed: Current clock speed
172 * @rx_dma: Local receive DMA data (e.g. chan and direction)
173 * @tx_dma: Local transmit DMA data (e.g. chan and direction)
174 * @port_conf: Local SPI port configuartion data
175 * @port_id: Port identification number
177 struct s3c64xx_spi_driver_data
{
182 struct platform_device
*pdev
;
183 struct spi_master
*master
;
184 struct s3c64xx_spi_info
*cntrlr_info
;
186 unsigned long sfr_start
;
187 struct completion xfer_completion
;
189 unsigned cur_mode
, cur_bpw
;
191 struct s3c64xx_spi_dma_data rx_dma
;
192 struct s3c64xx_spi_dma_data tx_dma
;
193 const struct s3c64xx_spi_port_config
*port_conf
;
194 unsigned int port_id
;
197 static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data
*sdd
)
199 void __iomem
*regs
= sdd
->regs
;
203 writel(0, regs
+ S3C64XX_SPI_PACKET_CNT
);
205 val
= readl(regs
+ S3C64XX_SPI_CH_CFG
);
206 val
&= ~(S3C64XX_SPI_CH_RXCH_ON
| S3C64XX_SPI_CH_TXCH_ON
);
207 writel(val
, regs
+ S3C64XX_SPI_CH_CFG
);
209 val
= readl(regs
+ S3C64XX_SPI_CH_CFG
);
210 val
|= S3C64XX_SPI_CH_SW_RST
;
211 val
&= ~S3C64XX_SPI_CH_HS_EN
;
212 writel(val
, regs
+ S3C64XX_SPI_CH_CFG
);
215 loops
= msecs_to_loops(1);
217 val
= readl(regs
+ S3C64XX_SPI_STATUS
);
218 } while (TX_FIFO_LVL(val
, sdd
) && loops
--);
221 dev_warn(&sdd
->pdev
->dev
, "Timed out flushing TX FIFO\n");
224 loops
= msecs_to_loops(1);
226 val
= readl(regs
+ S3C64XX_SPI_STATUS
);
227 if (RX_FIFO_LVL(val
, sdd
))
228 readl(regs
+ S3C64XX_SPI_RX_DATA
);
234 dev_warn(&sdd
->pdev
->dev
, "Timed out flushing RX FIFO\n");
236 val
= readl(regs
+ S3C64XX_SPI_CH_CFG
);
237 val
&= ~S3C64XX_SPI_CH_SW_RST
;
238 writel(val
, regs
+ S3C64XX_SPI_CH_CFG
);
240 val
= readl(regs
+ S3C64XX_SPI_MODE_CFG
);
241 val
&= ~(S3C64XX_SPI_MODE_TXDMA_ON
| S3C64XX_SPI_MODE_RXDMA_ON
);
242 writel(val
, regs
+ S3C64XX_SPI_MODE_CFG
);
245 static void s3c64xx_spi_dmacb(void *data
)
247 struct s3c64xx_spi_driver_data
*sdd
;
248 struct s3c64xx_spi_dma_data
*dma
= data
;
251 if (dma
->direction
== DMA_DEV_TO_MEM
)
252 sdd
= container_of(data
,
253 struct s3c64xx_spi_driver_data
, rx_dma
);
255 sdd
= container_of(data
,
256 struct s3c64xx_spi_driver_data
, tx_dma
);
258 spin_lock_irqsave(&sdd
->lock
, flags
);
260 if (dma
->direction
== DMA_DEV_TO_MEM
) {
261 sdd
->state
&= ~RXBUSY
;
262 if (!(sdd
->state
& TXBUSY
))
263 complete(&sdd
->xfer_completion
);
265 sdd
->state
&= ~TXBUSY
;
266 if (!(sdd
->state
& RXBUSY
))
267 complete(&sdd
->xfer_completion
);
270 spin_unlock_irqrestore(&sdd
->lock
, flags
);
273 static int prepare_dma(struct s3c64xx_spi_dma_data
*dma
,
274 struct sg_table
*sgt
)
276 struct s3c64xx_spi_driver_data
*sdd
;
277 struct dma_slave_config config
;
278 struct dma_async_tx_descriptor
*desc
;
281 memset(&config
, 0, sizeof(config
));
283 if (dma
->direction
== DMA_DEV_TO_MEM
) {
284 sdd
= container_of((void *)dma
,
285 struct s3c64xx_spi_driver_data
, rx_dma
);
286 config
.direction
= dma
->direction
;
287 config
.src_addr
= sdd
->sfr_start
+ S3C64XX_SPI_RX_DATA
;
288 config
.src_addr_width
= sdd
->cur_bpw
/ 8;
289 config
.src_maxburst
= 1;
290 dmaengine_slave_config(dma
->ch
, &config
);
292 sdd
= container_of((void *)dma
,
293 struct s3c64xx_spi_driver_data
, tx_dma
);
294 config
.direction
= dma
->direction
;
295 config
.dst_addr
= sdd
->sfr_start
+ S3C64XX_SPI_TX_DATA
;
296 config
.dst_addr_width
= sdd
->cur_bpw
/ 8;
297 config
.dst_maxburst
= 1;
298 dmaengine_slave_config(dma
->ch
, &config
);
301 desc
= dmaengine_prep_slave_sg(dma
->ch
, sgt
->sgl
, sgt
->nents
,
302 dma
->direction
, DMA_PREP_INTERRUPT
);
304 dev_err(&sdd
->pdev
->dev
, "unable to prepare %s scatterlist",
305 dma
->direction
== DMA_DEV_TO_MEM
? "rx" : "tx");
309 desc
->callback
= s3c64xx_spi_dmacb
;
310 desc
->callback_param
= dma
;
312 dma
->cookie
= dmaengine_submit(desc
);
313 ret
= dma_submit_error(dma
->cookie
);
315 dev_err(&sdd
->pdev
->dev
, "DMA submission failed");
319 dma_async_issue_pending(dma
->ch
);
323 static void s3c64xx_spi_set_cs(struct spi_device
*spi
, bool enable
)
325 struct s3c64xx_spi_driver_data
*sdd
=
326 spi_master_get_devdata(spi
->master
);
328 if (sdd
->cntrlr_info
->no_cs
)
332 if (!(sdd
->port_conf
->quirks
& S3C64XX_SPI_QUIRK_CS_AUTO
)) {
333 writel(0, sdd
->regs
+ S3C64XX_SPI_CS_REG
);
335 u32 ssel
= readl(sdd
->regs
+ S3C64XX_SPI_CS_REG
);
337 ssel
|= (S3C64XX_SPI_CS_AUTO
|
338 S3C64XX_SPI_CS_NSC_CNT_2
);
339 writel(ssel
, sdd
->regs
+ S3C64XX_SPI_CS_REG
);
342 if (!(sdd
->port_conf
->quirks
& S3C64XX_SPI_QUIRK_CS_AUTO
))
343 writel(S3C64XX_SPI_CS_SIG_INACT
,
344 sdd
->regs
+ S3C64XX_SPI_CS_REG
);
348 static int s3c64xx_spi_prepare_transfer(struct spi_master
*spi
)
350 struct s3c64xx_spi_driver_data
*sdd
= spi_master_get_devdata(spi
);
355 spi
->dma_rx
= sdd
->rx_dma
.ch
;
356 spi
->dma_tx
= sdd
->tx_dma
.ch
;
361 static bool s3c64xx_spi_can_dma(struct spi_master
*master
,
362 struct spi_device
*spi
,
363 struct spi_transfer
*xfer
)
365 struct s3c64xx_spi_driver_data
*sdd
= spi_master_get_devdata(master
);
367 return xfer
->len
> (FIFO_LVL_MASK(sdd
) >> 1) + 1;
370 static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data
*sdd
,
371 struct spi_transfer
*xfer
, int dma_mode
)
373 void __iomem
*regs
= sdd
->regs
;
377 modecfg
= readl(regs
+ S3C64XX_SPI_MODE_CFG
);
378 modecfg
&= ~(S3C64XX_SPI_MODE_TXDMA_ON
| S3C64XX_SPI_MODE_RXDMA_ON
);
380 chcfg
= readl(regs
+ S3C64XX_SPI_CH_CFG
);
381 chcfg
&= ~S3C64XX_SPI_CH_TXCH_ON
;
384 chcfg
&= ~S3C64XX_SPI_CH_RXCH_ON
;
386 /* Always shift in data in FIFO, even if xfer is Tx only,
387 * this helps setting PCKT_CNT value for generating clocks
390 chcfg
|= S3C64XX_SPI_CH_RXCH_ON
;
391 writel(((xfer
->len
* 8 / sdd
->cur_bpw
) & 0xffff)
392 | S3C64XX_SPI_PACKET_CNT_EN
,
393 regs
+ S3C64XX_SPI_PACKET_CNT
);
396 if (xfer
->tx_buf
!= NULL
) {
397 sdd
->state
|= TXBUSY
;
398 chcfg
|= S3C64XX_SPI_CH_TXCH_ON
;
400 modecfg
|= S3C64XX_SPI_MODE_TXDMA_ON
;
401 ret
= prepare_dma(&sdd
->tx_dma
, &xfer
->tx_sg
);
403 switch (sdd
->cur_bpw
) {
405 iowrite32_rep(regs
+ S3C64XX_SPI_TX_DATA
,
406 xfer
->tx_buf
, xfer
->len
/ 4);
409 iowrite16_rep(regs
+ S3C64XX_SPI_TX_DATA
,
410 xfer
->tx_buf
, xfer
->len
/ 2);
413 iowrite8_rep(regs
+ S3C64XX_SPI_TX_DATA
,
414 xfer
->tx_buf
, xfer
->len
);
420 if (xfer
->rx_buf
!= NULL
) {
421 sdd
->state
|= RXBUSY
;
423 if (sdd
->port_conf
->high_speed
&& sdd
->cur_speed
>= 30000000UL
424 && !(sdd
->cur_mode
& SPI_CPHA
))
425 chcfg
|= S3C64XX_SPI_CH_HS_EN
;
428 modecfg
|= S3C64XX_SPI_MODE_RXDMA_ON
;
429 chcfg
|= S3C64XX_SPI_CH_RXCH_ON
;
430 writel(((xfer
->len
* 8 / sdd
->cur_bpw
) & 0xffff)
431 | S3C64XX_SPI_PACKET_CNT_EN
,
432 regs
+ S3C64XX_SPI_PACKET_CNT
);
433 ret
= prepare_dma(&sdd
->rx_dma
, &xfer
->rx_sg
);
440 writel(modecfg
, regs
+ S3C64XX_SPI_MODE_CFG
);
441 writel(chcfg
, regs
+ S3C64XX_SPI_CH_CFG
);
446 static u32
s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data
*sdd
,
449 void __iomem
*regs
= sdd
->regs
;
450 unsigned long val
= 1;
453 /* max fifo depth available */
454 u32 max_fifo
= (FIFO_LVL_MASK(sdd
) >> 1) + 1;
457 val
= msecs_to_loops(timeout_ms
);
460 status
= readl(regs
+ S3C64XX_SPI_STATUS
);
461 } while (RX_FIFO_LVL(status
, sdd
) < max_fifo
&& --val
);
463 /* return the actual received data length */
464 return RX_FIFO_LVL(status
, sdd
);
467 static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data
*sdd
,
468 struct spi_transfer
*xfer
)
470 void __iomem
*regs
= sdd
->regs
;
475 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
476 ms
= xfer
->len
* 8 * 1000 / sdd
->cur_speed
;
477 ms
+= 30; /* some tolerance */
478 ms
= max(ms
, 100); /* minimum timeout */
480 val
= msecs_to_jiffies(ms
) + 10;
481 val
= wait_for_completion_timeout(&sdd
->xfer_completion
, val
);
484 * If the previous xfer was completed within timeout, then
485 * proceed further else return -EIO.
486 * DmaTx returns after simply writing data in the FIFO,
487 * w/o waiting for real transmission on the bus to finish.
488 * DmaRx returns only after Dma read data from FIFO which
489 * needs bus transmission to finish, so we don't worry if
490 * Xfer involved Rx(with or without Tx).
492 if (val
&& !xfer
->rx_buf
) {
493 val
= msecs_to_loops(10);
494 status
= readl(regs
+ S3C64XX_SPI_STATUS
);
495 while ((TX_FIFO_LVL(status
, sdd
)
496 || !S3C64XX_SPI_ST_TX_DONE(status
, sdd
))
499 status
= readl(regs
+ S3C64XX_SPI_STATUS
);
504 /* If timed out while checking rx/tx status return error */
511 static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data
*sdd
,
512 struct spi_transfer
*xfer
)
514 void __iomem
*regs
= sdd
->regs
;
522 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
523 ms
= xfer
->len
* 8 * 1000 / sdd
->cur_speed
;
524 ms
+= 10; /* some tolerance */
526 val
= msecs_to_loops(ms
);
528 status
= readl(regs
+ S3C64XX_SPI_STATUS
);
529 } while (RX_FIFO_LVL(status
, sdd
) < xfer
->len
&& --val
);
534 /* If it was only Tx */
536 sdd
->state
&= ~TXBUSY
;
541 * If the receive length is bigger than the controller fifo
542 * size, calculate the loops and read the fifo as many times.
543 * loops = length / max fifo size (calculated by using the
545 * For any size less than the fifo size the below code is
546 * executed atleast once.
548 loops
= xfer
->len
/ ((FIFO_LVL_MASK(sdd
) >> 1) + 1);
551 /* wait for data to be received in the fifo */
552 cpy_len
= s3c64xx_spi_wait_for_timeout(sdd
,
555 switch (sdd
->cur_bpw
) {
557 ioread32_rep(regs
+ S3C64XX_SPI_RX_DATA
,
561 ioread16_rep(regs
+ S3C64XX_SPI_RX_DATA
,
565 ioread8_rep(regs
+ S3C64XX_SPI_RX_DATA
,
572 sdd
->state
&= ~RXBUSY
;
577 static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data
*sdd
)
579 void __iomem
*regs
= sdd
->regs
;
584 if (!sdd
->port_conf
->clk_from_cmu
) {
585 val
= readl(regs
+ S3C64XX_SPI_CLK_CFG
);
586 val
&= ~S3C64XX_SPI_ENCLK_ENABLE
;
587 writel(val
, regs
+ S3C64XX_SPI_CLK_CFG
);
590 /* Set Polarity and Phase */
591 val
= readl(regs
+ S3C64XX_SPI_CH_CFG
);
592 val
&= ~(S3C64XX_SPI_CH_SLAVE
|
596 if (sdd
->cur_mode
& SPI_CPOL
)
597 val
|= S3C64XX_SPI_CPOL_L
;
599 if (sdd
->cur_mode
& SPI_CPHA
)
600 val
|= S3C64XX_SPI_CPHA_B
;
602 writel(val
, regs
+ S3C64XX_SPI_CH_CFG
);
604 /* Set Channel & DMA Mode */
605 val
= readl(regs
+ S3C64XX_SPI_MODE_CFG
);
606 val
&= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
607 | S3C64XX_SPI_MODE_CH_TSZ_MASK
);
609 switch (sdd
->cur_bpw
) {
611 val
|= S3C64XX_SPI_MODE_BUS_TSZ_WORD
;
612 val
|= S3C64XX_SPI_MODE_CH_TSZ_WORD
;
615 val
|= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD
;
616 val
|= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD
;
619 val
|= S3C64XX_SPI_MODE_BUS_TSZ_BYTE
;
620 val
|= S3C64XX_SPI_MODE_CH_TSZ_BYTE
;
624 writel(val
, regs
+ S3C64XX_SPI_MODE_CFG
);
626 if (sdd
->port_conf
->clk_from_cmu
) {
627 /* The src_clk clock is divided internally by 2 */
628 ret
= clk_set_rate(sdd
->src_clk
, sdd
->cur_speed
* 2);
631 sdd
->cur_speed
= clk_get_rate(sdd
->src_clk
) / 2;
633 /* Configure Clock */
634 val
= readl(regs
+ S3C64XX_SPI_CLK_CFG
);
635 val
&= ~S3C64XX_SPI_PSR_MASK
;
636 val
|= ((clk_get_rate(sdd
->src_clk
) / sdd
->cur_speed
/ 2 - 1)
637 & S3C64XX_SPI_PSR_MASK
);
638 writel(val
, regs
+ S3C64XX_SPI_CLK_CFG
);
641 val
= readl(regs
+ S3C64XX_SPI_CLK_CFG
);
642 val
|= S3C64XX_SPI_ENCLK_ENABLE
;
643 writel(val
, regs
+ S3C64XX_SPI_CLK_CFG
);
649 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
651 static int s3c64xx_spi_prepare_message(struct spi_master
*master
,
652 struct spi_message
*msg
)
654 struct s3c64xx_spi_driver_data
*sdd
= spi_master_get_devdata(master
);
655 struct spi_device
*spi
= msg
->spi
;
656 struct s3c64xx_spi_csinfo
*cs
= spi
->controller_data
;
658 /* Configure feedback delay */
659 writel(cs
->fb_delay
& 0x3, sdd
->regs
+ S3C64XX_SPI_FB_CLK
);
664 static int s3c64xx_spi_transfer_one(struct spi_master
*master
,
665 struct spi_device
*spi
,
666 struct spi_transfer
*xfer
)
668 struct s3c64xx_spi_driver_data
*sdd
= spi_master_get_devdata(master
);
669 const unsigned int fifo_len
= (FIFO_LVL_MASK(sdd
) >> 1) + 1;
670 const void *tx_buf
= NULL
;
672 int target_len
= 0, origin_len
= 0;
679 reinit_completion(&sdd
->xfer_completion
);
681 /* Only BPW and Speed may change across transfers */
682 bpw
= xfer
->bits_per_word
;
683 speed
= xfer
->speed_hz
;
685 if (bpw
!= sdd
->cur_bpw
|| speed
!= sdd
->cur_speed
) {
687 sdd
->cur_speed
= speed
;
688 sdd
->cur_mode
= spi
->mode
;
689 status
= s3c64xx_spi_config(sdd
);
694 if (!is_polling(sdd
) && (xfer
->len
> fifo_len
) &&
695 sdd
->rx_dma
.ch
&& sdd
->tx_dma
.ch
) {
698 } else if (is_polling(sdd
) && xfer
->len
> fifo_len
) {
699 tx_buf
= xfer
->tx_buf
;
700 rx_buf
= xfer
->rx_buf
;
701 origin_len
= xfer
->len
;
703 target_len
= xfer
->len
;
704 if (xfer
->len
> fifo_len
)
705 xfer
->len
= fifo_len
;
709 spin_lock_irqsave(&sdd
->lock
, flags
);
711 /* Pending only which is to be done */
712 sdd
->state
&= ~RXBUSY
;
713 sdd
->state
&= ~TXBUSY
;
715 /* Start the signals */
716 s3c64xx_spi_set_cs(spi
, true);
718 status
= s3c64xx_enable_datapath(sdd
, xfer
, use_dma
);
720 spin_unlock_irqrestore(&sdd
->lock
, flags
);
723 dev_err(&spi
->dev
, "failed to enable data path for transfer: %d\n", status
);
728 status
= s3c64xx_wait_for_dma(sdd
, xfer
);
730 status
= s3c64xx_wait_for_pio(sdd
, xfer
);
734 "I/O Error: rx-%d tx-%d rx-%c tx-%c len-%d dma-%d res-(%d)\n",
735 xfer
->rx_buf
? 1 : 0, xfer
->tx_buf
? 1 : 0,
736 (sdd
->state
& RXBUSY
) ? 'f' : 'p',
737 (sdd
->state
& TXBUSY
) ? 'f' : 'p',
738 xfer
->len
, use_dma
? 1 : 0, status
);
741 struct dma_tx_state s
;
743 if (xfer
->tx_buf
&& (sdd
->state
& TXBUSY
)) {
744 dmaengine_pause(sdd
->tx_dma
.ch
);
745 dmaengine_tx_status(sdd
->tx_dma
.ch
, sdd
->tx_dma
.cookie
, &s
);
746 dmaengine_terminate_all(sdd
->tx_dma
.ch
);
747 dev_err(&spi
->dev
, "TX residue: %d\n", s
.residue
);
750 if (xfer
->rx_buf
&& (sdd
->state
& RXBUSY
)) {
751 dmaengine_pause(sdd
->rx_dma
.ch
);
752 dmaengine_tx_status(sdd
->rx_dma
.ch
, sdd
->rx_dma
.cookie
, &s
);
753 dmaengine_terminate_all(sdd
->rx_dma
.ch
);
754 dev_err(&spi
->dev
, "RX residue: %d\n", s
.residue
);
758 s3c64xx_flush_fifo(sdd
);
760 if (target_len
> 0) {
761 target_len
-= xfer
->len
;
764 xfer
->tx_buf
+= xfer
->len
;
767 xfer
->rx_buf
+= xfer
->len
;
769 if (target_len
> fifo_len
)
770 xfer
->len
= fifo_len
;
772 xfer
->len
= target_len
;
774 } while (target_len
> 0);
777 /* Restore original xfer buffers and length */
778 xfer
->tx_buf
= tx_buf
;
779 xfer
->rx_buf
= rx_buf
;
780 xfer
->len
= origin_len
;
786 static struct s3c64xx_spi_csinfo
*s3c64xx_get_slave_ctrldata(
787 struct spi_device
*spi
)
789 struct s3c64xx_spi_csinfo
*cs
;
790 struct device_node
*slave_np
, *data_np
= NULL
;
793 slave_np
= spi
->dev
.of_node
;
795 dev_err(&spi
->dev
, "device node not found\n");
796 return ERR_PTR(-EINVAL
);
799 data_np
= of_get_child_by_name(slave_np
, "controller-data");
801 dev_err(&spi
->dev
, "child node 'controller-data' not found\n");
802 return ERR_PTR(-EINVAL
);
805 cs
= kzalloc(sizeof(*cs
), GFP_KERNEL
);
807 of_node_put(data_np
);
808 return ERR_PTR(-ENOMEM
);
811 of_property_read_u32(data_np
, "samsung,spi-feedback-delay", &fb_delay
);
812 cs
->fb_delay
= fb_delay
;
813 of_node_put(data_np
);
818 * Here we only check the validity of requested configuration
819 * and save the configuration in a local data-structure.
820 * The controller is actually configured only just before we
821 * get a message to transfer.
823 static int s3c64xx_spi_setup(struct spi_device
*spi
)
825 struct s3c64xx_spi_csinfo
*cs
= spi
->controller_data
;
826 struct s3c64xx_spi_driver_data
*sdd
;
829 sdd
= spi_master_get_devdata(spi
->master
);
830 if (spi
->dev
.of_node
) {
831 cs
= s3c64xx_get_slave_ctrldata(spi
);
832 spi
->controller_data
= cs
;
834 /* On non-DT platforms the SPI core will set spi->cs_gpio
835 * to -ENOENT. The GPIO pin used to drive the chip select
836 * is defined by using platform data so spi->cs_gpio value
837 * has to be override to have the proper GPIO pin number.
839 spi
->cs_gpio
= cs
->line
;
842 if (IS_ERR_OR_NULL(cs
)) {
843 dev_err(&spi
->dev
, "No CS for SPI(%d)\n", spi
->chip_select
);
847 if (!spi_get_ctldata(spi
)) {
848 if (gpio_is_valid(spi
->cs_gpio
)) {
849 err
= gpio_request_one(spi
->cs_gpio
, GPIOF_OUT_INIT_HIGH
,
850 dev_name(&spi
->dev
));
853 "Failed to get /CS gpio [%d]: %d\n",
859 spi_set_ctldata(spi
, cs
);
862 pm_runtime_get_sync(&sdd
->pdev
->dev
);
864 /* Check if we can provide the requested rate */
865 if (!sdd
->port_conf
->clk_from_cmu
) {
869 speed
= clk_get_rate(sdd
->src_clk
) / 2 / (0 + 1);
871 if (spi
->max_speed_hz
> speed
)
872 spi
->max_speed_hz
= speed
;
874 psr
= clk_get_rate(sdd
->src_clk
) / 2 / spi
->max_speed_hz
- 1;
875 psr
&= S3C64XX_SPI_PSR_MASK
;
876 if (psr
== S3C64XX_SPI_PSR_MASK
)
879 speed
= clk_get_rate(sdd
->src_clk
) / 2 / (psr
+ 1);
880 if (spi
->max_speed_hz
< speed
) {
881 if (psr
+1 < S3C64XX_SPI_PSR_MASK
) {
889 speed
= clk_get_rate(sdd
->src_clk
) / 2 / (psr
+ 1);
890 if (spi
->max_speed_hz
>= speed
) {
891 spi
->max_speed_hz
= speed
;
893 dev_err(&spi
->dev
, "Can't set %dHz transfer speed\n",
900 pm_runtime_mark_last_busy(&sdd
->pdev
->dev
);
901 pm_runtime_put_autosuspend(&sdd
->pdev
->dev
);
902 s3c64xx_spi_set_cs(spi
, false);
907 pm_runtime_mark_last_busy(&sdd
->pdev
->dev
);
908 pm_runtime_put_autosuspend(&sdd
->pdev
->dev
);
909 /* setup() returns with device de-selected */
910 s3c64xx_spi_set_cs(spi
, false);
912 if (gpio_is_valid(spi
->cs_gpio
))
913 gpio_free(spi
->cs_gpio
);
914 spi_set_ctldata(spi
, NULL
);
917 if (spi
->dev
.of_node
)
923 static void s3c64xx_spi_cleanup(struct spi_device
*spi
)
925 struct s3c64xx_spi_csinfo
*cs
= spi_get_ctldata(spi
);
927 if (gpio_is_valid(spi
->cs_gpio
)) {
928 gpio_free(spi
->cs_gpio
);
929 if (spi
->dev
.of_node
)
932 /* On non-DT platforms, the SPI core sets
933 * spi->cs_gpio to -ENOENT and .setup()
934 * overrides it with the GPIO pin value
935 * passed using platform data.
937 spi
->cs_gpio
= -ENOENT
;
941 spi_set_ctldata(spi
, NULL
);
944 static irqreturn_t
s3c64xx_spi_irq(int irq
, void *data
)
946 struct s3c64xx_spi_driver_data
*sdd
= data
;
947 struct spi_master
*spi
= sdd
->master
;
948 unsigned int val
, clr
= 0;
950 val
= readl(sdd
->regs
+ S3C64XX_SPI_STATUS
);
952 if (val
& S3C64XX_SPI_ST_RX_OVERRUN_ERR
) {
953 clr
= S3C64XX_SPI_PND_RX_OVERRUN_CLR
;
954 dev_err(&spi
->dev
, "RX overrun\n");
956 if (val
& S3C64XX_SPI_ST_RX_UNDERRUN_ERR
) {
957 clr
|= S3C64XX_SPI_PND_RX_UNDERRUN_CLR
;
958 dev_err(&spi
->dev
, "RX underrun\n");
960 if (val
& S3C64XX_SPI_ST_TX_OVERRUN_ERR
) {
961 clr
|= S3C64XX_SPI_PND_TX_OVERRUN_CLR
;
962 dev_err(&spi
->dev
, "TX overrun\n");
964 if (val
& S3C64XX_SPI_ST_TX_UNDERRUN_ERR
) {
965 clr
|= S3C64XX_SPI_PND_TX_UNDERRUN_CLR
;
966 dev_err(&spi
->dev
, "TX underrun\n");
969 /* Clear the pending irq by setting and then clearing it */
970 writel(clr
, sdd
->regs
+ S3C64XX_SPI_PENDING_CLR
);
971 writel(0, sdd
->regs
+ S3C64XX_SPI_PENDING_CLR
);
976 static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data
*sdd
)
978 struct s3c64xx_spi_info
*sci
= sdd
->cntrlr_info
;
979 void __iomem
*regs
= sdd
->regs
;
985 writel(0, sdd
->regs
+ S3C64XX_SPI_CS_REG
);
986 else if (!(sdd
->port_conf
->quirks
& S3C64XX_SPI_QUIRK_CS_AUTO
))
987 writel(S3C64XX_SPI_CS_SIG_INACT
, sdd
->regs
+ S3C64XX_SPI_CS_REG
);
989 /* Disable Interrupts - we use Polling if not DMA mode */
990 writel(0, regs
+ S3C64XX_SPI_INT_EN
);
992 if (!sdd
->port_conf
->clk_from_cmu
)
993 writel(sci
->src_clk_nr
<< S3C64XX_SPI_CLKSEL_SRCSHFT
,
994 regs
+ S3C64XX_SPI_CLK_CFG
);
995 writel(0, regs
+ S3C64XX_SPI_MODE_CFG
);
996 writel(0, regs
+ S3C64XX_SPI_PACKET_CNT
);
998 /* Clear any irq pending bits, should set and clear the bits */
999 val
= S3C64XX_SPI_PND_RX_OVERRUN_CLR
|
1000 S3C64XX_SPI_PND_RX_UNDERRUN_CLR
|
1001 S3C64XX_SPI_PND_TX_OVERRUN_CLR
|
1002 S3C64XX_SPI_PND_TX_UNDERRUN_CLR
;
1003 writel(val
, regs
+ S3C64XX_SPI_PENDING_CLR
);
1004 writel(0, regs
+ S3C64XX_SPI_PENDING_CLR
);
1006 writel(0, regs
+ S3C64XX_SPI_SWAP_CFG
);
1008 val
= readl(regs
+ S3C64XX_SPI_MODE_CFG
);
1009 val
&= ~S3C64XX_SPI_MODE_4BURST
;
1010 val
&= ~(S3C64XX_SPI_MAX_TRAILCNT
<< S3C64XX_SPI_TRAILCNT_OFF
);
1011 val
|= (S3C64XX_SPI_TRAILCNT
<< S3C64XX_SPI_TRAILCNT_OFF
);
1012 writel(val
, regs
+ S3C64XX_SPI_MODE_CFG
);
1014 s3c64xx_flush_fifo(sdd
);
#ifdef CONFIG_OF
/*
 * Build an s3c64xx_spi_info from device-tree properties; missing
 * optional properties fall back to sane defaults with a warning.
 */
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	struct s3c64xx_spi_info *sci;
	u32 temp;

	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return ERR_PTR(-ENOMEM);

	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
		sci->src_clk_nr = 0;
	} else {
		sci->src_clk_nr = temp;
	}

	if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
		dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
		sci->num_cs = 1;
	} else {
		sci->num_cs = temp;
	}

	sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");

	return sci;
}
#else
/* Non-DT platforms supply the info via platform data */
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	return dev_get_platdata(dev);
}
#endif
1052 static inline const struct s3c64xx_spi_port_config
*s3c64xx_spi_get_port_config(
1053 struct platform_device
*pdev
)
1056 if (pdev
->dev
.of_node
)
1057 return of_device_get_match_data(&pdev
->dev
);
1059 return (const struct s3c64xx_spi_port_config
*)platform_get_device_id(pdev
)->driver_data
;
1062 static int s3c64xx_spi_probe(struct platform_device
*pdev
)
1064 struct resource
*mem_res
;
1065 struct s3c64xx_spi_driver_data
*sdd
;
1066 struct s3c64xx_spi_info
*sci
= dev_get_platdata(&pdev
->dev
);
1067 struct spi_master
*master
;
1071 if (!sci
&& pdev
->dev
.of_node
) {
1072 sci
= s3c64xx_spi_parse_dt(&pdev
->dev
);
1074 return PTR_ERR(sci
);
1078 dev_err(&pdev
->dev
, "platform_data missing!\n");
1082 mem_res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1083 if (mem_res
== NULL
) {
1084 dev_err(&pdev
->dev
, "Unable to get SPI MEM resource\n");
1088 irq
= platform_get_irq(pdev
, 0);
1090 dev_warn(&pdev
->dev
, "Failed to get IRQ: %d\n", irq
);
1094 master
= spi_alloc_master(&pdev
->dev
,
1095 sizeof(struct s3c64xx_spi_driver_data
));
1096 if (master
== NULL
) {
1097 dev_err(&pdev
->dev
, "Unable to allocate SPI Master\n");
1101 platform_set_drvdata(pdev
, master
);
1103 sdd
= spi_master_get_devdata(master
);
1104 sdd
->port_conf
= s3c64xx_spi_get_port_config(pdev
);
1105 sdd
->master
= master
;
1106 sdd
->cntrlr_info
= sci
;
1108 sdd
->sfr_start
= mem_res
->start
;
1109 if (pdev
->dev
.of_node
) {
1110 ret
= of_alias_get_id(pdev
->dev
.of_node
, "spi");
1112 dev_err(&pdev
->dev
, "failed to get alias id, errno %d\n",
1114 goto err_deref_master
;
1118 sdd
->port_id
= pdev
->id
;
1123 sdd
->tx_dma
.direction
= DMA_MEM_TO_DEV
;
1124 sdd
->rx_dma
.direction
= DMA_DEV_TO_MEM
;
1126 master
->dev
.of_node
= pdev
->dev
.of_node
;
1127 master
->bus_num
= sdd
->port_id
;
1128 master
->setup
= s3c64xx_spi_setup
;
1129 master
->cleanup
= s3c64xx_spi_cleanup
;
1130 master
->prepare_transfer_hardware
= s3c64xx_spi_prepare_transfer
;
1131 master
->prepare_message
= s3c64xx_spi_prepare_message
;
1132 master
->transfer_one
= s3c64xx_spi_transfer_one
;
1133 master
->num_chipselect
= sci
->num_cs
;
1134 master
->dma_alignment
= 8;
1135 master
->bits_per_word_mask
= SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
1137 /* the spi->mode bits understood by this driver: */
1138 master
->mode_bits
= SPI_CPOL
| SPI_CPHA
| SPI_CS_HIGH
;
1139 master
->auto_runtime_pm
= true;
1140 if (!is_polling(sdd
))
1141 master
->can_dma
= s3c64xx_spi_can_dma
;
1143 sdd
->regs
= devm_ioremap_resource(&pdev
->dev
, mem_res
);
1144 if (IS_ERR(sdd
->regs
)) {
1145 ret
= PTR_ERR(sdd
->regs
);
1146 goto err_deref_master
;
1149 if (sci
->cfg_gpio
&& sci
->cfg_gpio()) {
1150 dev_err(&pdev
->dev
, "Unable to config gpio\n");
1152 goto err_deref_master
;
1156 sdd
->clk
= devm_clk_get(&pdev
->dev
, "spi");
1157 if (IS_ERR(sdd
->clk
)) {
1158 dev_err(&pdev
->dev
, "Unable to acquire clock 'spi'\n");
1159 ret
= PTR_ERR(sdd
->clk
);
1160 goto err_deref_master
;
1163 ret
= clk_prepare_enable(sdd
->clk
);
1165 dev_err(&pdev
->dev
, "Couldn't enable clock 'spi'\n");
1166 goto err_deref_master
;
1169 sprintf(clk_name
, "spi_busclk%d", sci
->src_clk_nr
);
1170 sdd
->src_clk
= devm_clk_get(&pdev
->dev
, clk_name
);
1171 if (IS_ERR(sdd
->src_clk
)) {
1173 "Unable to acquire clock '%s'\n", clk_name
);
1174 ret
= PTR_ERR(sdd
->src_clk
);
1175 goto err_disable_clk
;
1178 ret
= clk_prepare_enable(sdd
->src_clk
);
1180 dev_err(&pdev
->dev
, "Couldn't enable clock '%s'\n", clk_name
);
1181 goto err_disable_clk
;
1184 if (sdd
->port_conf
->clk_ioclk
) {
1185 sdd
->ioclk
= devm_clk_get(&pdev
->dev
, "spi_ioclk");
1186 if (IS_ERR(sdd
->ioclk
)) {
1187 dev_err(&pdev
->dev
, "Unable to acquire 'ioclk'\n");
1188 ret
= PTR_ERR(sdd
->ioclk
);
1189 goto err_disable_src_clk
;
1192 ret
= clk_prepare_enable(sdd
->ioclk
);
1194 dev_err(&pdev
->dev
, "Couldn't enable clock 'ioclk'\n");
1195 goto err_disable_src_clk
;
1199 if (!is_polling(sdd
)) {
1200 /* Acquire DMA channels */
1201 sdd
->rx_dma
.ch
= dma_request_chan(&pdev
->dev
, "rx");
1202 if (IS_ERR(sdd
->rx_dma
.ch
)) {
1203 dev_err(&pdev
->dev
, "Failed to get RX DMA channel\n");
1204 ret
= PTR_ERR(sdd
->rx_dma
.ch
);
1205 goto err_disable_io_clk
;
1207 sdd
->tx_dma
.ch
= dma_request_chan(&pdev
->dev
, "tx");
1208 if (IS_ERR(sdd
->tx_dma
.ch
)) {
1209 dev_err(&pdev
->dev
, "Failed to get TX DMA channel\n");
1210 ret
= PTR_ERR(sdd
->tx_dma
.ch
);
1211 goto err_release_rx_dma
;
1215 pm_runtime_set_autosuspend_delay(&pdev
->dev
, AUTOSUSPEND_TIMEOUT
);
1216 pm_runtime_use_autosuspend(&pdev
->dev
);
1217 pm_runtime_set_active(&pdev
->dev
);
1218 pm_runtime_enable(&pdev
->dev
);
1219 pm_runtime_get_sync(&pdev
->dev
);
1221 /* Setup Deufult Mode */
1222 s3c64xx_spi_hwinit(sdd
);
1224 spin_lock_init(&sdd
->lock
);
1225 init_completion(&sdd
->xfer_completion
);
1227 ret
= devm_request_irq(&pdev
->dev
, irq
, s3c64xx_spi_irq
, 0,
1228 "spi-s3c64xx", sdd
);
1230 dev_err(&pdev
->dev
, "Failed to request IRQ %d: %d\n",
1235 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN
| S3C64XX_SPI_INT_RX_UNDERRUN_EN
|
1236 S3C64XX_SPI_INT_TX_OVERRUN_EN
| S3C64XX_SPI_INT_TX_UNDERRUN_EN
,
1237 sdd
->regs
+ S3C64XX_SPI_INT_EN
);
1239 ret
= devm_spi_register_master(&pdev
->dev
, master
);
1241 dev_err(&pdev
->dev
, "cannot register SPI master: %d\n", ret
);
1245 dev_dbg(&pdev
->dev
, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
1246 sdd
->port_id
, master
->num_chipselect
);
1247 dev_dbg(&pdev
->dev
, "\tIOmem=[%pR]\tFIFO %dbytes\n",
1248 mem_res
, (FIFO_LVL_MASK(sdd
) >> 1) + 1);
1250 pm_runtime_mark_last_busy(&pdev
->dev
);
1251 pm_runtime_put_autosuspend(&pdev
->dev
);
1256 pm_runtime_put_noidle(&pdev
->dev
);
1257 pm_runtime_disable(&pdev
->dev
);
1258 pm_runtime_set_suspended(&pdev
->dev
);
1260 if (!is_polling(sdd
))
1261 dma_release_channel(sdd
->tx_dma
.ch
);
1263 if (!is_polling(sdd
))
1264 dma_release_channel(sdd
->rx_dma
.ch
);
1266 clk_disable_unprepare(sdd
->ioclk
);
1267 err_disable_src_clk
:
1268 clk_disable_unprepare(sdd
->src_clk
);
1270 clk_disable_unprepare(sdd
->clk
);
1272 spi_master_put(master
);
1277 static int s3c64xx_spi_remove(struct platform_device
*pdev
)
1279 struct spi_master
*master
= platform_get_drvdata(pdev
);
1280 struct s3c64xx_spi_driver_data
*sdd
= spi_master_get_devdata(master
);
1282 pm_runtime_get_sync(&pdev
->dev
);
1284 writel(0, sdd
->regs
+ S3C64XX_SPI_INT_EN
);
1286 if (!is_polling(sdd
)) {
1287 dma_release_channel(sdd
->rx_dma
.ch
);
1288 dma_release_channel(sdd
->tx_dma
.ch
);
1291 clk_disable_unprepare(sdd
->ioclk
);
1293 clk_disable_unprepare(sdd
->src_clk
);
1295 clk_disable_unprepare(sdd
->clk
);
1297 pm_runtime_put_noidle(&pdev
->dev
);
1298 pm_runtime_disable(&pdev
->dev
);
1299 pm_runtime_set_suspended(&pdev
->dev
);
1304 #ifdef CONFIG_PM_SLEEP
1305 static int s3c64xx_spi_suspend(struct device
*dev
)
1307 struct spi_master
*master
= dev_get_drvdata(dev
);
1308 struct s3c64xx_spi_driver_data
*sdd
= spi_master_get_devdata(master
);
1310 int ret
= spi_master_suspend(master
);
1314 ret
= pm_runtime_force_suspend(dev
);
1318 sdd
->cur_speed
= 0; /* Output Clock is stopped */
1323 static int s3c64xx_spi_resume(struct device
*dev
)
1325 struct spi_master
*master
= dev_get_drvdata(dev
);
1326 struct s3c64xx_spi_driver_data
*sdd
= spi_master_get_devdata(master
);
1327 struct s3c64xx_spi_info
*sci
= sdd
->cntrlr_info
;
1333 ret
= pm_runtime_force_resume(dev
);
1337 return spi_master_resume(master
);
1339 #endif /* CONFIG_PM_SLEEP */
1342 static int s3c64xx_spi_runtime_suspend(struct device
*dev
)
1344 struct spi_master
*master
= dev_get_drvdata(dev
);
1345 struct s3c64xx_spi_driver_data
*sdd
= spi_master_get_devdata(master
);
1347 clk_disable_unprepare(sdd
->clk
);
1348 clk_disable_unprepare(sdd
->src_clk
);
1349 clk_disable_unprepare(sdd
->ioclk
);
1354 static int s3c64xx_spi_runtime_resume(struct device
*dev
)
1356 struct spi_master
*master
= dev_get_drvdata(dev
);
1357 struct s3c64xx_spi_driver_data
*sdd
= spi_master_get_devdata(master
);
1360 if (sdd
->port_conf
->clk_ioclk
) {
1361 ret
= clk_prepare_enable(sdd
->ioclk
);
1366 ret
= clk_prepare_enable(sdd
->src_clk
);
1368 goto err_disable_ioclk
;
1370 ret
= clk_prepare_enable(sdd
->clk
);
1372 goto err_disable_src_clk
;
1374 s3c64xx_spi_hwinit(sdd
);
1376 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN
| S3C64XX_SPI_INT_RX_UNDERRUN_EN
|
1377 S3C64XX_SPI_INT_TX_OVERRUN_EN
| S3C64XX_SPI_INT_TX_UNDERRUN_EN
,
1378 sdd
->regs
+ S3C64XX_SPI_INT_EN
);
1382 err_disable_src_clk
:
1383 clk_disable_unprepare(sdd
->src_clk
);
1385 clk_disable_unprepare(sdd
->ioclk
);
1389 #endif /* CONFIG_PM */
/*
 * PM callbacks: system sleep goes through spi_master_{suspend,resume}
 * plus forced runtime PM; runtime PM gates/ungates the clocks only.
 */
static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};
/* S3C2443: single SPI port. */
static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	/* NOTE(review): fields below restored from dropped lines — confirm. */
	.tx_st_done	= 21,
	.high_speed	= true,
};
/* S3C6410: two SPI ports. */
static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	/* NOTE(review): field below restored from dropped lines — confirm. */
	.tx_st_done	= 21,
};
/* S5PV210: two SPI ports; port 0 has a deeper FIFO (mask 0x1ff). */
static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	/* NOTE(review): fields below restored from dropped lines — confirm. */
	.tx_st_done	= 25,
	.high_speed	= true,
};
/* Exynos4: three SPI ports; SPI clock comes from the CMU. */
static const struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	/* NOTE(review): tx_st_done/high_speed restored from dropped lines. */
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
/* Exynos7: six SPI ports (MAX_SPI_PORTS). */
static const struct s3c64xx_spi_port_config exynos7_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
	.rx_lvl_offset	= 15,
	/* NOTE(review): tx_st_done/high_speed restored from dropped lines. */
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
/* Exynos5433: six SPI ports; additionally needs the separate ioclk. */
static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
	.rx_lvl_offset	= 15,
	/* NOTE(review): fields below restored from dropped lines — confirm. */
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.clk_ioclk	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
/* Legacy (non-DT) platform-device IDs; driver_data selects the port config. */
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	},
	{ },	/* sentinel */
};
/* Device-tree match table; .data selects the per-SoC port config. */
static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,s3c2443-spi",
			.data = (void *)&s3c2443_spi_port_config,
	},
	{ .compatible = "samsung,s3c6410-spi",
			.data = (void *)&s3c6410_spi_port_config,
	},
	{ .compatible = "samsung,s5pv210-spi",
			.data = (void *)&s5pv210_spi_port_config,
	},
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos7-spi",
			.data = (void *)&exynos7_spi_port_config,
	},
	{ .compatible = "samsung,exynos5433-spi",
			.data = (void *)&exynos5433_spi_port_config,
	},
	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
/* Platform driver glue: binds via DT match table or legacy ID table. */
static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

module_platform_driver(s3c64xx_spi_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");