]>
Commit | Line | Data |
---|---|---|
646781d3 MV |
1 | /* |
2 | * Freescale MXS SPI master driver | |
3 | * | |
4 | * Copyright 2012 DENX Software Engineering, GmbH. | |
5 | * Copyright 2012 Freescale Semiconductor, Inc. | |
6 | * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. | |
7 | * | |
8 | * Rework and transition to new API by: | |
9 | * Marek Vasut <marex@denx.de> | |
10 | * | |
11 | * Based on previous attempt by: | |
12 | * Fabio Estevam <fabio.estevam@freescale.com> | |
13 | * | |
14 | * Based on code from U-Boot bootloader by: | |
15 | * Marek Vasut <marex@denx.de> | |
16 | * | |
17 | * Based on spi-stmp.c, which is: | |
18 | * Author: Dmitry Pervushin <dimka@embeddedalley.com> | |
19 | * | |
20 | * This program is free software; you can redistribute it and/or modify | |
21 | * it under the terms of the GNU General Public License as published by | |
22 | * the Free Software Foundation; either version 2 of the License, or | |
23 | * (at your option) any later version. | |
24 | * | |
25 | * This program is distributed in the hope that it will be useful, | |
26 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
27 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
28 | * GNU General Public License for more details. | |
29 | */ | |
30 | ||
31 | #include <linux/kernel.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/ioport.h> | |
34 | #include <linux/of.h> | |
35 | #include <linux/of_device.h> | |
36 | #include <linux/of_gpio.h> | |
37 | #include <linux/platform_device.h> | |
38 | #include <linux/delay.h> | |
39 | #include <linux/interrupt.h> | |
40 | #include <linux/dma-mapping.h> | |
41 | #include <linux/dmaengine.h> | |
42 | #include <linux/highmem.h> | |
43 | #include <linux/clk.h> | |
44 | #include <linux/err.h> | |
45 | #include <linux/completion.h> | |
46 | #include <linux/gpio.h> | |
47 | #include <linux/regulator/consumer.h> | |
48 | #include <linux/module.h> | |
646781d3 MV |
49 | #include <linux/pinctrl/consumer.h> |
50 | #include <linux/stmp_device.h> | |
51 | #include <linux/spi/spi.h> | |
52 | #include <linux/spi/mxs-spi.h> | |
53 | ||
54 | #define DRIVER_NAME "mxs-spi" | |
55 | ||
010b4818 MV |
56 | /* Use 10S timeout for very long transfers, it should suffice. */ |
57 | #define SSP_TIMEOUT 10000 | |
646781d3 | 58 | |
474afc04 MV |
59 | #define SG_MAXLEN 0xff00 |
60 | ||
646781d3 MV |
/* Per-controller driver state. */
struct mxs_spi {
	struct mxs_ssp ssp;	/* shared MXS SSP block state (regs, clk, DMA) */
	struct completion c;	/* completed by the DMA callback at end of a transfer chain */
};
65 | ||
/*
 * Program the SSP block for one transfer: validate the word size, pick
 * the bit clock and set the SPI mode (CPOL/CPHA) in CTRL1.
 *
 * @dev: the SPI slave device
 * @t:   transfer whose per-transfer overrides (bits_per_word, speed_hz)
 *       take precedence over the device defaults; may be NULL, in which
 *       case only the device defaults are used.
 *
 * Returns 0 on success, -EINVAL for an unsupported word size or a zero
 * clock rate.
 */
static int mxs_spi_setup_transfer(struct spi_device *dev,
				struct spi_transfer *t)
{
	struct mxs_spi *spi = spi_master_get_devdata(dev->master);
	struct mxs_ssp *ssp = &spi->ssp;
	uint8_t bits_per_word;
	uint32_t hz = 0;

	bits_per_word = dev->bits_per_word;
	if (t && t->bits_per_word)
		bits_per_word = t->bits_per_word;

	/* The word length is hard-coded to eight bits in CTRL1 below. */
	if (bits_per_word != 8) {
		dev_err(&dev->dev, "%s, unsupported bits_per_word=%d\n",
			__func__, bits_per_word);
		return -EINVAL;
	}

	hz = dev->max_speed_hz;
	if (t && t->speed_hz)
		hz = min(hz, t->speed_hz);	/* never exceed the device maximum */
	if (hz == 0) {
		dev_err(&dev->dev, "Cannot continue with zero clock\n");
		return -EINVAL;
	}

	mxs_ssp_set_clk_rate(ssp, hz);

	writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
		     BF_SSP_CTRL1_WORD_LENGTH
		     (BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
		     ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
		     ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
		     ssp->base + HW_SSP_CTRL1(ssp));

	/* No command phase is used in plain SPI mode. */
	writel(0x0, ssp->base + HW_SSP_CMD0);
	writel(0x0, ssp->base + HW_SSP_CMD1);

	return 0;
}
106 | ||
107 | static int mxs_spi_setup(struct spi_device *dev) | |
108 | { | |
109 | int err = 0; | |
110 | ||
111 | if (!dev->bits_per_word) | |
112 | dev->bits_per_word = 8; | |
113 | ||
114 | if (dev->mode & ~(SPI_CPOL | SPI_CPHA)) | |
115 | return -EINVAL; | |
116 | ||
117 | err = mxs_spi_setup_transfer(dev, NULL); | |
118 | if (err) { | |
119 | dev_err(&dev->dev, | |
120 | "Failed to setup transfer, error = %d\n", err); | |
121 | } | |
122 | ||
123 | return err; | |
124 | } | |
125 | ||
126 | static uint32_t mxs_spi_cs_to_reg(unsigned cs) | |
127 | { | |
128 | uint32_t select = 0; | |
129 | ||
130 | /* | |
131 | * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0 | |
132 | * | |
133 | * The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ | |
134 | * in HW_SSP_CTRL0 register do have multiple usage, please refer to | |
135 | * the datasheet for further details. In SPI mode, they are used to | |
136 | * toggle the chip-select lines (nCS pins). | |
137 | */ | |
138 | if (cs & 1) | |
139 | select |= BM_SSP_CTRL0_WAIT_FOR_CMD; | |
140 | if (cs & 2) | |
141 | select |= BM_SSP_CTRL0_WAIT_FOR_IRQ; | |
142 | ||
143 | return select; | |
144 | } | |
145 | ||
/*
 * Route the SSP chip-select pins to the requested chip select.
 * Both CS control bits are first cleared and then the bits for the
 * requested line are set, using the hardware SET/CLR shadow registers.
 */
static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs)
{
	const uint32_t mask =
		BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ;
	uint32_t select;
	struct mxs_ssp *ssp = &spi->ssp;

	writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	select = mxs_spi_cs_to_reg(cs);
	writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}
157 | ||
/*
 * Begin a transfer chain: LOCK_CS keeps the chip select asserted between
 * words; clearing IGNORE_CRC keeps CS held at the end of the transfer
 * (NOTE(review): bit semantics taken from the datasheet naming — confirm).
 */
static inline void mxs_spi_enable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
}
167 | ||
/*
 * End a transfer chain: the exact inverse of mxs_spi_enable() — drop
 * LOCK_CS and set IGNORE_CRC so the chip select is released after the
 * last word.
 */
static inline void mxs_spi_disable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}
177 | ||
178 | static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set) | |
179 | { | |
f13639dc | 180 | const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT); |
646781d3 MV |
181 | struct mxs_ssp *ssp = &spi->ssp; |
182 | uint32_t reg; | |
183 | ||
f13639dc | 184 | do { |
646781d3 MV |
185 | reg = readl_relaxed(ssp->base + offset); |
186 | ||
f13639dc MV |
187 | if (!set) |
188 | reg = ~reg; | |
646781d3 | 189 | |
f13639dc | 190 | reg &= mask; |
646781d3 | 191 | |
f13639dc MV |
192 | if (reg == mask) |
193 | return 0; | |
194 | } while (time_before(jiffies, timeout)); | |
646781d3 | 195 | |
f13639dc | 196 | return -ETIMEDOUT; |
646781d3 MV |
197 | } |
198 | ||
474afc04 MV |
/* DMA completion callback: wake the waiter in mxs_spi_txrx_dma(). */
static void mxs_ssp_dma_irq_callback(void *param)
{
	struct mxs_spi *spi = param;
	complete(&spi->c);
}
204 | ||
/*
 * SSP error interrupt handler. Nothing is recovered here; the CTRL1 and
 * STATUS registers are only dumped to aid debugging.
 */
static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
	struct mxs_ssp *ssp = dev_id;
	dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
		__func__, __LINE__,
		readl(ssp->base + HW_SSP_CTRL1(ssp)),
		readl(ssp->base + HW_SSP_STATUS(ssp)));
	return IRQ_HANDLED;
}
214 | ||
/*
 * Perform one uni-directional transfer of @len bytes via DMA.
 *
 * The buffer is split into scatterlist chunks of at most SG_MAXLEN bytes
 * (PAGE_SIZE for vmalloc'ed buffers, which must be mapped page by page).
 * For every chunk two descriptors are queued: a PIO write that programs
 * the CTRL0..XFER_SIZE registers, then the data transfer itself.
 * Completion of the last descriptor is signalled through spi->c by
 * mxs_ssp_dma_irq_callback().
 *
 * @cs:    chip-select number, encoded into CTRL0
 * @first: nonzero for the first transfer of a message (locks CS)
 * @last:  nonzero for the last transfer of a message (releases CS via
 *         IGNORE_CRC on the final chunk)
 * @write: 1 = TX (mem-to-dev), 0 = RX (dev-to-mem)
 *
 * Returns 0 on success or a negative error code.
 */
static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
			    unsigned char *buf, int len,
			    int *first, int *last, int write)
{
	struct mxs_ssp *ssp = &spi->ssp;
	struct dma_async_tx_descriptor *desc = NULL;
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	int sg_count;
	int min, ret;
	uint32_t ctrl0;
	struct page *vm_page;
	void *sg_buf;
	struct {
		uint32_t pio[4];	/* PIO words pushed to the SSP registers */
		struct scatterlist sg;
	} *dma_xfer;

	if (!len)
		return -EINVAL;

	dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL);
	if (!dma_xfer)
		return -ENOMEM;

	INIT_COMPLETION(spi->c);

	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
	ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);

	if (*first)
		ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
	if (!write)
		ctrl0 |= BM_SSP_CTRL0_READ;

	/* Queue the DMA data transfer. */
	for (sg_count = 0; sg_count < sgs; sg_count++) {
		min = min(len, desc_len);

		/* Prepare the transfer descriptor. */
		if ((sg_count + 1 == sgs) && *last)
			ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;

		/* On i.MX23 the transfer count lives inside CTRL0 itself. */
		if (ssp->devid == IMX23_SSP)
			ctrl0 |= min;

		dma_xfer[sg_count].pio[0] = ctrl0;
		dma_xfer[sg_count].pio[3] = min;

		if (vmalloced_buf) {
			/* vmalloc memory is not physically contiguous; map per page. */
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				ret = -ENOMEM;
				goto err_vmalloc;
			}
			sg_buf = page_address(vm_page) +
				((size_t)buf & ~PAGE_MASK);
		} else {
			sg_buf = buf;
		}

		sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
		/*
		 * NOTE(review): dma_map_sg() returns 0 on mapping failure;
		 * that case is not checked here — verify.
		 */
		ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		len -= min;
		buf += min;

		/* Queue the PIO register write transfer. */
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				(struct scatterlist *)dma_xfer[sg_count].pio,
				(ssp->devid == IMX23_SSP) ? 1 : 4,
				DMA_TRANS_NONE,
				sg_count ? DMA_PREP_INTERRUPT : 0);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get PIO reg. write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}

		desc = dmaengine_prep_slave_sg(ssp->dmach,
				&dma_xfer[sg_count].sg, 1,
				write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get DMA data write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}
	}

	/*
	 * The last descriptor must have this callback,
	 * to finish the DMA transaction.
	 */
	desc->callback = mxs_ssp_dma_irq_callback;
	desc->callback_param = spi;

	/* Start the transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);

	ret = wait_for_completion_timeout(&spi->c,
				msecs_to_jiffies(SSP_TIMEOUT));
	if (!ret) {
		dev_err(ssp->dev, "DMA transfer timeout\n");
		ret = -ETIMEDOUT;
		goto err_vmalloc;
	}

	ret = 0;

	/* Shared teardown: the success path also unmaps all mapped chunks. */
err_vmalloc:
	while (--sg_count >= 0) {
err_mapped:
		dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	kfree(dma_xfer);

	return ret;
}
342 | ||
646781d3 MV |
/*
 * Perform one uni-directional transfer byte-by-byte through the PIO
 * interface. Used for short transfers where DMA setup overhead would
 * dominate. Parameters have the same meaning as in mxs_spi_txrx_dma().
 */
static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
			    unsigned char *buf, int len,
			    int *first, int *last, int write)
{
	struct mxs_ssp *ssp = &spi->ssp;

	if (*first)
		mxs_spi_enable(spi);

	mxs_spi_set_cs(spi, cs);

	while (len--) {
		/* Release the chip select together with the very last byte. */
		if (*last && len == 0)
			mxs_spi_disable(spi);

		/* Transfer exactly one byte per RUN cycle. */
		if (ssp->devid == IMX23_SSP) {
			writel(BM_SSP_CTRL0_XFER_COUNT,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
			writel(1,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		} else {
			writel(1, ssp->base + HW_SSP_XFER_SIZE);
		}

		/* Select the transfer direction for this byte. */
		if (write)
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		else
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		writel(BM_SSP_CTRL0_RUN,
			ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		/* Wait for the block to acknowledge RUN. */
		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
			return -ETIMEDOUT;

		if (write)
			writel(*buf, ssp->base + HW_SSP_DATA(ssp));

		writel(BM_SSP_CTRL0_DATA_XFER,
			     ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (!write) {
			/* Wait until the RX FIFO holds the received byte. */
			if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
						BM_SSP_STATUS_FIFO_EMPTY, 0))
				return -ETIMEDOUT;

			*buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
		}

		/* Wait for RUN to clear, i.e. the byte to complete. */
		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
			return -ETIMEDOUT;

		buf++;
	}

	/*
	 * len is -1 after a fully completed loop, so this is the success
	 * path; the -ETIMEDOUT below is effectively unreachable.
	 */
	if (len <= 0)
		return 0;

	return -ETIMEDOUT;
}
405 | ||
406 | static int mxs_spi_transfer_one(struct spi_master *master, | |
407 | struct spi_message *m) | |
408 | { | |
409 | struct mxs_spi *spi = spi_master_get_devdata(master); | |
410 | struct mxs_ssp *ssp = &spi->ssp; | |
411 | int first, last; | |
412 | struct spi_transfer *t, *tmp_t; | |
413 | int status = 0; | |
414 | int cs; | |
415 | ||
416 | first = last = 0; | |
417 | ||
418 | cs = m->spi->chip_select; | |
419 | ||
420 | list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) { | |
421 | ||
422 | status = mxs_spi_setup_transfer(m->spi, t); | |
423 | if (status) | |
424 | break; | |
425 | ||
426 | if (&t->transfer_list == m->transfers.next) | |
427 | first = 1; | |
428 | if (&t->transfer_list == m->transfers.prev) | |
429 | last = 1; | |
474afc04 | 430 | if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) { |
646781d3 MV |
431 | dev_err(ssp->dev, |
432 | "Cannot send and receive simultaneously\n"); | |
433 | status = -EINVAL; | |
434 | break; | |
435 | } | |
436 | ||
474afc04 MV |
437 | /* |
438 | * Small blocks can be transfered via PIO. | |
439 | * Measured by empiric means: | |
440 | * | |
441 | * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1 | |
442 | * | |
443 | * DMA only: 2.164808 seconds, 473.0KB/s | |
444 | * Combined: 1.676276 seconds, 610.9KB/s | |
445 | */ | |
727c10e3 | 446 | if (t->len < 32) { |
474afc04 MV |
447 | writel(BM_SSP_CTRL1_DMA_ENABLE, |
448 | ssp->base + HW_SSP_CTRL1(ssp) + | |
449 | STMP_OFFSET_REG_CLR); | |
450 | ||
451 | if (t->tx_buf) | |
452 | status = mxs_spi_txrx_pio(spi, cs, | |
453 | (void *)t->tx_buf, | |
454 | t->len, &first, &last, 1); | |
455 | if (t->rx_buf) | |
456 | status = mxs_spi_txrx_pio(spi, cs, | |
457 | t->rx_buf, t->len, | |
458 | &first, &last, 0); | |
459 | } else { | |
460 | writel(BM_SSP_CTRL1_DMA_ENABLE, | |
461 | ssp->base + HW_SSP_CTRL1(ssp) + | |
462 | STMP_OFFSET_REG_SET); | |
463 | ||
464 | if (t->tx_buf) | |
465 | status = mxs_spi_txrx_dma(spi, cs, | |
466 | (void *)t->tx_buf, t->len, | |
467 | &first, &last, 1); | |
468 | if (t->rx_buf) | |
469 | status = mxs_spi_txrx_dma(spi, cs, | |
470 | t->rx_buf, t->len, | |
471 | &first, &last, 0); | |
472 | } | |
646781d3 | 473 | |
c895db0f MV |
474 | if (status) { |
475 | stmp_reset_block(ssp->base); | |
646781d3 | 476 | break; |
c895db0f | 477 | } |
646781d3 | 478 | |
204e706f | 479 | m->actual_length += t->len; |
646781d3 MV |
480 | first = last = 0; |
481 | } | |
482 | ||
483 | m->status = 0; | |
484 | spi_finalize_current_message(master); | |
485 | ||
486 | return status; | |
487 | } | |
488 | ||
474afc04 MV |
489 | static bool mxs_ssp_dma_filter(struct dma_chan *chan, void *param) |
490 | { | |
491 | struct mxs_ssp *ssp = param; | |
492 | ||
493 | if (!mxs_dma_is_apbh(chan)) | |
494 | return false; | |
495 | ||
496 | if (chan->chan_id != ssp->dma_channel) | |
497 | return false; | |
498 | ||
499 | chan->private = &ssp->dma_data; | |
500 | ||
501 | return true; | |
502 | } | |
503 | ||
646781d3 MV |
/* Device-tree match table; .data carries the SSP block variant id. */
static const struct of_device_id mxs_spi_dt_ids[] = {
	{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
510 | ||
511 | static int __devinit mxs_spi_probe(struct platform_device *pdev) | |
512 | { | |
513 | const struct of_device_id *of_id = | |
514 | of_match_device(mxs_spi_dt_ids, &pdev->dev); | |
515 | struct device_node *np = pdev->dev.of_node; | |
516 | struct spi_master *master; | |
517 | struct mxs_spi *spi; | |
518 | struct mxs_ssp *ssp; | |
474afc04 | 519 | struct resource *iores, *dmares; |
646781d3 MV |
520 | struct pinctrl *pinctrl; |
521 | struct clk *clk; | |
522 | void __iomem *base; | |
474afc04 MV |
523 | int devid, dma_channel; |
524 | int ret = 0, irq_err, irq_dma; | |
525 | dma_cap_mask_t mask; | |
646781d3 MV |
526 | |
527 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
474afc04 MV |
528 | irq_err = platform_get_irq(pdev, 0); |
529 | irq_dma = platform_get_irq(pdev, 1); | |
530 | if (!iores || irq_err < 0 || irq_dma < 0) | |
646781d3 MV |
531 | return -EINVAL; |
532 | ||
533 | base = devm_request_and_ioremap(&pdev->dev, iores); | |
534 | if (!base) | |
535 | return -EADDRNOTAVAIL; | |
536 | ||
537 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); | |
538 | if (IS_ERR(pinctrl)) | |
539 | return PTR_ERR(pinctrl); | |
540 | ||
541 | clk = devm_clk_get(&pdev->dev, NULL); | |
542 | if (IS_ERR(clk)) | |
543 | return PTR_ERR(clk); | |
544 | ||
474afc04 | 545 | if (np) { |
646781d3 | 546 | devid = (enum mxs_ssp_id) of_id->data; |
474afc04 MV |
547 | /* |
548 | * TODO: This is a temporary solution and should be changed | |
549 | * to use generic DMA binding later when the helpers get in. | |
550 | */ | |
551 | ret = of_property_read_u32(np, "fsl,ssp-dma-channel", | |
552 | &dma_channel); | |
553 | if (ret) { | |
554 | dev_err(&pdev->dev, | |
555 | "Failed to get DMA channel\n"); | |
556 | return -EINVAL; | |
557 | } | |
558 | } else { | |
559 | dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); | |
560 | if (!dmares) | |
561 | return -EINVAL; | |
646781d3 | 562 | devid = pdev->id_entry->driver_data; |
474afc04 MV |
563 | dma_channel = dmares->start; |
564 | } | |
646781d3 MV |
565 | |
566 | master = spi_alloc_master(&pdev->dev, sizeof(*spi)); | |
567 | if (!master) | |
568 | return -ENOMEM; | |
569 | ||
570 | master->transfer_one_message = mxs_spi_transfer_one; | |
571 | master->setup = mxs_spi_setup; | |
572 | master->mode_bits = SPI_CPOL | SPI_CPHA; | |
573 | master->num_chipselect = 3; | |
574 | master->dev.of_node = np; | |
575 | master->flags = SPI_MASTER_HALF_DUPLEX; | |
576 | ||
577 | spi = spi_master_get_devdata(master); | |
578 | ssp = &spi->ssp; | |
579 | ssp->dev = &pdev->dev; | |
580 | ssp->clk = clk; | |
581 | ssp->base = base; | |
582 | ssp->devid = devid; | |
474afc04 MV |
583 | ssp->dma_channel = dma_channel; |
584 | ||
41682e03 MV |
585 | init_completion(&spi->c); |
586 | ||
474afc04 MV |
587 | ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0, |
588 | DRIVER_NAME, ssp); | |
589 | if (ret) | |
590 | goto out_master_free; | |
591 | ||
592 | dma_cap_zero(mask); | |
593 | dma_cap_set(DMA_SLAVE, mask); | |
594 | ssp->dma_data.chan_irq = irq_dma; | |
595 | ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp); | |
596 | if (!ssp->dmach) { | |
597 | dev_err(ssp->dev, "Failed to request DMA\n"); | |
598 | goto out_master_free; | |
599 | } | |
646781d3 | 600 | |
474afc04 MV |
601 | /* |
602 | * Crank up the clock to 120MHz, this will be further divided onto a | |
603 | * proper speed. | |
604 | */ | |
646781d3 | 605 | clk_prepare_enable(ssp->clk); |
474afc04 | 606 | clk_set_rate(ssp->clk, 120 * 1000 * 1000); |
646781d3 MV |
607 | ssp->clk_rate = clk_get_rate(ssp->clk) / 1000; |
608 | ||
609 | stmp_reset_block(ssp->base); | |
610 | ||
611 | platform_set_drvdata(pdev, master); | |
612 | ||
613 | ret = spi_register_master(master); | |
614 | if (ret) { | |
615 | dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret); | |
474afc04 | 616 | goto out_free_dma; |
646781d3 MV |
617 | } |
618 | ||
619 | return 0; | |
620 | ||
474afc04 | 621 | out_free_dma: |
474afc04 | 622 | dma_release_channel(ssp->dmach); |
646781d3 | 623 | clk_disable_unprepare(ssp->clk); |
474afc04 | 624 | out_master_free: |
646781d3 MV |
625 | spi_master_put(master); |
626 | return ret; | |
627 | } | |
628 | ||
/*
 * Remove: tear down in reverse probe order — unregister the master,
 * release the DMA channel, stop the clock, then drop the last reference.
 */
static int __devexit mxs_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;

	/* Take a reference so the master outlives spi_unregister_master(). */
	master = spi_master_get(platform_get_drvdata(pdev));
	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;

	spi_unregister_master(master);

	dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	spi_master_put(master);

	return 0;
}
649 | ||
/* Platform driver glue; matched either by name or via the DT table. */
static struct platform_driver mxs_spi_driver = {
	.probe	= mxs_spi_probe,
	.remove = __devexit_p(mxs_spi_remove),
	.driver	= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = mxs_spi_dt_ids,
	},
};

module_platform_driver(mxs_spi_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("MXS SPI master driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-spi");