/*
 * PXA2xx SPI DMA engine support.
 *
 * Copyright (C) 2013, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

#include "spi-pxa2xx.h"

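/*
 * Build and DMA-map a scatterlist covering the current transfer. The
 * buffer is split into 2 KB chunks (a size chosen by this driver), and
 * the preallocated dummy buffer is substituted when there is no real
 * TX/RX buffer for the direction. For example, a 4099-byte transfer is
 * first padded to 4100 bytes by the ALIGN() below and then mapped as
 * three entries of 2048, 2048 and 4 bytes. Returns the number of mapped
 * entries, or a negative errno on failure.
 */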
static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
                                     enum dma_data_direction dir)
{
        int i, nents, len = drv_data->len;
        struct scatterlist *sg;
        struct device *dmadev;
        struct sg_table *sgt;
        void *buf, *pbuf;

        /*
         * Some DMA controllers have problems transferring buffers that are
         * not a multiple of 4 bytes. So we truncate the transfer so that it
         * is suitable for such controllers, and handle the trailing bytes
         * manually after the DMA completes.
         *
         * REVISIT: It would be better if this information could be
         * retrieved directly from the DMA device, in a similar way to how
         * ->copy_align etc. are handled.
         */
        len = ALIGN(drv_data->len, 4);

        if (dir == DMA_TO_DEVICE) {
                dmadev = drv_data->tx_chan->device->dev;
                sgt = &drv_data->tx_sgt;
                buf = drv_data->tx;
                drv_data->tx_map_len = len;
        } else {
                dmadev = drv_data->rx_chan->device->dev;
                sgt = &drv_data->rx_sgt;
                buf = drv_data->rx;
                drv_data->rx_map_len = len;
        }

        nents = DIV_ROUND_UP(len, SZ_2K);
        if (nents != sgt->nents) {
                int ret;

                sg_free_table(sgt);
                ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
                if (ret)
                        return ret;
        }

        pbuf = buf;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = min_t(size_t, len, SZ_2K);

                if (buf)
                        sg_set_buf(sg, pbuf, bytes);
                else
                        sg_set_buf(sg, drv_data->dummy, bytes);

                pbuf += bytes;
                len -= bytes;
        }

        nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
        if (!nents)
                return -ENOMEM;

        return nents;
}

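/* Counterpart of pxa2xx_spi_map_dma_buffer() for a single direction. */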
static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data,
                                        enum dma_data_direction dir)
{
        struct device *dmadev;
        struct sg_table *sgt;

        if (dir == DMA_TO_DEVICE) {
                dmadev = drv_data->tx_chan->device->dev;
                sgt = &drv_data->tx_sgt;
        } else {
                dmadev = drv_data->rx_chan->device->dev;
                sgt = &drv_data->rx_sgt;
        }

        dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
}

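/* Unmap both directions once the transfer has finished (or failed). */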
static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
        if (!drv_data->dma_mapped)
                return;

        pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
        pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);

        drv_data->dma_mapped = 0;
}

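/*
 * Finish the current DMA transfer. This is called both from the DMA
 * completion callback and from the ROR interrupt handler, so the
 * ->dma_running counter below ensures the completion work runs only
 * once even when both paths fire.
 */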
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
                                             bool error)
{
        struct spi_message *msg = drv_data->cur_msg;

        /*
         * It is possible that one CPU is handling the ROR interrupt
         * while another just gets the DMA completion. Calling
         * pump_transfers() twice for the same transfer leads to
         * problems, so we prevent concurrent calls by using
         * ->dma_running.
         */
        if (atomic_dec_and_test(&drv_data->dma_running)) {
                void __iomem *reg = drv_data->ioaddr;

                /*
                 * If the other CPU is still handling the ROR interrupt we
                 * might not know about the error yet. So we re-check the
                 * ROR bit here before we clear the status register.
                 */
                if (!error) {
                        u32 status = read_SSSR(reg) & drv_data->mask_sr;
                        error = status & SSSR_ROR;
                }

                /* Clear status & disable interrupts */
                write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
                write_SSSR_CS(drv_data, drv_data->clear_sr);
                if (!pxa25x_ssp_comp(drv_data))
                        write_SSTO(0, reg);

                if (!error) {
                        pxa2xx_spi_unmap_dma_buffers(drv_data);

                        /* Handle the last bytes of the unaligned transfer */
                        drv_data->tx += drv_data->tx_map_len;
                        drv_data->write(drv_data);

                        drv_data->rx += drv_data->rx_map_len;
                        drv_data->read(drv_data);

                        msg->actual_length += drv_data->len;
                        msg->state = pxa2xx_spi_next_transfer(drv_data);
                } else {
                        /* In case we got an error, disable the SSP now */
                        write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

                        msg->state = ERROR_STATE;
                }

                tasklet_schedule(&drv_data->pump_transfers);
        }
}

static void pxa2xx_spi_dma_callback(void *data)
{
        pxa2xx_spi_dma_transfer_complete(data, false);
}

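/*
 * Configure one channel (TX or RX) for the current transfer and prepare
 * a slave scatterlist descriptor for it. The FIFO address, bus width
 * (derived from drv_data->n_bytes) and burst size are programmed with
 * dmaengine_slave_config() before the descriptor is requested.
 */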
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
                           enum dma_transfer_direction dir)
{
        struct pxa2xx_spi_master *pdata = drv_data->master_info;
        struct chip_data *chip = drv_data->cur_chip;
        enum dma_slave_buswidth width;
        struct dma_slave_config cfg;
        struct dma_chan *chan;
        struct sg_table *sgt;
        int nents, ret;

        switch (drv_data->n_bytes) {
        case 1:
                width = DMA_SLAVE_BUSWIDTH_1_BYTE;
                break;
        case 2:
                width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                break;
        default:
                width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                break;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.direction = dir;

        if (dir == DMA_MEM_TO_DEV) {
                cfg.dst_addr = drv_data->ssdr_physical;
                cfg.dst_addr_width = width;
                cfg.dst_maxburst = chip->dma_burst_size;
                cfg.slave_id = pdata->tx_slave_id;

                sgt = &drv_data->tx_sgt;
                nents = drv_data->tx_nents;
                chan = drv_data->tx_chan;
        } else {
                cfg.src_addr = drv_data->ssdr_physical;
                cfg.src_addr_width = width;
                cfg.src_maxburst = chip->dma_burst_size;
                cfg.slave_id = pdata->rx_slave_id;

                sgt = &drv_data->rx_sgt;
                nents = drv_data->rx_nents;
                chan = drv_data->rx_chan;
        }

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret) {
                dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
                return NULL;
        }

        return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

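/*
 * Filter function for dma_request_slave_channel_compat(): on platforms
 * without a firmware-provided channel mapping, pick the channels whose
 * ids match the ones given in the platform data.
 */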
static bool pxa2xx_spi_dma_filter(struct dma_chan *chan, void *param)
{
        const struct pxa2xx_spi_master *pdata = param;

        return chan->chan_id == pdata->tx_chan_id ||
               chan->chan_id == pdata->rx_chan_id;
}

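/* The caller falls back to PIO for transfers longer than MAX_DMA_LEN. */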
bool pxa2xx_spi_dma_is_possible(size_t len)
{
        return len <= MAX_DMA_LEN;
}

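/*
 * Map both the TX and RX buffers. Returns 1 when the transfer can be
 * done with DMA, and 0 when the caller should fall back to PIO (DMA
 * disabled for the chip, transfer shorter than a single burst, or
 * mapping failed).
 */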
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
        const struct chip_data *chip = drv_data->cur_chip;
        int ret;

        if (!chip->enable_dma)
                return 0;

        /* Don't bother with DMA if we can't do even a single burst */
        if (drv_data->len < chip->dma_burst_size)
                return 0;

        ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
        if (ret <= 0) {
                dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
                return 0;
        }

        drv_data->tx_nents = ret;

        ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
        if (ret <= 0) {
                pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
                dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
                return 0;
        }

        drv_data->rx_nents = ret;
        return 1;
}

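/*
 * Interrupt handler for the DMA case. Only a receiver overrun (ROR) is
 * expected here; both channels are torn down and the transfer is
 * completed with an error.
 */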
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
        u32 status;

        status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
        if (status & SSSR_ROR) {
                dev_err(&drv_data->pdev->dev, "FIFO overrun\n");

                dmaengine_terminate_all(drv_data->rx_chan);
                dmaengine_terminate_all(drv_data->tx_chan);

                pxa2xx_spi_dma_transfer_complete(drv_data, true);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

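/*
 * Prepare descriptors for both directions. The completion callback is
 * installed on the RX descriptor only: the transfer as a whole is done
 * once the last byte has been received. Note that the dma_burst
 * argument is currently unused; the burst size programmed in
 * pxa2xx_spi_dma_prepare_one() comes from the chip data instead.
 */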
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
        struct dma_async_tx_descriptor *tx_desc, *rx_desc;

        tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
        if (!tx_desc) {
                dev_err(&drv_data->pdev->dev,
                        "failed to get DMA TX descriptor\n");
                return -EBUSY;
        }

        rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
        if (!rx_desc) {
                dev_err(&drv_data->pdev->dev,
                        "failed to get DMA RX descriptor\n");
                return -EBUSY;
        }

        /* We are ready when RX completes */
        rx_desc->callback = pxa2xx_spi_dma_callback;
        rx_desc->callback_param = drv_data;

        dmaengine_submit(rx_desc);
        dmaengine_submit(tx_desc);
        return 0;
}

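/*
 * Kick both channels. Actual data movement is still gated by the SSP
 * asserting its DMA requests once the caller enables the port.
 */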
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
        dma_async_issue_pending(drv_data->rx_chan);
        dma_async_issue_pending(drv_data->tx_chan);

        atomic_set(&drv_data->dma_running, 1);
}

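/*
 * One-time setup: allocate the 2 KB dummy buffer used for
 * unidirectional transfers and request the TX and RX channels,
 * preferring firmware-provided channel mappings over the platform
 * data filter.
 */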
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
        struct pxa2xx_spi_master *pdata = drv_data->master_info;
        struct device *dev = &drv_data->pdev->dev;
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        drv_data->dummy = devm_kzalloc(dev, SZ_2K, GFP_KERNEL);
        if (!drv_data->dummy)
                return -ENOMEM;

        drv_data->tx_chan = dma_request_slave_channel_compat(mask,
                                pxa2xx_spi_dma_filter, pdata, dev, "tx");
        if (!drv_data->tx_chan)
                return -ENODEV;

        drv_data->rx_chan = dma_request_slave_channel_compat(mask,
                                pxa2xx_spi_dma_filter, pdata, dev, "rx");
        if (!drv_data->rx_chan) {
                dma_release_channel(drv_data->tx_chan);
                drv_data->tx_chan = NULL;
                return -ENODEV;
        }

        return 0;
}

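/* Stop any in-flight DMA, then release the channels and scatterlists. */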
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
        if (drv_data->rx_chan) {
                dmaengine_terminate_all(drv_data->rx_chan);
                dma_release_channel(drv_data->rx_chan);
                sg_free_table(&drv_data->rx_sgt);
                drv_data->rx_chan = NULL;
        }
        if (drv_data->tx_chan) {
                dmaengine_terminate_all(drv_data->tx_chan);
                dma_release_channel(drv_data->tx_chan);
                sg_free_table(&drv_data->tx_sgt);
                drv_data->tx_chan = NULL;
        }
}

void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
        /* Nothing to do here; no DMA state needs to be restored on resume. */
}

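/*
 * The chosen values are returned through @burst_code and @threshold;
 * the threshold is encoded ready for the SSCR1 register (RX and TX
 * FIFO trigger levels).
 */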
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
                                           struct spi_device *spi,
                                           u8 bits_per_word, u32 *burst_code,
                                           u32 *threshold)
{
        struct pxa2xx_spi_chip *chip_info = spi->controller_data;

        /*
         * If the DMA burst size is given in chip_info, we use that;
         * otherwise we use the default. We also use the default FIFO
         * thresholds for now.
         */
        *burst_code = chip_info ? chip_info->dma_burst_size : 16;
        *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
                   | SSCR1_TxTresh(TX_THRESH_DFLT);

        return 0;
}