2 * Broadcom Secondary Memory Interface driver
4 * Written by Luke Wren <luke@raspberrypi.org>
5 * Copyright (c) 2015, Raspberry Pi (Trading) Ltd.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2, as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/clk.h>
38 #include <linux/kernel.h>
39 #include <linux/module.h>
41 #include <linux/platform_device.h>
42 #include <linux/of_address.h>
43 #include <linux/of_platform.h>
45 #include <linux/slab.h>
46 #include <linux/pagemap.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/dmaengine.h>
49 #include <linux/semaphore.h>
50 #include <linux/spinlock.h>
53 #define BCM2835_SMI_IMPLEMENTATION
54 #include <linux/broadcom/bcm2835_smi.h>
56 #define DRIVER_NAME "smi-bcm2835"
/* Round a byte count up to the number of whole pages needed to hold it.
 * The argument is fully parenthesised so that expressions such as
 * (1 << order) expand correctly; the original unparenthesised form broke
 * under operator precedence (e.g. `+` binds tighter than `<<`). */
#define N_PAGES_FROM_BYTES(n) (((n) + PAGE_SIZE - 1) / PAGE_SIZE)
60 #define DMA_WRITE_TO_MEM true
61 #define DMA_READ_FROM_MEM false
/* Per-device driver state for one SMI controller instance.
 * NOTE(review): later code dereferences inst->dev and inst->clk, but those
 * members are not visible in this (truncated) declaration - the full
 * struct must also carry them; confirm against the original source. */
63 struct bcm2835_smi_instance
{
/* Cached copy of the user-visible timing/DMA configuration. */
65 struct smi_settings settings
;
/* ioremapped SMI register block (MMIO base). */
66 __iomem
void *smi_regs_ptr
;
/* Bus address of the register block - used as the DMA slave address. */
67 dma_addr_t smi_regs_busaddr
;
/* dmaengine channel plus its slave configuration for FIFO transfers. */
69 struct dma_chan
*dma_chan
;
70 struct dma_slave_config dma_config
;
/* Bounce buffers/scatterlists used by bcm2835_smi_user_dma(). */
72 struct bcm2835_smi_bounce_info bounce
;
74 struct scatterlist buffer_sgl
;
78 /* Sometimes we are called into in an atomic context (e.g. by
79 JFFS2 + MTD) so we can't use a mutex */
80 spinlock_t transaction_lock
;
/****************************************************************************
*
* SMI peripheral setup
*
***************************************************************************/
89 static inline void write_smi_reg(struct bcm2835_smi_instance
*inst
,
90 u32 val
, unsigned reg
)
92 writel(val
, inst
->smi_regs_ptr
+ reg
);
95 static inline u32
read_smi_reg(struct bcm2835_smi_instance
*inst
, unsigned reg
)
97 return readl(inst
->smi_regs_ptr
+ reg
);
/* Token-paste macro for e.g SMIDSR_RSTROBE -> value of SMIDSR_RSTROBE_MASK */
#define _CONCAT(x, y) x##y
#define CONCAT(x, y) _CONCAT(x, y)

/* Read-modify-write the named bit field inside dest: clear the field's
 * mask, then OR in the shifted (and re-masked) new value. */
#define SET_BIT_FIELD(dest, field, bits) ((dest) = \
	((dest) & ~CONCAT(field, _MASK)) | \
	(((bits) << CONCAT(field, _OFFS)) & CONCAT(field, _MASK)))
/* Extract the named bit field from src, shifted down to bit 0. */
#define GET_BIT_FIELD(src, field) \
	(((src) & CONCAT(field, _MASK)) >> CONCAT(field, _OFFS))
110 static void smi_dump_context_labelled(struct bcm2835_smi_instance
*inst
,
113 dev_err(inst
->dev
, "SMI context dump: %s", label
);
114 dev_err(inst
->dev
, "SMICS: 0x%08x", read_smi_reg(inst
, SMICS
));
115 dev_err(inst
->dev
, "SMIL: 0x%08x", read_smi_reg(inst
, SMIL
));
116 dev_err(inst
->dev
, "SMIDSR: 0x%08x", read_smi_reg(inst
, SMIDSR0
));
117 dev_err(inst
->dev
, "SMIDSW: 0x%08x", read_smi_reg(inst
, SMIDSW0
));
118 dev_err(inst
->dev
, "SMIDC: 0x%08x", read_smi_reg(inst
, SMIDC
));
119 dev_err(inst
->dev
, "SMIFD: 0x%08x", read_smi_reg(inst
, SMIFD
));
120 dev_err(inst
->dev
, " ");
/* Convenience wrapper: dump the SMI registers with an empty label. */
static inline void smi_dump_context(struct bcm2835_smi_instance *inst)
{
	smi_dump_context_labelled(inst, "");
}
128 static void smi_get_default_settings(struct bcm2835_smi_instance
*inst
)
130 struct smi_settings
*settings
= &inst
->settings
;
132 settings
->data_width
= SMI_WIDTH_16BIT
;
133 settings
->pack_data
= true;
135 settings
->read_setup_time
= 1;
136 settings
->read_hold_time
= 1;
137 settings
->read_pace_time
= 1;
138 settings
->read_strobe_time
= 3;
140 settings
->write_setup_time
= settings
->read_setup_time
;
141 settings
->write_hold_time
= settings
->read_hold_time
;
142 settings
->write_pace_time
= settings
->read_pace_time
;
143 settings
->write_strobe_time
= settings
->read_strobe_time
;
145 settings
->dma_enable
= true;
146 settings
->dma_passthrough_enable
= false;
147 settings
->dma_read_thresh
= 0x01;
148 settings
->dma_write_thresh
= 0x3f;
149 settings
->dma_panic_read_thresh
= 0x20;
150 settings
->dma_panic_write_thresh
= 0x20;
153 void bcm2835_smi_set_regs_from_settings(struct bcm2835_smi_instance
*inst
)
155 struct smi_settings
*settings
= &inst
->settings
;
156 int smidsr_temp
= 0, smidsw_temp
= 0, smics_temp
,
157 smidcs_temp
, smidc_temp
= 0;
159 spin_lock(&inst
->transaction_lock
);
161 /* temporarily disable the peripheral: */
162 smics_temp
= read_smi_reg(inst
, SMICS
);
163 write_smi_reg(inst
, 0, SMICS
);
164 smidcs_temp
= read_smi_reg(inst
, SMIDCS
);
165 write_smi_reg(inst
, 0, SMIDCS
);
167 if (settings
->pack_data
)
168 smics_temp
|= SMICS_PXLDAT
;
170 smics_temp
&= ~SMICS_PXLDAT
;
172 SET_BIT_FIELD(smidsr_temp
, SMIDSR_RWIDTH
, settings
->data_width
);
173 SET_BIT_FIELD(smidsr_temp
, SMIDSR_RSETUP
, settings
->read_setup_time
);
174 SET_BIT_FIELD(smidsr_temp
, SMIDSR_RHOLD
, settings
->read_hold_time
);
175 SET_BIT_FIELD(smidsr_temp
, SMIDSR_RPACE
, settings
->read_pace_time
);
176 SET_BIT_FIELD(smidsr_temp
, SMIDSR_RSTROBE
, settings
->read_strobe_time
);
177 write_smi_reg(inst
, smidsr_temp
, SMIDSR0
);
179 SET_BIT_FIELD(smidsw_temp
, SMIDSW_WWIDTH
, settings
->data_width
);
180 if (settings
->data_width
== SMI_WIDTH_8BIT
)
181 smidsw_temp
|= SMIDSW_WSWAP
;
183 smidsw_temp
&= ~SMIDSW_WSWAP
;
184 SET_BIT_FIELD(smidsw_temp
, SMIDSW_WSETUP
, settings
->write_setup_time
);
185 SET_BIT_FIELD(smidsw_temp
, SMIDSW_WHOLD
, settings
->write_hold_time
);
186 SET_BIT_FIELD(smidsw_temp
, SMIDSW_WPACE
, settings
->write_pace_time
);
187 SET_BIT_FIELD(smidsw_temp
, SMIDSW_WSTROBE
,
188 settings
->write_strobe_time
);
189 write_smi_reg(inst
, smidsw_temp
, SMIDSW0
);
191 SET_BIT_FIELD(smidc_temp
, SMIDC_REQR
, settings
->dma_read_thresh
);
192 SET_BIT_FIELD(smidc_temp
, SMIDC_REQW
, settings
->dma_write_thresh
);
193 SET_BIT_FIELD(smidc_temp
, SMIDC_PANICR
,
194 settings
->dma_panic_read_thresh
);
195 SET_BIT_FIELD(smidc_temp
, SMIDC_PANICW
,
196 settings
->dma_panic_write_thresh
);
197 if (settings
->dma_passthrough_enable
) {
198 smidc_temp
|= SMIDC_DMAP
;
199 smidsr_temp
|= SMIDSR_RDREQ
;
200 write_smi_reg(inst
, smidsr_temp
, SMIDSR0
);
201 smidsw_temp
|= SMIDSW_WDREQ
;
202 write_smi_reg(inst
, smidsw_temp
, SMIDSW0
);
204 smidc_temp
&= ~SMIDC_DMAP
;
205 if (settings
->dma_enable
)
206 smidc_temp
|= SMIDC_DMAEN
;
208 smidc_temp
&= ~SMIDC_DMAEN
;
210 write_smi_reg(inst
, smidc_temp
, SMIDC
);
212 /* re-enable (if was previously enabled) */
213 write_smi_reg(inst
, smics_temp
, SMICS
);
214 write_smi_reg(inst
, smidcs_temp
, SMIDCS
);
216 spin_unlock(&inst
->transaction_lock
);
218 EXPORT_SYMBOL(bcm2835_smi_set_regs_from_settings
);
220 struct smi_settings
*bcm2835_smi_get_settings_from_regs
221 (struct bcm2835_smi_instance
*inst
)
223 struct smi_settings
*settings
= &inst
->settings
;
224 int smidsr
, smidsw
, smidc
;
226 spin_lock(&inst
->transaction_lock
);
228 smidsr
= read_smi_reg(inst
, SMIDSR0
);
229 smidsw
= read_smi_reg(inst
, SMIDSW0
);
230 smidc
= read_smi_reg(inst
, SMIDC
);
232 settings
->pack_data
= (read_smi_reg(inst
, SMICS
) & SMICS_PXLDAT
) ?
235 settings
->data_width
= GET_BIT_FIELD(smidsr
, SMIDSR_RWIDTH
);
236 settings
->read_setup_time
= GET_BIT_FIELD(smidsr
, SMIDSR_RSETUP
);
237 settings
->read_hold_time
= GET_BIT_FIELD(smidsr
, SMIDSR_RHOLD
);
238 settings
->read_pace_time
= GET_BIT_FIELD(smidsr
, SMIDSR_RPACE
);
239 settings
->read_strobe_time
= GET_BIT_FIELD(smidsr
, SMIDSR_RSTROBE
);
241 settings
->write_setup_time
= GET_BIT_FIELD(smidsw
, SMIDSW_WSETUP
);
242 settings
->write_hold_time
= GET_BIT_FIELD(smidsw
, SMIDSW_WHOLD
);
243 settings
->write_pace_time
= GET_BIT_FIELD(smidsw
, SMIDSW_WPACE
);
244 settings
->write_strobe_time
= GET_BIT_FIELD(smidsw
, SMIDSW_WSTROBE
);
246 settings
->dma_read_thresh
= GET_BIT_FIELD(smidc
, SMIDC_REQR
);
247 settings
->dma_write_thresh
= GET_BIT_FIELD(smidc
, SMIDC_REQW
);
248 settings
->dma_panic_read_thresh
= GET_BIT_FIELD(smidc
, SMIDC_PANICR
);
249 settings
->dma_panic_write_thresh
= GET_BIT_FIELD(smidc
, SMIDC_PANICW
);
250 settings
->dma_passthrough_enable
= (smidc
& SMIDC_DMAP
) ? true : false;
251 settings
->dma_enable
= (smidc
& SMIDC_DMAEN
) ? true : false;
253 spin_unlock(&inst
->transaction_lock
);
257 EXPORT_SYMBOL(bcm2835_smi_get_settings_from_regs
);
259 static inline void smi_set_address(struct bcm2835_smi_instance
*inst
,
260 unsigned int address
)
262 int smia_temp
= 0, smida_temp
= 0;
264 SET_BIT_FIELD(smia_temp
, SMIA_ADDR
, address
);
265 SET_BIT_FIELD(smida_temp
, SMIDA_ADDR
, address
);
267 /* Write to both address registers - user doesn't care whether we're
268 doing programmed or direct transfers. */
269 write_smi_reg(inst
, smia_temp
, SMIA
);
270 write_smi_reg(inst
, smida_temp
, SMIDA
);
273 static void smi_setup_regs(struct bcm2835_smi_instance
*inst
)
276 dev_dbg(inst
->dev
, "Initialising SMI registers...");
277 /* Disable the peripheral if already enabled */
278 write_smi_reg(inst
, 0, SMICS
);
279 write_smi_reg(inst
, 0, SMIDCS
);
281 smi_get_default_settings(inst
);
282 bcm2835_smi_set_regs_from_settings(inst
);
283 smi_set_address(inst
, 0);
285 write_smi_reg(inst
, read_smi_reg(inst
, SMICS
) | SMICS_ENABLE
, SMICS
);
286 write_smi_reg(inst
, read_smi_reg(inst
, SMIDCS
) | SMIDCS_ENABLE
,
/****************************************************************************
*
* Low-level SMI access functions
* Other modules should use the exported higher-level functions e.g.
* bcm2835_smi_write_buf() unless they have a good reason to use these
*
***************************************************************************/
/* Perform one direct-mode read: arm SMIDCS, issue START, poll for DONE,
 * then fetch the data word from SMIDD.
 * NOTE(review): this view is truncated - the timeout counter declaration,
 * its increment/bound in the while condition, the timeout error branch
 * (dev_err + fallback return) and the braces are not visible. */
298 static inline uint32_t smi_read_single_word(struct bcm2835_smi_instance
*inst
)
302 write_smi_reg(inst
, SMIDCS_ENABLE
, SMIDCS
);
303 write_smi_reg(inst
, SMIDCS_ENABLE
| SMIDCS_START
, SMIDCS
);
304 /* Make sure things happen in the right order...*/
306 while (!(read_smi_reg(inst
, SMIDCS
) & SMIDCS_DONE
) &&
310 return read_smi_reg(inst
, SMIDD
);
/* Reached only on timeout (error path truncated in this view): */
313 "SMI direct read timed out (is the clock set up correctly?)");
/* Perform one direct-mode write: enable+write mode, load SMIDD, then
 * START, and poll SMIDCS for DONE.
 * NOTE(review): truncated - the `data` parameter declaration, the timeout
 * counter, the trailing SMIDCS argument of the third write, the poll
 * bound, and the dev_err call around the timeout string are not visible. */
317 static inline void smi_write_single_word(struct bcm2835_smi_instance
*inst
,
322 write_smi_reg(inst
, SMIDCS_ENABLE
| SMIDCS_WRITE
, SMIDCS
);
323 write_smi_reg(inst
, data
, SMIDD
);
324 write_smi_reg(inst
, SMIDCS_ENABLE
| SMIDCS_WRITE
| SMIDCS_START
,
327 while (!(read_smi_reg(inst
, SMIDCS
) & SMIDCS_DONE
) &&
330 if (timeout
>= 10000)
332 "SMI direct write timed out (is the clock set up correctly?)");
335 /* Initiates a programmed read into the read FIFO. It is up to the caller to
336 * read data from the FIFO - either via paced DMA transfer,
337 * or polling SMICS_RXD to check whether data is available.
338 * SMICS_ACTIVE will go low upon completion. */
/* NOTE(review): truncated - the `num_transfers` parameter declaration,
 * the local smics_temp declaration, the empty loop bodies (`;`), any
 * memory barrier after "Just to be certain", and braces are not visible. */
339 static void smi_init_programmed_read(struct bcm2835_smi_instance
*inst
,
344 /* Disable the peripheral: */
345 smics_temp
= read_smi_reg(inst
, SMICS
) & ~(SMICS_ENABLE
| SMICS_WRITE
);
346 write_smi_reg(inst
, smics_temp
, SMICS
);
/* Busy-wait until the ENABLE bit actually drops. */
347 while (read_smi_reg(inst
, SMICS
) & SMICS_ENABLE
)
350 /* Program the transfer count: */
351 write_smi_reg(inst
, num_transfers
, SMIL
);
353 /* re-enable and start: */
354 smics_temp
|= SMICS_ENABLE
;
355 write_smi_reg(inst
, smics_temp
, SMICS
);
356 smics_temp
|= SMICS_CLEAR
;
357 /* Just to be certain: */
359 while (read_smi_reg(inst
, SMICS
) & SMICS_ACTIVE
)
/* Write CLEAR while idle, then set START to kick off the transfer. */
361 write_smi_reg(inst
, smics_temp
, SMICS
);
362 smics_temp
|= SMICS_START
;
363 write_smi_reg(inst
, smics_temp
, SMICS
);
366 /* Initiates a programmed write sequence, using data from the write FIFO.
367 * It is up to the caller to initiate a DMA transfer before calling,
368 * or use another method to keep the write FIFO topped up.
369 * SMICS_ACTIVE will go low upon completion.
371 static void smi_init_programmed_write(struct bcm2835_smi_instance
*inst
,
376 /* Disable the peripheral: */
377 smics_temp
= read_smi_reg(inst
, SMICS
) & ~SMICS_ENABLE
;
378 write_smi_reg(inst
, smics_temp
, SMICS
);
379 while (read_smi_reg(inst
, SMICS
) & SMICS_ENABLE
)
382 /* Program the transfer count: */
383 write_smi_reg(inst
, num_transfers
, SMIL
);
385 /* setup, re-enable and start: */
386 smics_temp
|= SMICS_WRITE
| SMICS_ENABLE
;
387 write_smi_reg(inst
, smics_temp
, SMICS
);
388 smics_temp
|= SMICS_START
;
389 write_smi_reg(inst
, smics_temp
, SMICS
);
392 /* Initiate a read and then poll FIFO for data, reading out as it appears. */
/* NOTE(review): truncated - the drain-while body, the `return` after the
 * unsupported-width error, the fifo_count declaration and the bounds of
 * the final drain loop are not visible in this view. */
393 static void smi_read_fifo(struct bcm2835_smi_instance
*inst
,
394 uint32_t *dest
, int n_bytes
)
/* Warn (and drain) if stale data is sitting in the RX FIFO. */
396 if (read_smi_reg(inst
, SMICS
) & SMICS_RXD
) {
397 smi_dump_context_labelled(inst
,
398 "WARNING: read FIFO not empty at start of read call.");
399 while (read_smi_reg(inst
, SMICS
))
403 /* Dispatch the read: */
404 if (inst
->settings
.data_width
== SMI_WIDTH_8BIT
)
405 smi_init_programmed_read(inst
, n_bytes
);
406 else if (inst
->settings
.data_width
== SMI_WIDTH_16BIT
)
/* 16-bit bus: SMIL counts transfers, so halve the byte count. */
407 smi_init_programmed_read(inst
, n_bytes
/ 2);
409 dev_err(inst
->dev
, "Unsupported data width for read.");
413 /* Poll FIFO to keep it empty */
414 while (!(read_smi_reg(inst
, SMICS
) & SMICS_DONE
))
415 if (read_smi_reg(inst
, SMICS
) & SMICS_RXD
)
416 *dest
++ = read_smi_reg(inst
, SMID
);
418 /* Ensure that the FIFO is emptied */
419 if (read_smi_reg(inst
, SMICS
) & SMICS_RXD
) {
/* Residual word count read from the FIFO debug register. */
422 fifo_count
= GET_BIT_FIELD(read_smi_reg(inst
, SMIFD
),
425 *dest
++ = read_smi_reg(inst
, SMID
);
428 if (!(read_smi_reg(inst
, SMICS
) & SMICS_DONE
))
429 smi_dump_context_labelled(inst
,
430 "WARNING: transaction finished but done bit not set.");
432 if (read_smi_reg(inst
, SMICS
) & SMICS_RXD
)
433 smi_dump_context_labelled(inst
,
434 "WARNING: read FIFO not empty at end of read call.");
438 /* Initiate a write, and then keep the FIFO topped up. */
/* NOTE(review): truncated - the declarations of `i` and `timeout`, the
 * trailing SMICS argument of the clear write, the unsupported-width
 * `return`, the TXD poll body and the timeout bound are not visible. */
439 static void smi_write_fifo(struct bcm2835_smi_instance
*inst
,
440 uint32_t *src
, int n_bytes
)
444 /* Empty FIFOs if not already so */
445 if (!(read_smi_reg(inst
, SMICS
) & SMICS_TXE
)) {
446 smi_dump_context_labelled(inst
,
447 "WARNING: write fifo not empty at start of write call.");
448 write_smi_reg(inst
, read_smi_reg(inst
, SMICS
) | SMICS_CLEAR
,
452 /* Initiate the transfer */
453 if (inst
->settings
.data_width
== SMI_WIDTH_8BIT
)
454 smi_init_programmed_write(inst
, n_bytes
);
455 else if (inst
->settings
.data_width
== SMI_WIDTH_16BIT
)
/* 16-bit bus: SMIL counts transfers, so halve the byte count. */
456 smi_init_programmed_write(inst
, n_bytes
/ 2);
458 dev_err(inst
->dev
, "Unsupported data width for write.");
/* Feed the FIFO one 32-bit word at a time, ceil(n_bytes / 4) words. */
462 for (i
= 0; i
< (n_bytes
- 1) / 4 + 1; ++i
) {
463 while (!(read_smi_reg(inst
, SMICS
) & SMICS_TXD
))
465 write_smi_reg(inst
, *src
++, SMID
);
/* Wait for completion, with a bounded spin. */
468 while (!(read_smi_reg(inst
, SMICS
) & SMICS_DONE
) && ++timeout
<
471 if (timeout
>= 1000000)
472 smi_dump_context_labelled(inst
,
473 "Timed out on write operation!");
474 if (!(read_smi_reg(inst
, SMICS
) & SMICS_TXE
))
475 smi_dump_context_labelled(inst
,
476 "WARNING: FIFO not empty at end of write operation.");
479 /****************************************************************************
483 ***************************************************************************/
485 /* Disable SMI and put it into the correct direction before doing DMA setup.
486 Stops spurious DREQs during setup. Peripheral is re-enabled by init_*() */
487 static void smi_disable(struct bcm2835_smi_instance
*inst
,
488 enum dma_transfer_direction direction
)
490 int smics_temp
= read_smi_reg(inst
, SMICS
) & ~SMICS_ENABLE
;
492 if (direction
== DMA_DEV_TO_MEM
)
493 smics_temp
&= ~SMICS_WRITE
;
495 smics_temp
|= SMICS_WRITE
;
496 write_smi_reg(inst
, smics_temp
, SMICS
);
497 while (read_smi_reg(inst
, SMICS
) & SMICS_ACTIVE
)
501 static struct scatterlist
*smi_scatterlist_from_buffer(
502 struct bcm2835_smi_instance
*inst
,
505 struct scatterlist
*sg
)
507 sg_init_table(sg
, 1);
508 sg_dma_address(sg
) = buf
;
509 sg_dma_len(sg
) = len
;
513 static void smi_dma_callback_user_copy(void *param
)
515 /* Notify the bottom half that a chunk is ready for user copy */
516 struct bcm2835_smi_instance
*inst
=
517 (struct bcm2835_smi_instance
*)param
;
519 up(&inst
->bounce
.callback_sem
);
522 /* Creates a descriptor, assigns the given callback, and submits the
523 descriptor to dmaengine. Does not block - can queue up multiple
524 descriptors and then wait for them all to complete.
525 sg_len is the number of control blocks, NOT the number of bytes.
526 dir can be DMA_MEM_TO_DEV or DMA_DEV_TO_MEM.
527 callback can be NULL - in this case it is not called. */
/* NOTE(review): truncated - the `sg_len` parameter, the sgl/len/dir
 * arguments to dmaengine_prep_slave_sg, the flag that completes the
 * DMA_PREP_* mask, the `if (!desc)` framing of the error path, and the
 * final return statements are not visible in this view. */
528 static inline struct dma_async_tx_descriptor
*smi_dma_submit_sgl(
529 struct bcm2835_smi_instance
*inst
,
530 struct scatterlist
*sgl
,
532 enum dma_transfer_direction dir
,
533 dma_async_tx_callback callback
)
535 struct dma_async_tx_descriptor
*desc
;
538 desc
= dmaengine_prep_slave_sg(inst
->dma_chan
,
541 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
|
/* Preparation-failure path: log, then pulse SMICS_ACTIVE to recover. */
544 dev_err(inst
->dev
, "read_sgl: dma slave preparation failed!");
545 write_smi_reg(inst
, read_smi_reg(inst
, SMICS
) & ~SMICS_ACTIVE
,
547 while (read_smi_reg(inst
, SMICS
) & SMICS_ACTIVE
)
549 write_smi_reg(inst
, read_smi_reg(inst
, SMICS
) | SMICS_ACTIVE
,
/* Success path: attach callback and hand the descriptor to dmaengine. */
553 desc
->callback
= callback
;
554 desc
->callback_param
= inst
;
555 if (dmaengine_submit(desc
) < 0)
560 /* NB this function blocks until the transfer is complete */
/* NOTE(review): truncated - the storage-class/return-type line preceding
 * the function name, the `else` of the width dispatch, and braces are not
 * visible in this view. */
562 smi_dma_read_sgl(struct bcm2835_smi_instance
*inst
,
563 struct scatterlist
*sgl
, size_t sg_len
, size_t n_bytes
)
565 struct dma_async_tx_descriptor
*desc
;
567 /* Disable SMI and set to read before dispatching DMA - if SMI is in
568 * write mode and TX fifo is empty, it will generate a DREQ which may
569 * cause the read DMA to complete before the SMI read command is even
570 * dispatched! We want to dispatch DMA before SMI read so that reading
571 * is gapless, for logic analyser.
574 smi_disable(inst
, DMA_DEV_TO_MEM
);
/* Queue the DMA first, then kick off the SMI programmed read. */
576 desc
= smi_dma_submit_sgl(inst
, sgl
, sg_len
, DMA_DEV_TO_MEM
, NULL
);
577 dma_async_issue_pending(inst
->dma_chan
);
579 if (inst
->settings
.data_width
== SMI_WIDTH_8BIT
)
580 smi_init_programmed_read(inst
, n_bytes
);
582 smi_init_programmed_read(inst
, n_bytes
/ 2);
/* Block until dmaengine reports the descriptor complete. */
584 if (dma_wait_for_async_tx(desc
) == DMA_ERROR
)
585 smi_dump_context_labelled(inst
, "DMA timeout!");
/* Blocking DMA write through the SMI write FIFO: start the programmed
 * write, queue the scatterlist on the dmaengine channel, then wait for
 * both the DMA descriptor and SMICS_DONE.
 * NOTE(review): truncated - the storage-class/return-type line preceding
 * the function name, the `else` of the width dispatch, the final wait-loop
 * body and braces are not visible in this view. */
589 smi_dma_write_sgl(struct bcm2835_smi_instance
*inst
,
590 struct scatterlist
*sgl
, size_t sg_len
, size_t n_bytes
)
592 struct dma_async_tx_descriptor
*desc
;
594 if (inst
->settings
.data_width
== SMI_WIDTH_8BIT
)
595 smi_init_programmed_write(inst
, n_bytes
);
597 smi_init_programmed_write(inst
, n_bytes
/ 2);
599 desc
= smi_dma_submit_sgl(inst
, sgl
, sg_len
, DMA_MEM_TO_DEV
, NULL
);
600 dma_async_issue_pending(inst
->dma_chan
);
602 if (dma_wait_for_async_tx(desc
) == DMA_ERROR
)
603 smi_dump_context_labelled(inst
, "DMA timeout!");
605 /* Wait for SMI to finish our writes */
606 while (!(read_smi_reg(inst
, SMICS
) & SMICS_DONE
))
/* Exported: stream `count` bytes between SMI and a userspace buffer via
 * the driver's bounce buffers, chunked at DMA_BOUNCE_BUFFER_SIZE. Each
 * completed chunk releases bounce.callback_sem so the caller's bottom
 * half can copy to/from user_ptr.
 * NOTE(review): truncated - the chunk while-loop framing, the
 * scatterlist-argument lines, the error-path unlock/return, the loop
 * increment of chunk_no, and the final return value are not visible. */
610 ssize_t
bcm2835_smi_user_dma(
611 struct bcm2835_smi_instance
*inst
,
612 enum dma_transfer_direction dma_dir
,
613 char __user
*user_ptr
, size_t count
,
614 struct bcm2835_smi_bounce_info
**bounce
)
616 int chunk_no
= 0, chunk_size
, count_left
= count
;
617 struct scatterlist
*sgl
;
/* Width-appropriate kick-off routine, chosen by direction below. */
618 void (*init_trans_func
)(struct bcm2835_smi_instance
*, int);
620 spin_lock(&inst
->transaction_lock
);
622 if (dma_dir
== DMA_DEV_TO_MEM
)
623 init_trans_func
= smi_init_programmed_read
;
625 init_trans_func
= smi_init_programmed_write
;
627 smi_disable(inst
, dma_dir
);
/* Reset the chunk-ready semaphore and hand the bounce info back. */
629 sema_init(&inst
->bounce
.callback_sem
, 0);
631 *bounce
= &inst
->bounce
;
/* Per-chunk: clamp to the bounce buffer size, pick a rotating buffer. */
633 chunk_size
= count_left
> DMA_BOUNCE_BUFFER_SIZE
?
634 DMA_BOUNCE_BUFFER_SIZE
: count_left
;
635 if (chunk_size
== DMA_BOUNCE_BUFFER_SIZE
) {
637 &inst
->bounce
.sgl
[chunk_no
% DMA_BOUNCE_BUFFER_COUNT
];
639 sgl
= smi_scatterlist_from_buffer(
642 chunk_no
% DMA_BOUNCE_BUFFER_COUNT
],
647 if (!smi_dma_submit_sgl(inst
, sgl
, 1, dma_dir
,
648 smi_dma_callback_user_copy
650 dev_err(inst
->dev
, "sgl submit failed");
654 count_left
-= chunk_size
;
657 dma_async_issue_pending(inst
->dma_chan
);
/* Start the SMI engine for the whole transfer (halved on 16-bit bus). */
659 if (inst
->settings
.data_width
== SMI_WIDTH_8BIT
)
660 init_trans_func(inst
, count
);
661 else if (inst
->settings
.data_width
== SMI_WIDTH_16BIT
)
662 init_trans_func(inst
, count
/ 2);
664 spin_unlock(&inst
->transaction_lock
);
667 EXPORT_SYMBOL(bcm2835_smi_user_dma
);
670 /****************************************************************************
672 * High level buffer transfer functions - for use by other drivers
674 ***************************************************************************/
676 /* Buffer must be physically contiguous - i.e. kmalloc, not vmalloc! */
/* Exported: write n_bytes from buf out over SMI. Word-aligned bulk goes
 * via DMA (above DMA_THRESHOLD_BYTES) or the FIFO; the 1-3 trailing odd
 * bytes are sent as direct single-word writes (8-bit mode only).
 * NOTE(review): truncated - dma_map_single argument lines, the NULL-sgl
 * error return, the dma_unmap_single call start, the odd-byte loop
 * counters/increments and the dev_err lines framing the warnings are not
 * visible in this view. */
677 void bcm2835_smi_write_buf(
678 struct bcm2835_smi_instance
*inst
,
679 const void *buf
, size_t n_bytes
)
/* Split off the sub-word remainder; handled separately at the end. */
681 int odd_bytes
= n_bytes
& 0x3;
683 n_bytes
-= odd_bytes
;
685 spin_lock(&inst
->transaction_lock
);
687 if (n_bytes
> DMA_THRESHOLD_BYTES
) {
688 dma_addr_t phy_addr
= dma_map_single(
693 struct scatterlist
*sgl
=
694 smi_scatterlist_from_buffer(inst
, phy_addr
, n_bytes
,
698 smi_dump_context_labelled(inst
,
699 "Error: could not create scatterlist for write!");
702 smi_dma_write_sgl(inst
, sgl
, 1, n_bytes
);
705 (inst
->dev
, phy_addr
, n_bytes
, DMA_MEM_TO_DEV
);
706 } else if (n_bytes
) {
707 smi_write_fifo(inst
, (uint32_t *) buf
, n_bytes
);
711 if (inst
->settings
.data_width
== SMI_WIDTH_8BIT
) {
713 smi_write_single_word(inst
, *(uint8_t *) (buf
++));
715 while (odd_bytes
>= 2) {
716 smi_write_single_word(inst
, *(uint16_t *)buf
);
721 /* Reading an odd number of bytes on a 16 bit bus is
722 a user bug. It's kinder to fail early and tell them
723 than to e.g. transparently give them the bottom byte
724 of a 16 bit transfer. */
726 "WARNING: odd number of bytes specified for wide transfer.");
728 "At least one byte dropped as a result.");
733 spin_unlock(&inst
->transaction_lock
);
735 EXPORT_SYMBOL(bcm2835_smi_write_buf
);
/* Exported: read n_bytes from SMI into buf - the mirror image of
 * bcm2835_smi_write_buf() (DMA above threshold, FIFO below, direct
 * single-word reads for the 1-3 trailing odd bytes).
 * NOTE(review): truncated - dma_map_single argument lines, the NULL-sgl
 * error return, odd-byte loop counters/increments and the dev_err lines
 * framing the warnings are not visible in this view. */
737 void bcm2835_smi_read_buf(struct bcm2835_smi_instance
*inst
,
738 void *buf
, size_t n_bytes
)
741 /* SMI is inherently 32-bit, which causes surprising amounts of mess
742 for bytes % 4 != 0. Easiest to avoid this mess altogether
743 by handling remainder separately. */
744 int odd_bytes
= n_bytes
& 0x3;
746 spin_lock(&inst
->transaction_lock
);
747 n_bytes
-= odd_bytes
;
748 if (n_bytes
> DMA_THRESHOLD_BYTES
) {
749 dma_addr_t phy_addr
= dma_map_single(inst
->dev
,
752 struct scatterlist
*sgl
= smi_scatterlist_from_buffer(
753 inst
, phy_addr
, n_bytes
,
756 smi_dump_context_labelled(inst
,
757 "Error: could not create scatterlist for read!");
760 smi_dma_read_sgl(inst
, sgl
, 1, n_bytes
);
761 dma_unmap_single(inst
->dev
, phy_addr
, n_bytes
, DMA_DEV_TO_MEM
);
762 } else if (n_bytes
) {
763 smi_read_fifo(inst
, (uint32_t *)buf
, n_bytes
);
767 if (inst
->settings
.data_width
== SMI_WIDTH_8BIT
) {
769 *((uint8_t *) (buf
++)) = smi_read_single_word(inst
);
771 while (odd_bytes
>= 2) {
772 *(uint16_t *) buf
= smi_read_single_word(inst
);
778 "WARNING: odd number of bytes specified for wide transfer.");
780 "At least one byte dropped as a result.");
785 spin_unlock(&inst
->transaction_lock
);
787 EXPORT_SYMBOL(bcm2835_smi_read_buf
);
789 void bcm2835_smi_set_address(struct bcm2835_smi_instance
*inst
,
790 unsigned int address
)
792 spin_lock(&inst
->transaction_lock
);
793 smi_set_address(inst
, address
);
794 spin_unlock(&inst
->transaction_lock
);
796 EXPORT_SYMBOL(bcm2835_smi_set_address
);
/* Exported: resolve a device-tree node to this driver's instance data,
 * for client drivers that reference the SMI controller by phandle.
 * NOTE(review): truncated - the NULL checks on `node` and `pdev` that the
 * intervening (missing) lines presumably perform are not visible; as
 * shown, a NULL pdev would be passed straight to platform_get_drvdata. */
798 struct bcm2835_smi_instance
*bcm2835_smi_get(struct device_node
*node
)
800 struct platform_device
*pdev
;
805 pdev
= of_find_device_by_node(node
);
809 return platform_get_drvdata(pdev
);
811 EXPORT_SYMBOL(bcm2835_smi_get
);
813 /****************************************************************************
815 * bcm2835_smi_probe - called when the driver is loaded.
817 ***************************************************************************/
/* Acquire the "rx-tx" dmaengine channel, program its slave config to
 * target the SMI data FIFO (SMID), and allocate/map the bounce buffers.
 * NOTE(review): truncated - the declarations of `i` (and any rc), the
 * failure check after dma_request_slave_channel, the GFP flag argument,
 * the error `return` values and the closing braces are not visible. */
819 static int bcm2835_smi_dma_setup(struct bcm2835_smi_instance
*inst
)
823 inst
->dma_chan
= dma_request_slave_channel(inst
->dev
, "rx-tx");
/* Both directions move 32-bit words to/from the SMID FIFO register. */
825 inst
->dma_config
.src_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
826 inst
->dma_config
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
827 inst
->dma_config
.src_addr
= inst
->smi_regs_busaddr
+ SMID
;
828 inst
->dma_config
.dst_addr
= inst
->dma_config
.src_addr
;
829 /* Direction unimportant - always overridden by prep_slave_sg */
830 inst
->dma_config
.direction
= DMA_DEV_TO_MEM
;
831 dmaengine_slave_config(inst
->dma_chan
, &inst
->dma_config
);
832 /* Alloc and map bounce buffers */
833 for (i
= 0; i
< DMA_BOUNCE_BUFFER_COUNT
; ++i
) {
834 inst
->bounce
.buffer
[i
] =
835 dmam_alloc_coherent(inst
->dev
, DMA_BOUNCE_BUFFER_SIZE
,
836 &inst
->bounce
.phys
[i
],
838 if (!inst
->bounce
.buffer
[i
]) {
839 dev_err(inst
->dev
, "Could not allocate buffer!");
/* Pre-build one single-entry sgl per bounce buffer. */
843 smi_scatterlist_from_buffer(
845 inst
->bounce
.phys
[i
],
846 DMA_BOUNCE_BUFFER_SIZE
,
/* Platform probe: map the register block, record its bus address from
 * the device tree, set up DMA, enable the clock, and initialise the
 * peripheral registers.
 * NOTE(review): truncated - the `err`/`addr` declarations, the of_node
 * and allocation NULL checks, the error `return`s, the assignment of
 * inst->dev (dereferenced at the end and elsewhere) and the final
 * `return 0` are not visible. Also `be32_to_cpu(addr)` appears to take
 * the pointer rather than `*addr` - possibly a dropped `*` in this view;
 * verify against the original. */
854 static int bcm2835_smi_probe(struct platform_device
*pdev
)
857 struct device
*dev
= &pdev
->dev
;
858 struct device_node
*node
= dev
->of_node
;
859 struct resource
*ioresource
;
860 struct bcm2835_smi_instance
*inst
;
863 /* We require device tree support */
866 /* Allocate buffers and instance data */
867 inst
= devm_kzalloc(dev
, sizeof(struct bcm2835_smi_instance
),
873 spin_lock_init(&inst
->transaction_lock
);
/* Map the SMI register block. */
875 ioresource
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
876 inst
->smi_regs_ptr
= devm_ioremap_resource(dev
, ioresource
);
877 if (IS_ERR(inst
->smi_regs_ptr
)) {
878 err
= PTR_ERR(inst
->smi_regs_ptr
);
/* Bus address of the block, straight from the DT reg property. */
881 addr
= of_get_address(node
, 0, NULL
, NULL
);
882 inst
->smi_regs_busaddr
= be32_to_cpu(addr
);
884 err
= bcm2835_smi_dma_setup(inst
);
/* Grab and enable the (sole) clock for the block. */
889 inst
->clk
= devm_clk_get(dev
, NULL
);
892 clk_prepare_enable(inst
->clk
);
894 /* Finally, do peripheral setup */
895 smi_setup_regs(inst
);
897 platform_set_drvdata(pdev
, inst
);
899 dev_info(inst
->dev
, "initialised");
907 /****************************************************************************
909 * bcm2835_smi_remove - called when the driver is unloaded.
911 ***************************************************************************/
913 static int bcm2835_smi_remove(struct platform_device
*pdev
)
915 struct bcm2835_smi_instance
*inst
= platform_get_drvdata(pdev
);
916 struct device
*dev
= inst
->dev
;
918 dmaengine_terminate_all(inst
->dma_chan
);
919 dma_release_channel(inst
->dma_chan
);
921 clk_disable_unprepare(inst
->clk
);
923 dev_info(dev
, "SMI device removed - OK");
927 /****************************************************************************
929 * Register the driver with device tree
931 ***************************************************************************/
933 static const struct of_device_id bcm2835_smi_of_match
[] = {
934 {.compatible
= "brcm,bcm2835-smi",},
938 MODULE_DEVICE_TABLE(of
, bcm2835_smi_of_match
);
940 static struct platform_driver bcm2835_smi_driver
= {
941 .probe
= bcm2835_smi_probe
,
942 .remove
= bcm2835_smi_remove
,
945 .owner
= THIS_MODULE
,
946 .of_match_table
= bcm2835_smi_of_match
,
950 module_platform_driver(bcm2835_smi_driver
);
952 MODULE_ALIAS("platform:smi-bcm2835");
953 MODULE_LICENSE("GPL");
954 MODULE_DESCRIPTION("Device driver for BCM2835's secondary memory interface");
955 MODULE_AUTHOR("Luke Wren <luke@raspberrypi.org>");