drivers/spi/spi-bcm-qspi.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Driver for Broadcom BRCMSTB, NSP, NS2, Cygnus SPI Controllers
4 *
5 * Copyright 2016 Broadcom
6 */
7
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/device.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/ioport.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/of.h>
18 #include <linux/of_irq.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <linux/spi/spi.h>
22 #include <linux/spi/spi-mem.h>
23 #include <linux/sysfs.h>
24 #include <linux/types.h>
25 #include "spi-bcm-qspi.h"
26
27 #define DRIVER_NAME "bcm_qspi"
28
29
30 /* BSPI register offsets */
31 #define BSPI_REVISION_ID 0x000
32 #define BSPI_SCRATCH 0x004
33 #define BSPI_MAST_N_BOOT_CTRL 0x008
34 #define BSPI_BUSY_STATUS 0x00c
35 #define BSPI_INTR_STATUS 0x010
36 #define BSPI_B0_STATUS 0x014
37 #define BSPI_B0_CTRL 0x018
38 #define BSPI_B1_STATUS 0x01c
39 #define BSPI_B1_CTRL 0x020
40 #define BSPI_STRAP_OVERRIDE_CTRL 0x024
41 #define BSPI_FLEX_MODE_ENABLE 0x028
42 #define BSPI_BITS_PER_CYCLE 0x02c
43 #define BSPI_BITS_PER_PHASE 0x030
44 #define BSPI_CMD_AND_MODE_BYTE 0x034
45 #define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
46 #define BSPI_BSPI_XOR_VALUE 0x03c
47 #define BSPI_BSPI_XOR_ENABLE 0x040
48 #define BSPI_BSPI_PIO_MODE_ENABLE 0x044
49 #define BSPI_BSPI_PIO_IODIR 0x048
50 #define BSPI_BSPI_PIO_DATA 0x04c
51
52 /* RAF register offsets */
53 #define BSPI_RAF_START_ADDR 0x100
54 #define BSPI_RAF_NUM_WORDS 0x104
55 #define BSPI_RAF_CTRL 0x108
56 #define BSPI_RAF_FULLNESS 0x10c
57 #define BSPI_RAF_WATERMARK 0x110
58 #define BSPI_RAF_STATUS 0x114
59 #define BSPI_RAF_READ_DATA 0x118
60 #define BSPI_RAF_WORD_CNT 0x11c
61 #define BSPI_RAF_CURR_ADDR 0x120
62
63 /* Override mode masks */
64 #define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE BIT(0)
65 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL BIT(1)
66 #define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE BIT(2)
67 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD BIT(3)
68 #define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE BIT(4)
69
70 #define BSPI_ADDRLEN_3BYTES 3
71 #define BSPI_ADDRLEN_4BYTES 4
72
73 #define BSPI_RAF_STATUS_FIFO_EMPTY_MASK BIT(1)
74
75 #define BSPI_RAF_CTRL_START_MASK BIT(0)
76 #define BSPI_RAF_CTRL_CLEAR_MASK BIT(1)
77
78 #define BSPI_BPP_MODE_SELECT_MASK BIT(8)
79 #define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
80
81 #define BSPI_READ_LENGTH 256
82
83 /* MSPI register offsets */
84 #define MSPI_SPCR0_LSB 0x000
85 #define MSPI_SPCR0_MSB 0x004
86 #define MSPI_SPCR1_LSB 0x008
87 #define MSPI_SPCR1_MSB 0x00c
88 #define MSPI_NEWQP 0x010
89 #define MSPI_ENDQP 0x014
90 #define MSPI_SPCR2 0x018
91 #define MSPI_MSPI_STATUS 0x020
92 #define MSPI_CPTQP 0x024
93 #define MSPI_SPCR3 0x028
94 #define MSPI_REV 0x02c
95 #define MSPI_TXRAM 0x040
96 #define MSPI_RXRAM 0x0c0
97 #define MSPI_CDRAM 0x140
98 #define MSPI_WRITE_LOCK 0x180
99
100 #define MSPI_MASTER_BIT BIT(7)
101
102 #define MSPI_NUM_CDRAM 16
103 #define MSPI_CDRAM_CONT_BIT BIT(7)
104 #define MSPI_CDRAM_BITSE_BIT BIT(6)
105 #define MSPI_CDRAM_PCS 0xf
106
107 #define MSPI_SPCR2_SPE BIT(6)
108 #define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)
109
110 #define MSPI_SPCR3_FASTBR BIT(0)
111 #define MSPI_SPCR3_FASTDT BIT(1)
112 #define MSPI_SPCR3_SYSCLKSEL_MASK GENMASK(11, 10)
113 #define MSPI_SPCR3_SYSCLKSEL_27 (MSPI_SPCR3_SYSCLKSEL_MASK & \
114 ~(BIT(10) | BIT(11)))
115 #define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \
116 BIT(11))
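/* SYSCLKSEL field in SPCR3: 0 selects the 27 MHz reference clock, BIT(11) the 108 MHz clock */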
117
118 #define MSPI_MSPI_STATUS_SPIF BIT(0)
119
120 #define INTR_BASE_BIT_SHIFT 0x02
121 #define INTR_COUNT 0x07
122
123 #define NUM_CHIPSELECT 4
124 #define QSPI_SPBR_MAX 255U
125 #define MSPI_BASE_FREQ 27000000UL
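/* MSPI serial clock = base_clk / (2 * SPBR); see bcm_qspi_hw_set_parms() */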
126
127 #define OPCODE_DIOR 0xBB
128 #define OPCODE_QIOR 0xEB
129 #define OPCODE_DIOR_4B 0xBC
130 #define OPCODE_QIOR_4B 0xEC
131
132 #define MAX_CMD_SIZE 6
133
134 #define ADDR_4MB_MASK GENMASK(22, 0)
135
136 /* stop at end of transfer, no other reason */
137 #define TRANS_STATUS_BREAK_NONE 0
138 /* stop at end of spi_message */
139 #define TRANS_STATUS_BREAK_EOM 1
140 /* stop at end of spi_transfer if delay */
141 #define TRANS_STATUS_BREAK_DELAY 2
142 /* stop at end of spi_transfer if cs_change */
143 #define TRANS_STATUS_BREAK_CS_CHANGE 4
144 /* stop if we run out of bytes */
145 #define TRANS_STATUS_BREAK_NO_BYTES 8
146
147 /* events that make us stop filling TX slots */
148 #define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM | \
149 TRANS_STATUS_BREAK_DELAY | \
150 TRANS_STATUS_BREAK_CS_CHANGE)
151
152 /* events that make us deassert CS */
153 #define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
154 TRANS_STATUS_BREAK_CS_CHANGE)
155
156 struct bcm_qspi_parms {
157 u32 speed_hz;
158 u8 mode;
159 u8 bits_per_word;
160 };
161
162 struct bcm_xfer_mode {
163 bool flex_mode;
164 unsigned int width;
165 unsigned int addrlen;
166 unsigned int hp;
167 };
168
169 enum base_type {
170 MSPI,
171 BSPI,
172 CHIP_SELECT,
173 BASEMAX,
174 };
175
176 enum irq_source {
177 SINGLE_L2,
178 MUXED_L1,
179 };
180
181 struct bcm_qspi_irq {
182 const char *irq_name;
183 const irq_handler_t irq_handler;
184 int irq_source;
185 u32 mask;
186 };
187
188 struct bcm_qspi_dev_id {
189 const struct bcm_qspi_irq *irqp;
190 void *dev;
191 };
192
193
194 struct qspi_trans {
195 struct spi_transfer *trans;
196 int byte;
197 bool mspi_last_trans;
198 };
199
200 struct bcm_qspi {
201 struct platform_device *pdev;
202 struct spi_master *master;
203 struct clk *clk;
204 u32 base_clk;
205 u32 max_speed_hz;
206 void __iomem *base[BASEMAX];
207
208 /* Some SoCs provide custom interrupt status register(s) */
209 struct bcm_qspi_soc_intc *soc_intc;
210
211 struct bcm_qspi_parms last_parms;
212 struct qspi_trans trans_pos;
213 int curr_cs;
214 int bspi_maj_rev;
215 int bspi_min_rev;
216 int bspi_enabled;
217 const struct spi_mem_op *bspi_rf_op;
218 u32 bspi_rf_op_idx;
219 u32 bspi_rf_op_len;
220 u32 bspi_rf_op_status;
221 struct bcm_xfer_mode xfer_mode;
222 u32 s3_strap_override_ctrl;
223 bool bspi_mode;
224 bool big_endian;
225 int num_irqs;
226 struct bcm_qspi_dev_id *dev_ids;
227 struct completion mspi_done;
228 struct completion bspi_done;
229 u8 mspi_maj_rev;
230 u8 mspi_min_rev;
231 bool mspi_spcr3_sysclk;
232 };
233
234 static inline bool has_bspi(struct bcm_qspi *qspi)
235 {
236 return qspi->bspi_mode;
237 }
238
239 /* hardware supports spcr3 and fast baud-rate */
240 static inline bool bcm_qspi_has_fastbr(struct bcm_qspi *qspi)
241 {
242 if (!has_bspi(qspi) &&
243 ((qspi->mspi_maj_rev >= 1) &&
244 (qspi->mspi_min_rev >= 5)))
245 return true;
246
247 return false;
248 }
249
250 /* hardware supports a 108 MHz sys clk */
251 static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
252 {
253 if (!has_bspi(qspi) && (qspi->mspi_spcr3_sysclk ||
254 ((qspi->mspi_maj_rev >= 1) &&
255 (qspi->mspi_min_rev >= 6))))
256 return true;
257
258 return false;
259 }
260
261 static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
262 {
263 if (bcm_qspi_has_fastbr(qspi))
264 return 1;
265 else
266 return 8;
267 }
268
269 /* Read qspi controller register */
270 static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
271 unsigned int offset)
272 {
273 return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
274 }
275
276 /* Write qspi controller register */
277 static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
278 unsigned int offset, unsigned int data)
279 {
280 bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
281 }
282
283 /* BSPI helpers */
284 static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
285 {
286 int i;
287
288 /* this should normally finish within 10us */
289 for (i = 0; i < 1000; i++) {
290 if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
291 return 0;
292 udelay(1);
293 }
294 dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
295 return -EIO;
296 }
297
298 static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
299 {
300 if (qspi->bspi_maj_rev < 4)
301 return true;
302 return false;
303 }
304
305 static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
306 {
307 bcm_qspi_bspi_busy_poll(qspi);
308 /* Force rising edge for the b0/b1 'flush' field */
309 bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
310 bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
311 bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
312 bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
313 }
314
315 static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
316 {
317 return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
318 BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
319 }
320
321 static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
322 {
323 u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);
324
325 /* BSPI v3 LR is LE only, convert data to host endianness */
326 if (bcm_qspi_bspi_ver_three(qspi))
327 data = le32_to_cpu(data);
328
329 return data;
330 }
331
332 static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
333 {
334 bcm_qspi_bspi_busy_poll(qspi);
335 bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
336 BSPI_RAF_CTRL_START_MASK);
337 }
338
339 static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
340 {
341 bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
342 BSPI_RAF_CTRL_CLEAR_MASK);
343 bcm_qspi_bspi_flush_prefetch_buffers(qspi);
344 }
345
346 static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
347 {
348 u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
349 u32 data = 0;
350
351 dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
352 qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);
353 while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
354 data = bcm_qspi_bspi_lr_read_fifo(qspi);
355 if (likely(qspi->bspi_rf_op_len >= 4) &&
356 IS_ALIGNED((uintptr_t)buf, 4)) {
357 buf[qspi->bspi_rf_op_idx++] = data;
358 qspi->bspi_rf_op_len -= 4;
359 } else {
360 /* Read out the remaining bytes one byte at a time */
361 u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];
362
363 data = cpu_to_le32(data);
364 while (qspi->bspi_rf_op_len) {
365 *cbuf++ = (u8)data;
366 data >>= 8;
367 qspi->bspi_rf_op_len--;
368 }
369 }
370 }
371 }
372
373 static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
374 int bpp, int bpc, int flex_mode)
375 {
376 bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
377 bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
378 bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
379 bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
380 bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
381 }
382
383 static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
384 const struct spi_mem_op *op, int hp)
385 {
386 int bpc = 0, bpp = 0;
387 u8 command = op->cmd.opcode;
388 int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
389 int addrlen = op->addr.nbytes;
390 int flex_mode = 1;
391
392 dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
393 width, addrlen, hp);
394
395 if (addrlen == BSPI_ADDRLEN_4BYTES)
396 bpp = BSPI_BPP_ADDR_SELECT_MASK;
397
398 bpp |= op->dummy.nbytes ? (op->dummy.nbytes * 8) / op->dummy.buswidth : 0;
399
400 switch (width) {
401 case SPI_NBITS_SINGLE:
402 if (addrlen == BSPI_ADDRLEN_3BYTES)
403 /* default mode, does not need flex_cmd */
404 flex_mode = 0;
405 break;
406 case SPI_NBITS_DUAL:
407 bpc = 0x00000001;
408 if (hp) {
409 bpc |= 0x00010100; /* address and mode are 2-bit */
410 bpp = BSPI_BPP_MODE_SELECT_MASK;
411 }
412 break;
413 case SPI_NBITS_QUAD:
414 bpc = 0x00000002;
415 if (hp) {
416 bpc |= 0x00020200; /* address and mode are 4-bit */
417 bpp |= BSPI_BPP_MODE_SELECT_MASK;
418 }
419 break;
420 default:
421 return -EINVAL;
422 }
423
424 bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
425
426 return 0;
427 }
428
429 static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
430 const struct spi_mem_op *op, int hp)
431 {
432 int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
433 int addrlen = op->addr.nbytes;
434 u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
435
436 dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
437 width, addrlen, hp);
438
439 switch (width) {
440 case SPI_NBITS_SINGLE:
441 /* clear quad/dual mode */
442 data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
443 BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
444 break;
445 case SPI_NBITS_QUAD:
446 /* clear dual mode and set quad mode */
447 data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
448 data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
449 break;
450 case SPI_NBITS_DUAL:
451 /* clear quad mode, set dual mode */
452 data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
453 data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
454 break;
455 default:
456 return -EINVAL;
457 }
458
459 if (addrlen == BSPI_ADDRLEN_4BYTES)
460 /* set 4-byte mode */
461 data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
462 else
463 /* clear 4 byte mode */
464 data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
465
466 /* set the override mode */
467 data |= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
468 bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
469 bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);
470
471 return 0;
472 }
473
474 static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
475 const struct spi_mem_op *op, int hp)
476 {
477 int error = 0;
478 int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
479 int addrlen = op->addr.nbytes;
480
481 /* default mode */
482 qspi->xfer_mode.flex_mode = true;
483
484 if (!bcm_qspi_bspi_ver_three(qspi)) {
485 u32 val, mask;
486
487 val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
488 mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
489 if (val & mask || qspi->s3_strap_override_ctrl & mask) {
490 qspi->xfer_mode.flex_mode = false;
491 bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
492 error = bcm_qspi_bspi_set_override(qspi, op, hp);
493 }
494 }
495
496 if (qspi->xfer_mode.flex_mode)
497 error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);
498
499 if (error) {
500 dev_warn(&qspi->pdev->dev,
501 "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
502 width, addrlen, hp);
503 } else if (qspi->xfer_mode.width != width ||
504 qspi->xfer_mode.addrlen != addrlen ||
505 qspi->xfer_mode.hp != hp) {
506 qspi->xfer_mode.width = width;
507 qspi->xfer_mode.addrlen = addrlen;
508 qspi->xfer_mode.hp = hp;
509 dev_dbg(&qspi->pdev->dev,
510 "cs:%d %d-lane output, %d-byte address%s\n",
511 qspi->curr_cs,
512 qspi->xfer_mode.width,
513 qspi->xfer_mode.addrlen,
514 qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
515 }
516
517 return error;
518 }
519
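/* MAST_N_BOOT_CTRL: 0 hands the flash bus to BSPI (boot), 1 hands it to MSPI (master) */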
520 static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
521 {
522 if (!has_bspi(qspi))
523 return;
524
525 qspi->bspi_enabled = 1;
526 if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
527 return;
528
529 bcm_qspi_bspi_flush_prefetch_buffers(qspi);
530 udelay(1);
531 bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
532 udelay(1);
533 }
534
535 static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
536 {
537 if (!has_bspi(qspi))
538 return;
539
540 qspi->bspi_enabled = 0;
541 if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
542 return;
543
544 bcm_qspi_bspi_busy_poll(qspi);
545 bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
546 udelay(1);
547 }
548
549 static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
550 {
551 u32 rd = 0;
552 u32 wr = 0;
553
554 if (qspi->base[CHIP_SELECT]) {
555 rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
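/* the low byte of cs_reg holds a one-hot chip-select value; preserve the upper bits */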
556 wr = (rd & ~0xff) | (1 << cs);
557 if (rd == wr)
558 return;
559 bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
560 usleep_range(10, 20);
561 }
562
563 dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
564 qspi->curr_cs = cs;
565 }
566
567 /* MSPI helpers */
568 static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
569 const struct bcm_qspi_parms *xp)
570 {
571 u32 spcr, spbr = 0;
572
573 if (xp->speed_hz)
574 spbr = qspi->base_clk / (2 * xp->speed_hz);
575
576 spcr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
577 bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr);
578
579 if (!qspi->mspi_maj_rev)
580 /* legacy controller */
581 spcr = MSPI_MASTER_BIT;
582 else
583 spcr = 0;
584
585 /* for 16 bits per word this field must be left at zero */
586 if (xp->bits_per_word != 16)
587 spcr |= xp->bits_per_word << 2;
588 spcr |= xp->mode & 3;
589
590 bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);
591
592 if (bcm_qspi_has_fastbr(qspi)) {
593 spcr = 0;
594
595 /* enable fastbr */
596 spcr |= MSPI_SPCR3_FASTBR;
597
598 if (bcm_qspi_has_sysclk_108(qspi)) {
599 /* SYSCLK_108 */
600 spcr |= MSPI_SPCR3_SYSCLKSEL_108;
601 qspi->base_clk = MSPI_BASE_FREQ * 4;
602 /* Change spbr as we changed sysclk */
603 bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, 4);
604 }
605
606 bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
607 }
608
609 qspi->last_parms = *xp;
610 }
611
612 static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
613 struct spi_device *spi,
614 struct spi_transfer *trans)
615 {
616 struct bcm_qspi_parms xp;
617
618 xp.speed_hz = trans->speed_hz;
619 xp.bits_per_word = trans->bits_per_word;
620 xp.mode = spi->mode;
621
622 bcm_qspi_hw_set_parms(qspi, &xp);
623 }
624
625 static int bcm_qspi_setup(struct spi_device *spi)
626 {
627 struct bcm_qspi_parms *xp;
628
629 if (spi->bits_per_word > 16)
630 return -EINVAL;
631
632 xp = spi_get_ctldata(spi);
633 if (!xp) {
634 xp = kzalloc(sizeof(*xp), GFP_KERNEL);
635 if (!xp)
636 return -ENOMEM;
637 spi_set_ctldata(spi, xp);
638 }
639 xp->speed_hz = spi->max_speed_hz;
640 xp->mode = spi->mode;
641
642 if (spi->bits_per_word)
643 xp->bits_per_word = spi->bits_per_word;
644 else
645 xp->bits_per_word = 8;
646
647 return 0;
648 }
649
650 static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
651 struct qspi_trans *qt)
652 {
653 if (qt->mspi_last_trans &&
654 spi_transfer_is_last(qspi->master, qt->trans))
655 return true;
656 else
657 return false;
658 }
659
660 static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
661 struct qspi_trans *qt, int flags)
662 {
663 int ret = TRANS_STATUS_BREAK_NONE;
664
665 /* count the last transferred bytes */
666 if (qt->trans->bits_per_word <= 8)
667 qt->byte++;
668 else
669 qt->byte += 2;
670
671 if (qt->byte >= qt->trans->len) {
672 /* we're at the end of the spi_transfer */
673 /* in TX mode, need to pause for a delay or CS change */
674 if (qt->trans->delay_usecs &&
675 (flags & TRANS_STATUS_BREAK_DELAY))
676 ret |= TRANS_STATUS_BREAK_DELAY;
677 if (qt->trans->cs_change &&
678 (flags & TRANS_STATUS_BREAK_CS_CHANGE))
679 ret |= TRANS_STATUS_BREAK_CS_CHANGE;
680
681 if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
682 ret |= TRANS_STATUS_BREAK_EOM;
683 else
684 ret |= TRANS_STATUS_BREAK_NO_BYTES;
685
686 qt->trans = NULL;
687 }
688
689 dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
690 qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
691 return ret;
692 }
693
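/*
 * Each RXRAM slot spans two 32-bit registers: the MSB byte sits at
 * slot * 8 and the LSB byte at slot * 8 + 0x4; only the low byte of
 * each register carries data.
 */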
694 static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
695 {
696 u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
697
698 /* mask out reserved bits */
699 return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
700 }
701
702 static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
703 {
704 u32 reg_offset = MSPI_RXRAM;
705 u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
706 u32 msb_offset = reg_offset + (slot << 3);
707
708 return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
709 ((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
710 }
711
712 static void read_from_hw(struct bcm_qspi *qspi, int slots)
713 {
714 struct qspi_trans tp;
715 int slot;
716
717 bcm_qspi_disable_bspi(qspi);
718
719 if (slots > MSPI_NUM_CDRAM) {
720 /* should never happen */
721 dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
722 return;
723 }
724
725 tp = qspi->trans_pos;
726
727 for (slot = 0; slot < slots; slot++) {
728 if (tp.trans->bits_per_word <= 8) {
729 u8 *buf = tp.trans->rx_buf;
730
731 if (buf)
732 buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
733 dev_dbg(&qspi->pdev->dev, "RD %02x\n",
734 buf ? buf[tp.byte] : 0x0);
735 } else {
736 u16 *buf = tp.trans->rx_buf;
737
738 if (buf)
739 buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
740 slot);
741 dev_dbg(&qspi->pdev->dev, "RD %04x\n",
742 buf ? buf[tp.byte / 2] : 0x0);
743 }
744
745 update_qspi_trans_byte_count(qspi, &tp,
746 TRANS_STATUS_BREAK_NONE);
747 }
748
749 qspi->trans_pos = tp;
750 }
751
752 static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
753 u8 val)
754 {
755 u32 reg_offset = MSPI_TXRAM + (slot << 3);
756
757 /* mask out reserved bits */
758 bcm_qspi_write(qspi, MSPI, reg_offset, val);
759 }
760
761 static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
762 u16 val)
763 {
764 u32 reg_offset = MSPI_TXRAM;
765 u32 msb_offset = reg_offset + (slot << 3);
766 u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
767
768 bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
769 bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
770 }
771
772 static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
773 {
774 return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
775 }
776
777 static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
778 {
779 bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
780 }
781
782 /* Return number of slots written */
783 static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
784 {
785 struct qspi_trans tp;
786 int slot = 0, tstatus = 0;
787 u32 mspi_cdram = 0;
788
789 bcm_qspi_disable_bspi(qspi);
790 tp = qspi->trans_pos;
791 bcm_qspi_update_parms(qspi, spi, tp.trans);
792
793 /* Run until the end of the transfer or until all CDRAM slots are filled */
794 while (!tstatus && slot < MSPI_NUM_CDRAM) {
795 if (tp.trans->bits_per_word <= 8) {
796 const u8 *buf = tp.trans->tx_buf;
797 u8 val = buf ? buf[tp.byte] : 0x00;
798
799 write_txram_slot_u8(qspi, slot, val);
800 dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
801 } else {
802 const u16 *buf = tp.trans->tx_buf;
803 u16 val = buf ? buf[tp.byte / 2] : 0x0000;
804
805 write_txram_slot_u16(qspi, slot, val);
806 dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
807 }
808 mspi_cdram = MSPI_CDRAM_CONT_BIT;
809
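/* the CDRAM PCS bits are active low: only the bit for the addressed chip select is left cleared */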
810 if (has_bspi(qspi))
811 mspi_cdram &= ~1;
812 else
813 mspi_cdram |= (~(1 << spi->chip_select) &
814 MSPI_CDRAM_PCS);
815
816 mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
817 MSPI_CDRAM_BITSE_BIT);
818
819 write_cdram_slot(qspi, slot, mspi_cdram);
820
821 tstatus = update_qspi_trans_byte_count(qspi, &tp,
822 TRANS_STATUS_BREAK_TX);
823 slot++;
824 }
825
826 if (!slot) {
827 dev_err(&qspi->pdev->dev, "%s: no data to send?\n", __func__);
828 goto done;
829 }
830
831 dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
832 bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
833 bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);
834
835 /*
836 * case 1) EOM =1, cs_change =0: SSb inactive
837 * case 2) EOM =1, cs_change =1: SSb stay active
838 * case 3) EOM =0, cs_change =0: SSb stay active
839 * case 4) EOM =0, cs_change =1: SSb inactive
840 */
841 if (((tstatus & TRANS_STATUS_BREAK_DESELECT)
842 == TRANS_STATUS_BREAK_CS_CHANGE) ||
843 ((tstatus & TRANS_STATUS_BREAK_DESELECT)
844 == TRANS_STATUS_BREAK_EOM)) {
845 mspi_cdram = read_cdram_slot(qspi, slot - 1) &
846 ~MSPI_CDRAM_CONT_BIT;
847 write_cdram_slot(qspi, slot - 1, mspi_cdram);
848 }
849
850 if (has_bspi(qspi))
851 bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);
852
853 /* Must flush previous writes before starting MSPI operation */
854 mb();
855 /* Set cont | spe | spifie */
856 bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);
857
858 done:
859 return slot;
860 }
861
862 static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
863 const struct spi_mem_op *op)
864 {
865 struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
866 u32 addr = 0, len, rdlen, len_words, from = 0;
867 int ret = 0;
868 unsigned long timeo = msecs_to_jiffies(100);
869 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
870
871 if (bcm_qspi_bspi_ver_three(qspi))
872 if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
873 return -EIO;
874
875 from = op->addr.val;
876 if (!spi->cs_gpiod)
877 bcm_qspi_chip_select(qspi, spi->chip_select);
878 bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
879
880 /*
881 * when using flex mode we need to send
882 * the upper address byte to bspi
883 */
884 if (!bcm_qspi_bspi_ver_three(qspi)) {
885 addr = from & 0xff000000;
886 bcm_qspi_write(qspi, BSPI,
887 BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
888 }
889
890 if (!qspi->xfer_mode.flex_mode)
891 addr = from;
892 else
893 addr = from & 0x00ffffff;
894
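/* BSPI <= v3 uses a remapped address space; fold the flash offset into it (see bcm_qspi_exec_mem_op()) */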
895 if (bcm_qspi_bspi_ver_three(qspi))
896 addr = (addr + 0xc00000) & 0xffffff;
897
898 /*
899 * read into the entire buffer by breaking the reads
900 * into RAF buffer read lengths
901 */
902 len = op->data.nbytes;
903 qspi->bspi_rf_op_idx = 0;
904
905 do {
906 if (len > BSPI_READ_LENGTH)
907 rdlen = BSPI_READ_LENGTH;
908 else
909 rdlen = len;
910
911 reinit_completion(&qspi->bspi_done);
912 bcm_qspi_enable_bspi(qspi);
913 len_words = (rdlen + 3) >> 2;
914 qspi->bspi_rf_op = op;
915 qspi->bspi_rf_op_status = 0;
916 qspi->bspi_rf_op_len = rdlen;
917 dev_dbg(&qspi->pdev->dev,
918 "bspi xfr addr 0x%x len 0x%x", addr, rdlen);
919 bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
920 bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
921 bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
922 if (qspi->soc_intc) {
923 /*
924 * clear soc MSPI and BSPI interrupts and enable
925 * BSPI interrupts.
926 */
927 soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
928 soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
929 }
930
931 /* Must flush previous writes before starting BSPI operation */
932 mb();
933 bcm_qspi_bspi_lr_start(qspi);
934 if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
935 dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
936 ret = -ETIMEDOUT;
937 break;
938 }
939
940 /* advance to the next RAF chunk */
941 addr += rdlen;
942 len -= rdlen;
943 } while (len);
944
945 return ret;
946 }
947
948 static int bcm_qspi_transfer_one(struct spi_master *master,
949 struct spi_device *spi,
950 struct spi_transfer *trans)
951 {
952 struct bcm_qspi *qspi = spi_master_get_devdata(master);
953 int slots;
954 unsigned long timeo = msecs_to_jiffies(100);
955
956 if (!spi->cs_gpiod)
957 bcm_qspi_chip_select(qspi, spi->chip_select);
958 qspi->trans_pos.trans = trans;
959 qspi->trans_pos.byte = 0;
960
961 while (qspi->trans_pos.byte < trans->len) {
962 reinit_completion(&qspi->mspi_done);
963
964 slots = write_to_hw(qspi, spi);
965 if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
966 dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
967 return -ETIMEDOUT;
968 }
969
970 read_from_hw(qspi, slots);
971 }
972 bcm_qspi_enable_bspi(qspi);
973
974 return 0;
975 }
976
977 static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
978 const struct spi_mem_op *op)
979 {
980 struct spi_master *master = spi->master;
981 struct bcm_qspi *qspi = spi_master_get_devdata(master);
982 struct spi_transfer t[2];
983 u8 cmd[6] = { };
984 int ret, i;
985
986 memset(cmd, 0, sizeof(cmd));
987 memset(t, 0, sizeof(t));
988
989 /* tx */
990 /* opcode is in cmd[0] */
991 cmd[0] = op->cmd.opcode;
992 for (i = 0; i < op->addr.nbytes; i++)
993 cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
994
995 t[0].tx_buf = cmd;
996 t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
997 t[0].bits_per_word = spi->bits_per_word;
998 t[0].tx_nbits = op->cmd.buswidth;
999 /* let MSPI know that this is not the last transfer */
1000 qspi->trans_pos.mspi_last_trans = false;
1001 ret = bcm_qspi_transfer_one(master, spi, &t[0]);
1002
1003 /* rx: the next transfer is the last one */
1004 qspi->trans_pos.mspi_last_trans = true;
1005 if (!ret) {
1006 /* rx */
1007 t[1].rx_buf = op->data.buf.in;
1008 t[1].len = op->data.nbytes;
1009 t[1].rx_nbits = op->data.buswidth;
1010 t[1].bits_per_word = spi->bits_per_word;
1011 ret = bcm_qspi_transfer_one(master, spi, &t[1]);
1012 }
1013
1014 return ret;
1015 }
1016
1017 static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
1018 const struct spi_mem_op *op)
1019 {
1020 struct spi_device *spi = mem->spi;
1021 struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
1022 int ret = 0;
1023 bool mspi_read = false;
1024 u32 addr = 0, len;
1025 u_char *buf;
1026
1027 if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
1028 op->data.dir != SPI_MEM_DATA_IN)
1029 return -ENOTSUPP;
1030
1031 buf = op->data.buf.in;
1032 addr = op->addr.val;
1033 len = op->data.nbytes;
1034
1035 if (bcm_qspi_bspi_ver_three(qspi)) {
1036 /*
1037 * The address coming into this function is a raw flash offset.
1038 * But for BSPI <= V3, we need to convert it to a remapped BSPI
1039 * address. If it crosses a 4MB boundary, just revert back to
1040 * using MSPI.
1041 */
1042 addr = (addr + 0xc00000) & 0xffffff;
1043
1044 if ((~ADDR_4MB_MASK & addr) ^
1045 (~ADDR_4MB_MASK & (addr + len - 1)))
1046 mspi_read = true;
1047 }
1048
1049 /* non-aligned and very short transfers are handled by MSPI */
1050 if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
1051 len < 4)
1052 mspi_read = true;
1053
1054 if (mspi_read)
1055 return bcm_qspi_mspi_exec_mem_op(spi, op);
1056
1057 ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
1058
1059 if (!ret)
1060 ret = bcm_qspi_bspi_exec_mem_op(spi, op);
1061
1062 return ret;
1063 }
1064
1065 static void bcm_qspi_cleanup(struct spi_device *spi)
1066 {
1067 struct bcm_qspi_parms *xp = spi_get_ctldata(spi);
1068
1069 kfree(xp);
1070 }
1071
1072 static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
1073 {
1074 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1075 struct bcm_qspi *qspi = qspi_dev_id->dev;
1076 u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
1077
1078 if (status & MSPI_MSPI_STATUS_SPIF) {
1079 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1080 /* clear interrupt */
1081 status &= ~MSPI_MSPI_STATUS_SPIF;
1082 bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
1083 if (qspi->soc_intc)
1084 soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
1085 complete(&qspi->mspi_done);
1086 return IRQ_HANDLED;
1087 }
1088
1089 return IRQ_NONE;
1090 }
1091
1092 static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
1093 {
1094 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1095 struct bcm_qspi *qspi = qspi_dev_id->dev;
1096 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1097 u32 status = qspi_dev_id->irqp->mask;
1098
1099 if (qspi->bspi_enabled && qspi->bspi_rf_op) {
1100 bcm_qspi_bspi_lr_data_read(qspi);
1101 if (qspi->bspi_rf_op_len == 0) {
1102 qspi->bspi_rf_op = NULL;
1103 if (qspi->soc_intc) {
1104 /* disable soc BSPI interrupt */
1105 soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
1106 false);
1107 /* indicate done */
1108 status = INTR_BSPI_LR_SESSION_DONE_MASK;
1109 }
1110
1111 if (qspi->bspi_rf_op_status)
1112 bcm_qspi_bspi_lr_clear(qspi);
1113 else
1114 bcm_qspi_bspi_flush_prefetch_buffers(qspi);
1115 }
1116
1117 if (qspi->soc_intc)
1118 /* clear soc BSPI interrupt */
1119 soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
1120 }
1121
1122 status &= INTR_BSPI_LR_SESSION_DONE_MASK;
1123 if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
1124 complete(&qspi->bspi_done);
1125
1126 return IRQ_HANDLED;
1127 }
1128
1129 static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
1130 {
1131 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1132 struct bcm_qspi *qspi = qspi_dev_id->dev;
1133 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1134
1135 dev_err(&qspi->pdev->dev, "BSPI INT error\n");
1136 qspi->bspi_rf_op_status = -EIO;
1137 if (qspi->soc_intc)
1138 /* clear soc interrupt */
1139 soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);
1140
1141 complete(&qspi->bspi_done);
1142 return IRQ_HANDLED;
1143 }
1144
1145 static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
1146 {
1147 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1148 struct bcm_qspi *qspi = qspi_dev_id->dev;
1149 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1150 irqreturn_t ret = IRQ_NONE;
1151
1152 if (soc_intc) {
1153 u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
1154
1155 if (status & MSPI_DONE)
1156 ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
1157 else if (status & BSPI_DONE)
1158 ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
1159 else if (status & BSPI_ERR)
1160 ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
1161 }
1162
1163 return ret;
1164 }
1165
1166 static const struct bcm_qspi_irq qspi_irq_tab[] = {
1167 {
1168 .irq_name = "spi_lr_fullness_reached",
1169 .irq_handler = bcm_qspi_bspi_lr_l2_isr,
1170 .mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
1171 },
1172 {
1173 .irq_name = "spi_lr_session_aborted",
1174 .irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
1175 .mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
1176 },
1177 {
1178 .irq_name = "spi_lr_impatient",
1179 .irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
1180 .mask = INTR_BSPI_LR_IMPATIENT_MASK,
1181 },
1182 {
1183 .irq_name = "spi_lr_session_done",
1184 .irq_handler = bcm_qspi_bspi_lr_l2_isr,
1185 .mask = INTR_BSPI_LR_SESSION_DONE_MASK,
1186 },
1187 #ifdef QSPI_INT_DEBUG
1188 /* this interrupt is for debug purposes only, don't request the irq */
1189 {
1190 .irq_name = "spi_lr_overread",
1191 .irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
1192 .mask = INTR_BSPI_LR_OVERREAD_MASK,
1193 },
1194 #endif
1195 {
1196 .irq_name = "mspi_done",
1197 .irq_handler = bcm_qspi_mspi_l2_isr,
1198 .mask = INTR_MSPI_DONE_MASK,
1199 },
1200 {
1201 .irq_name = "mspi_halted",
1202 .irq_handler = bcm_qspi_mspi_l2_isr,
1203 .mask = INTR_MSPI_HALTED_MASK,
1204 },
1205 {
1206 /* single muxed L1 interrupt source */
1207 .irq_name = "spi_l1_intr",
1208 .irq_handler = bcm_qspi_l1_isr,
1209 .irq_source = MUXED_L1,
1210 .mask = QSPI_INTERRUPTS_ALL,
1211 },
1212 };
1213
1214 static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
1215 {
1216 u32 val = 0;
1217
1218 val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
1219 qspi->bspi_maj_rev = (val >> 8) & 0xff;
1220 qspi->bspi_min_rev = val & 0xff;
1221 if (!(bcm_qspi_bspi_ver_three(qspi))) {
1222 /* Force mapping of BSPI address -> flash offset */
1223 bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
1224 bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
1225 }
1226 qspi->bspi_enabled = 1;
1227 bcm_qspi_disable_bspi(qspi);
1228 bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
1229 bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
1230 }
1231
1232 static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
1233 {
1234 struct bcm_qspi_parms parms;
1235
1236 bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
1237 bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
1238 bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
1239 bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
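/* 0x20 enables only SPIFIE (transfer-finished interrupt); write_to_hw() later writes 0xe0 = cont | SPE | SPIFIE */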
1240 bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);
1241
1242 parms.mode = SPI_MODE_3;
1243 parms.bits_per_word = 8;
1244 parms.speed_hz = qspi->max_speed_hz;
1245 bcm_qspi_hw_set_parms(qspi, &parms);
1246
1247 if (has_bspi(qspi))
1248 bcm_qspi_bspi_init(qspi);
1249 }
1250
1251 static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
1252 {
1253 bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
1254 if (has_bspi(qspi))
1255 bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
1256
1257 }
1258
1259 static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
1260 .exec_op = bcm_qspi_exec_mem_op,
1261 };
1262
1263 struct bcm_qspi_data {
1264 bool has_mspi_rev;
1265 bool has_spcr3_sysclk;
1266 };
1267
1268 static const struct bcm_qspi_data bcm_qspi_no_rev_data = {
1269 .has_mspi_rev = false,
1270 .has_spcr3_sysclk = false,
1271 };
1272
1273 static const struct bcm_qspi_data bcm_qspi_rev_data = {
1274 .has_mspi_rev = true,
1275 .has_spcr3_sysclk = false,
1276 };
1277
1278 static const struct bcm_qspi_data bcm_qspi_spcr3_data = {
1279 .has_mspi_rev = true,
1280 .has_spcr3_sysclk = true,
1281 };
1282
1283 static const struct of_device_id bcm_qspi_of_match[] = {
1284 {
1285 .compatible = "brcm,spi-bcm7445-qspi",
1286 .data = &bcm_qspi_rev_data,
1287
1288 },
1289 {
1290 .compatible = "brcm,spi-bcm-qspi",
1291 .data = &bcm_qspi_no_rev_data,
1292 },
1293 {
1294 .compatible = "brcm,spi-bcm7216-qspi",
1295 .data = &bcm_qspi_spcr3_data,
1296 },
1297 {
1298 .compatible = "brcm,spi-bcm7278-qspi",
1299 .data = &bcm_qspi_spcr3_data,
1300 },
1301 {},
1302 };
1303 MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
1304
1305 int bcm_qspi_probe(struct platform_device *pdev,
1306 struct bcm_qspi_soc_intc *soc_intc)
1307 {
1308 const struct of_device_id *of_id = NULL;
1309 const struct bcm_qspi_data *data;
1310 struct device *dev = &pdev->dev;
1311 struct bcm_qspi *qspi;
1312 struct spi_master *master;
1313 struct resource *res;
1314 int irq, ret = 0, num_ints = 0;
1315 u32 val;
1316 u32 rev = 0;
1317 const char *name = NULL;
1318 int num_irqs = ARRAY_SIZE(qspi_irq_tab);
1319
1320 /* We only support device-tree instantiation */
1321 if (!dev->of_node)
1322 return -ENODEV;
1323
1324 of_id = of_match_node(bcm_qspi_of_match, dev->of_node);
1325 if (!of_id)
1326 return -ENODEV;
1327
1328 data = of_id->data;
1329
1330 master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
1331 if (!master) {
1332 dev_err(dev, "error allocating spi_master\n");
1333 return -ENOMEM;
1334 }
1335
1336 qspi = spi_master_get_devdata(master);
1337
1338 qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
1339 if (IS_ERR(qspi->clk))
1340 return PTR_ERR(qspi->clk);
1341
1342 qspi->pdev = pdev;
1343 qspi->trans_pos.trans = NULL;
1344 qspi->trans_pos.byte = 0;
1345 qspi->trans_pos.mspi_last_trans = true;
1346 qspi->master = master;
1347
1348 master->bus_num = -1;
1349 master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD;
1350 master->setup = bcm_qspi_setup;
1351 master->transfer_one = bcm_qspi_transfer_one;
1352 master->mem_ops = &bcm_qspi_mem_ops;
1353 master->cleanup = bcm_qspi_cleanup;
1354 master->dev.of_node = dev->of_node;
1355 master->num_chipselect = NUM_CHIPSELECT;
1356 master->use_gpio_descriptors = true;
1357
1358 qspi->big_endian = of_device_is_big_endian(dev->of_node);
1359
1360 if (!of_property_read_u32(dev->of_node, "num-cs", &val))
1361 master->num_chipselect = val;
1362
1363 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
1364 if (!res)
1365 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1366 "mspi");
1367
1368 if (res) {
1369 qspi->base[MSPI] = devm_ioremap_resource(dev, res);
1370 if (IS_ERR(qspi->base[MSPI])) {
1371 ret = PTR_ERR(qspi->base[MSPI]);
1372 goto qspi_resource_err;
1373 }
1374 } else {
1375 goto qspi_resource_err;
1376 }
1377
1378 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
1379 if (res) {
1380 qspi->base[BSPI] = devm_ioremap_resource(dev, res);
1381 if (IS_ERR(qspi->base[BSPI])) {
1382 ret = PTR_ERR(qspi->base[BSPI]);
1383 goto qspi_resource_err;
1384 }
1385 qspi->bspi_mode = true;
1386 } else {
1387 qspi->bspi_mode = false;
1388 }
1389
1390 dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");
1391
1392 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
1393 if (res) {
1394 qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
1395 if (IS_ERR(qspi->base[CHIP_SELECT])) {
1396 ret = PTR_ERR(qspi->base[CHIP_SELECT]);
1397 goto qspi_resource_err;
1398 }
1399 }
1400
1401 qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
1402 GFP_KERNEL);
1403 if (!qspi->dev_ids) {
1404 ret = -ENOMEM;
1405 goto qspi_resource_err;
1406 }
1407
1408 for (val = 0; val < num_irqs; val++) {
1409 irq = -1;
1410 name = qspi_irq_tab[val].irq_name;
1411 if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
1412 /* get the l2 interrupts */
1413 irq = platform_get_irq_byname_optional(pdev, name);
1414 } else if (!num_ints && soc_intc) {
1415 /* all mspi, bspi intrs muxed to one L1 intr */
1416 irq = platform_get_irq(pdev, 0);
1417 }
1418
1419 if (irq >= 0) {
1420 ret = devm_request_irq(&pdev->dev, irq,
1421 qspi_irq_tab[val].irq_handler, 0,
1422 name,
1423 &qspi->dev_ids[val]);
1424 if (ret < 0) {
1425 dev_err(&pdev->dev, "IRQ %s not found\n", name);
1426 goto qspi_probe_err;
1427 }
1428
1429 qspi->dev_ids[val].dev = qspi;
1430 qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
1431 num_ints++;
1432 dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
1433 qspi_irq_tab[val].irq_name,
1434 irq);
1435 }
1436 }
1437
1438 if (!num_ints) {
1439 dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
1440 ret = -EINVAL;
1441 goto qspi_probe_err;
1442 }
1443
1444 /*
1445 * Some SoCs route the SPI controller's interrupt bits through a
1446 * SoC-specific interrupt controller
1447 */
1448 if (soc_intc) {
1449 qspi->soc_intc = soc_intc;
1450 soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
1451 } else {
1452 qspi->soc_intc = NULL;
1453 }
1454
1455 ret = clk_prepare_enable(qspi->clk);
1456 if (ret) {
1457 dev_err(dev, "failed to prepare clock\n");
1458 goto qspi_probe_err;
1459 }
1460
1461 qspi->base_clk = clk_get_rate(qspi->clk);
1462
1463 if (data->has_mspi_rev) {
1464 rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
1465 /* some older revs do not have a MSPI_REV register */
1466 if ((rev & 0xff) == 0xff)
1467 rev = 0;
1468 }
1469
1470 qspi->mspi_maj_rev = (rev >> 4) & 0xf;
1471 qspi->mspi_min_rev = rev & 0xf;
1472 qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;
1473
1474 qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
1475
1476 bcm_qspi_hw_init(qspi);
1477 init_completion(&qspi->mspi_done);
1478 init_completion(&qspi->bspi_done);
1479 qspi->curr_cs = -1;
1480
1481 platform_set_drvdata(pdev, qspi);
1482
1483 qspi->xfer_mode.width = -1;
1484 qspi->xfer_mode.addrlen = -1;
1485 qspi->xfer_mode.hp = -1;
1486
1487 ret = devm_spi_register_master(&pdev->dev, master);
1488 if (ret < 0) {
1489 dev_err(dev, "can't register master\n");
1490 goto qspi_reg_err;
1491 }
1492
1493 return 0;
1494
1495 qspi_reg_err:
1496 bcm_qspi_hw_uninit(qspi);
1497 clk_disable_unprepare(qspi->clk);
1498 qspi_probe_err:
1499 kfree(qspi->dev_ids);
1500 qspi_resource_err:
1501 spi_master_put(master);
1502 return ret;
1503 }
1504 /* probe function to be called by SoC specific platform driver probe */
1505 EXPORT_SYMBOL_GPL(bcm_qspi_probe);
1506
1507 int bcm_qspi_remove(struct platform_device *pdev)
1508 {
1509 struct bcm_qspi *qspi = platform_get_drvdata(pdev);
1510
1511 bcm_qspi_hw_uninit(qspi);
1512 clk_disable_unprepare(qspi->clk);
1513 kfree(qspi->dev_ids);
1514 spi_unregister_master(qspi->master);
1515
1516 return 0;
1517 }
1518 /* function to be called by SoC specific platform driver remove() */
1519 EXPORT_SYMBOL_GPL(bcm_qspi_remove);
1520
1521 static int __maybe_unused bcm_qspi_suspend(struct device *dev)
1522 {
1523 struct bcm_qspi *qspi = dev_get_drvdata(dev);
1524
1525 /* store the override strap value */
1526 if (!bcm_qspi_bspi_ver_three(qspi))
1527 qspi->s3_strap_override_ctrl =
1528 bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
1529
1530 spi_master_suspend(qspi->master);
1531 clk_disable_unprepare(qspi->clk);
1532 bcm_qspi_hw_uninit(qspi);
1533
1534 return 0;
1535 }
1536
1537 static int __maybe_unused bcm_qspi_resume(struct device *dev)
1538 {
1539 struct bcm_qspi *qspi = dev_get_drvdata(dev);
1540 int ret = 0;
1541
1542 bcm_qspi_hw_init(qspi);
1543 bcm_qspi_chip_select(qspi, qspi->curr_cs);
1544 if (qspi->soc_intc)
1545 /* enable MSPI interrupt */
1546 qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
1547 true);
1548
1549 ret = clk_prepare_enable(qspi->clk);
1550 if (!ret)
1551 spi_master_resume(qspi->master);
1552
1553 return ret;
1554 }
1555
1556 SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);
1557
1558 /* pm_ops to be called by SoC specific platform driver */
1559 EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);
1560
1561 MODULE_AUTHOR("Kamal Dasu");
1562 MODULE_DESCRIPTION("Broadcom QSPI driver");
1563 MODULE_LICENSE("GPL v2");
1564 MODULE_ALIAS("platform:" DRIVER_NAME);