]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/mtd/nand/atmel/nand-controller.c
Merge tag 'nfs-for-4.15-3' of git://git.linux-nfs.org/projects/anna/linux-nfs
[mirror_ubuntu-bionic-kernel.git] / drivers / mtd / nand / atmel / nand-controller.c
1 /*
2 * Copyright 2017 ATMEL
3 * Copyright 2017 Free Electrons
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * Derived from the atmel_nand.c driver which contained the following
8 * copyrights:
9 *
10 * Copyright 2003 Rick Bronson
11 *
12 * Derived from drivers/mtd/nand/autcpu12.c
13 * Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
14 *
15 * Derived from drivers/mtd/spia.c
16 * Copyright 2000 Steven J. Hill (sjhill@cotw.com)
17 *
18 *
19 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
20 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
21 *
22 * Derived from Das U-Boot source code
23 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
24 * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
25 *
26 * Add Programmable Multibit ECC support for various AT91 SoC
27 * Copyright 2012 ATMEL, Hong Xu
28 *
29 * Add Nand Flash Controller support for SAMA5 SoC
30 * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
31 *
32 * This program is free software; you can redistribute it and/or modify
33 * it under the terms of the GNU General Public License version 2 as
34 * published by the Free Software Foundation.
35 *
36 * A few words about the naming convention in this file. This convention
37 * applies to structure and function names.
38 *
39 * Prefixes:
40 *
41 * - atmel_nand_: all generic structures/functions
42 * - atmel_smc_nand_: all structures/functions specific to the SMC interface
43 * (at91sam9 and avr32 SoCs)
44 * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
45 * (sama5 SoCs and later)
46 * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
47 * that is available in the HSMC block
48 * - <soc>_nand_: all SoC specific structures/functions
49 */
50
51 #include <linux/clk.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/dmaengine.h>
54 #include <linux/genalloc.h>
55 #include <linux/gpio.h>
56 #include <linux/gpio/consumer.h>
57 #include <linux/interrupt.h>
58 #include <linux/mfd/syscon.h>
59 #include <linux/mfd/syscon/atmel-matrix.h>
60 #include <linux/mfd/syscon/atmel-smc.h>
61 #include <linux/module.h>
62 #include <linux/mtd/rawnand.h>
63 #include <linux/of_address.h>
64 #include <linux/of_irq.h>
65 #include <linux/of_platform.h>
66 #include <linux/iopoll.h>
67 #include <linux/platform_device.h>
68 #include <linux/regmap.h>
69
70 #include "pmecc.h"
71
/*
 * NFC (NAND Flash Controller) register offsets and bitfields, as found in
 * the HSMC block of sama5 SoCs.
 */
#define ATMEL_HSMC_NFC_CFG			0x0
#define ATMEL_HSMC_NFC_CFG_SPARESIZE(x)		(((x) / 4) << 24)
#define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK	GENMASK(30, 24)
#define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul)	(((cyc) << 16) | ((mul) << 20))
#define ATMEL_HSMC_NFC_CFG_DTO_MAX		GENMASK(22, 16)
#define ATMEL_HSMC_NFC_CFG_RBEDGE		BIT(13)
#define ATMEL_HSMC_NFC_CFG_FALLING_EDGE		BIT(12)
#define ATMEL_HSMC_NFC_CFG_RSPARE		BIT(9)
#define ATMEL_HSMC_NFC_CFG_WSPARE		BIT(8)
#define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK	GENMASK(2, 0)
/* Page size is encoded as log2(writesize / 512). */
#define ATMEL_HSMC_NFC_CFG_PAGESIZE(x)		(fls((x) / 512) - 1)

#define ATMEL_HSMC_NFC_CTRL			0x4
#define ATMEL_HSMC_NFC_CTRL_EN			BIT(0)
#define ATMEL_HSMC_NFC_CTRL_DIS			BIT(1)

/* The status (SR) and interrupt (IER/IDR/IMR) registers share one layout. */
#define ATMEL_HSMC_NFC_SR			0x8
#define ATMEL_HSMC_NFC_IER			0xc
#define ATMEL_HSMC_NFC_IDR			0x10
#define ATMEL_HSMC_NFC_IMR			0x14
#define ATMEL_HSMC_NFC_SR_ENABLED		BIT(1)
#define ATMEL_HSMC_NFC_SR_RB_RISE		BIT(4)
#define ATMEL_HSMC_NFC_SR_RB_FALL		BIT(5)
#define ATMEL_HSMC_NFC_SR_BUSY			BIT(8)
#define ATMEL_HSMC_NFC_SR_WR			BIT(11)
#define ATMEL_HSMC_NFC_SR_CSID			GENMASK(14, 12)
#define ATMEL_HSMC_NFC_SR_XFRDONE		BIT(16)
#define ATMEL_HSMC_NFC_SR_CMDDONE		BIT(17)
#define ATMEL_HSMC_NFC_SR_DTOE			BIT(20)
#define ATMEL_HSMC_NFC_SR_UNDEF			BIT(21)
#define ATMEL_HSMC_NFC_SR_AWB			BIT(22)
#define ATMEL_HSMC_NFC_SR_NFCASE		BIT(23)
/* All error conditions the driver checks after an NFC operation. */
#define ATMEL_HSMC_NFC_SR_ERRORS		(ATMEL_HSMC_NFC_SR_DTOE | \
						 ATMEL_HSMC_NFC_SR_UNDEF | \
						 ATMEL_HSMC_NFC_SR_AWB | \
						 ATMEL_HSMC_NFC_SR_NFCASE)
/* Per-CS Ready/Busy edge-detected bit (x = native R/B line id). */
#define ATMEL_HSMC_NFC_SR_RBEDGE(x)		BIT((x) + 24)

#define ATMEL_HSMC_NFC_ADDR			0x18
#define ATMEL_HSMC_NFC_BANK			0x1c

#define ATMEL_NFC_MAX_RB_ID			7

#define ATMEL_NFC_SRAM_SIZE			0x2400

/* Fields of an NFC command word (see atmel_nfc_exec_op()). */
#define ATMEL_NFC_CMD(pos, cmd)			((cmd) << (((pos) * 8) + 2))
#define ATMEL_NFC_VCMD2				BIT(18)
#define ATMEL_NFC_ACYCLE(naddrs)		((naddrs) << 19)
#define ATMEL_NFC_CSID(cs)			((cs) << 22)
#define ATMEL_NFC_DATAEN			BIT(25)
#define ATMEL_NFC_NFCWR				BIT(26)

#define ATMEL_NFC_MAX_ADDR_CYCLES		5

/* Address offsets used to drive the ALE/CLE pins on direct (SMC) accesses. */
#define ATMEL_NAND_ALE_OFFSET			BIT(21)
#define ATMEL_NAND_CLE_OFFSET			BIT(22)

#define DEFAULT_TIMEOUT_MS			1000
/* Transfers shorter than this stay in PIO mode (DMA setup overhead). */
#define MIN_DMA_LEN				128
131
/* How the NAND Ready/Busy line of a chip select is wired. */
enum atmel_nand_rb_type {
	ATMEL_NAND_NO_RB,
	ATMEL_NAND_NATIVE_RB,
	ATMEL_NAND_GPIO_RB,
};

/* Ready/Busy description: a GPIO descriptor or a native R/B line id. */
struct atmel_nand_rb {
	enum atmel_nand_rb_type type;
	union {
		struct gpio_desc *gpio;	/* valid when type == ATMEL_NAND_GPIO_RB */
		int id;			/* valid when type == ATMEL_NAND_NATIVE_RB */
	};
};

/* Per-chip-select state: I/O window, R/B wiring, CS GPIO and SMC timings. */
struct atmel_nand_cs {
	int id;
	struct atmel_nand_rb rb;
	struct gpio_desc *csgpio;
	struct {
		void __iomem *virt;	/* CPU mapping of the CS data window */
		dma_addr_t dma;		/* bus address of the same window */
	} io;

	struct atmel_smc_cs_conf smcconf;
};

/* One NAND device, possibly spanning several chip selects. */
struct atmel_nand {
	struct list_head node;
	struct device *dev;
	struct nand_chip base;
	struct atmel_nand_cs *activecs;	/* currently selected CS, or NULL */
	struct atmel_pmecc_user *pmecc;
	struct gpio_desc *cdgpio;
	int numcs;
	struct atmel_nand_cs cs[];	/* numcs entries, flexible array */
};

/* Retrieve the atmel_nand wrapper embedding a generic nand_chip. */
static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
{
	return container_of(chip, struct atmel_nand, base);
}
173
/* Direction of the data phase of an NFC operation. */
enum atmel_nfc_data_xfer {
	ATMEL_NFC_NO_DATA,
	ATMEL_NFC_READ_DATA,
	ATMEL_NFC_WRITE_DATA,
};

/*
 * A pending NFC operation: the command(s), address cycles and data phase
 * to execute, plus the status bits to wait for and the errors collected
 * while executing it.
 */
struct atmel_nfc_op {
	u8 cs;
	u8 ncmds;
	u8 cmds[2];	/* the NFC supports at most two command cycles */
	u8 addrs[5];	/* up to ATMEL_NFC_MAX_ADDR_CYCLES address cycles */
	u8 naddrs;
	enum atmel_nfc_data_xfer data;
	u32 wait;	/* SR bits to wait for */
	u32 errors;	/* SR error bits accumulated so far */
};

struct atmel_nand_controller;
struct atmel_nand_controller_caps;

/* Hooks implemented differently by the SMC and HSMC controller flavors. */
struct atmel_nand_controller_ops {
	int (*probe)(struct platform_device *pdev,
		     const struct atmel_nand_controller_caps *caps);
	int (*remove)(struct atmel_nand_controller *nc);
	void (*nand_init)(struct atmel_nand_controller *nc,
			  struct atmel_nand *nand);
	int (*ecc_init)(struct atmel_nand *nand);
	int (*setup_data_interface)(struct atmel_nand *nand, int csline,
				    const struct nand_data_interface *conf);
};

/* Static capabilities of a controller flavor (set per compatible). */
struct atmel_nand_controller_caps {
	bool has_dma;
	bool legacy_of_bindings;
	u32 ale_offs;
	u32 cle_offs;
	const struct atmel_nand_controller_ops *ops;
};
212
/* Common state shared by the SMC and HSMC controller flavors. */
struct atmel_nand_controller {
	struct nand_hw_control base;
	const struct atmel_nand_controller_caps *caps;
	struct device *dev;
	struct regmap *smc;
	struct dma_chan *dmac;		/* NULL when DMA is unavailable */
	struct atmel_pmecc *pmecc;
	struct list_head chips;
	struct clk *mck;
};

static inline struct atmel_nand_controller *
to_nand_controller(struct nand_hw_control *ctl)
{
	return container_of(ctl, struct atmel_nand_controller, base);
}

/* SMC flavor (at91sam9/avr32): adds the bus-matrix EBI CSA handling. */
struct atmel_smc_nand_controller {
	struct atmel_nand_controller base;
	struct regmap *matrix;
	unsigned int ebi_csa_offs;
};

static inline struct atmel_smc_nand_controller *
to_smc_nand_controller(struct nand_hw_control *ctl)
{
	return container_of(to_nand_controller(ctl),
			    struct atmel_smc_nand_controller, base);
}

/* HSMC flavor (sama5+): adds the NFC sub-block, its SRAM and its IRQ. */
struct atmel_hsmc_nand_controller {
	struct atmel_nand_controller base;
	struct {
		struct gen_pool *pool;
		void __iomem *virt;
		dma_addr_t dma;
	} sram;
	const struct atmel_hsmc_reg_layout *hsmc_layout;
	struct regmap *io;		/* NFC command I/O window */
	struct atmel_nfc_op op;		/* operation being built/executed */
	struct completion complete;	/* signalled by the NFC interrupt */
	int irq;

	/* Only used when instantiating from legacy DT bindings. */
	struct clk *clk;
};

static inline struct atmel_hsmc_nand_controller *
to_hsmc_nand_controller(struct nand_hw_control *ctl)
{
	return container_of(to_nand_controller(ctl),
			    struct atmel_hsmc_nand_controller, base);
}
266
267 static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
268 {
269 op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
270 op->wait ^= status & op->wait;
271
272 return !op->wait || op->errors;
273 }
274
/*
 * NFC interrupt handler: update the pending operation from the status
 * register, mask the interrupts we consumed, and wake the waiter when
 * the operation completes.
 */
static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
{
	struct atmel_hsmc_nand_controller *nc = data;
	u32 sr, rcvd;
	bool done;

	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);

	/*
	 * Compute the set of relevant bits *before* atmel_nfc_op_done()
	 * clears them from nc->op.wait.
	 */
	rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
	done = atmel_nfc_op_done(&nc->op, sr);

	/* Mask the interrupt sources we just handled. */
	if (rcvd)
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);

	if (done)
		complete(&nc->complete);

	return rcvd ? IRQ_HANDLED : IRQ_NONE;
}
294
/*
 * Wait for the operation described in nc->op to complete, either by
 * polling the status register or by sleeping on the NFC interrupt.
 *
 * @poll: true to busy-poll SR, false to use the IRQ + completion.
 * @timeout_ms: timeout in milliseconds, 0 meaning DEFAULT_TIMEOUT_MS.
 *
 * Returns 0 on success, -ETIMEDOUT on timeout, -EIO on controller-reported
 * errors (which are also logged).
 */
static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
			  unsigned int timeout_ms)
{
	int ret;

	if (!timeout_ms)
		timeout_ms = DEFAULT_TIMEOUT_MS;

	if (poll) {
		u32 status;

		ret = regmap_read_poll_timeout(nc->base.smc,
					       ATMEL_HSMC_NFC_SR, status,
					       atmel_nfc_op_done(&nc->op,
								 status),
					       0, timeout_ms * 1000);
	} else {
		init_completion(&nc->complete);
		/* Unmask the awaited events plus all error conditions. */
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
			     nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
		ret = wait_for_completion_timeout(&nc->complete,
						  msecs_to_jiffies(timeout_ms));
		if (!ret)
			ret = -ETIMEDOUT;
		else
			ret = 0;

		/* Mask everything again; the IRQ handler masked only part. */
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
	}

	/* Translate any accumulated controller errors into error codes. */
	if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
		dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
		ret = -ETIMEDOUT;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
		dev_err(nc->base.dev, "Access to an undefined area\n");
		ret = -EIO;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
		dev_err(nc->base.dev, "Access while busy\n");
		ret = -EIO;
	}

	if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
		dev_err(nc->base.dev, "Wrong access size\n");
		ret = -EIO;
	}

	return ret;
}
347
/* DMA completion callback: wake up the thread waiting in
 * atmel_nand_dma_transfer().
 */
static void atmel_nand_dma_transfer_finished(void *data)
{
	complete((struct completion *)data);
}
354
355 static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
356 void *buf, dma_addr_t dev_dma, size_t len,
357 enum dma_data_direction dir)
358 {
359 DECLARE_COMPLETION_ONSTACK(finished);
360 dma_addr_t src_dma, dst_dma, buf_dma;
361 struct dma_async_tx_descriptor *tx;
362 dma_cookie_t cookie;
363
364 buf_dma = dma_map_single(nc->dev, buf, len, dir);
365 if (dma_mapping_error(nc->dev, dev_dma)) {
366 dev_err(nc->dev,
367 "Failed to prepare a buffer for DMA access\n");
368 goto err;
369 }
370
371 if (dir == DMA_FROM_DEVICE) {
372 src_dma = dev_dma;
373 dst_dma = buf_dma;
374 } else {
375 src_dma = buf_dma;
376 dst_dma = dev_dma;
377 }
378
379 tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
380 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
381 if (!tx) {
382 dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
383 goto err_unmap;
384 }
385
386 tx->callback = atmel_nand_dma_transfer_finished;
387 tx->callback_param = &finished;
388
389 cookie = dmaengine_submit(tx);
390 if (dma_submit_error(cookie)) {
391 dev_err(nc->dev, "Failed to do DMA tx_submit\n");
392 goto err_unmap;
393 }
394
395 dma_async_issue_pending(nc->dmac);
396 wait_for_completion(&finished);
397
398 return 0;
399
400 err_unmap:
401 dma_unmap_single(nc->dev, buf_dma, len, dir);
402
403 err:
404 dev_dbg(nc->dev, "Fall back to CPU I/O\n");
405
406 return -EIO;
407 }
408
409 static u8 atmel_nand_read_byte(struct mtd_info *mtd)
410 {
411 struct nand_chip *chip = mtd_to_nand(mtd);
412 struct atmel_nand *nand = to_atmel_nand(chip);
413
414 return ioread8(nand->activecs->io.virt);
415 }
416
417 static u16 atmel_nand_read_word(struct mtd_info *mtd)
418 {
419 struct nand_chip *chip = mtd_to_nand(mtd);
420 struct atmel_nand *nand = to_atmel_nand(chip);
421
422 return ioread16(nand->activecs->io.virt);
423 }
424
425 static void atmel_nand_write_byte(struct mtd_info *mtd, u8 byte)
426 {
427 struct nand_chip *chip = mtd_to_nand(mtd);
428 struct atmel_nand *nand = to_atmel_nand(chip);
429
430 if (chip->options & NAND_BUSWIDTH_16)
431 iowrite16(byte | (byte << 8), nand->activecs->io.virt);
432 else
433 iowrite8(byte, nand->activecs->io.virt);
434 }
435
436 static void atmel_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
437 {
438 struct nand_chip *chip = mtd_to_nand(mtd);
439 struct atmel_nand *nand = to_atmel_nand(chip);
440 struct atmel_nand_controller *nc;
441
442 nc = to_nand_controller(chip->controller);
443
444 /*
445 * If the controller supports DMA, the buffer address is DMA-able and
446 * len is long enough to make DMA transfers profitable, let's trigger
447 * a DMA transfer. If it fails, fallback to PIO mode.
448 */
449 if (nc->dmac && virt_addr_valid(buf) &&
450 len >= MIN_DMA_LEN &&
451 !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
452 DMA_FROM_DEVICE))
453 return;
454
455 if (chip->options & NAND_BUSWIDTH_16)
456 ioread16_rep(nand->activecs->io.virt, buf, len / 2);
457 else
458 ioread8_rep(nand->activecs->io.virt, buf, len);
459 }
460
461 static void atmel_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
462 {
463 struct nand_chip *chip = mtd_to_nand(mtd);
464 struct atmel_nand *nand = to_atmel_nand(chip);
465 struct atmel_nand_controller *nc;
466
467 nc = to_nand_controller(chip->controller);
468
469 /*
470 * If the controller supports DMA, the buffer address is DMA-able and
471 * len is long enough to make DMA transfers profitable, let's trigger
472 * a DMA transfer. If it fails, fallback to PIO mode.
473 */
474 if (nc->dmac && virt_addr_valid(buf) &&
475 len >= MIN_DMA_LEN &&
476 !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
477 len, DMA_TO_DEVICE))
478 return;
479
480 if (chip->options & NAND_BUSWIDTH_16)
481 iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
482 else
483 iowrite8_rep(nand->activecs->io.virt, buf, len);
484 }
485
486 static int atmel_nand_dev_ready(struct mtd_info *mtd)
487 {
488 struct nand_chip *chip = mtd_to_nand(mtd);
489 struct atmel_nand *nand = to_atmel_nand(chip);
490
491 return gpiod_get_value(nand->activecs->rb.gpio);
492 }
493
494 static void atmel_nand_select_chip(struct mtd_info *mtd, int cs)
495 {
496 struct nand_chip *chip = mtd_to_nand(mtd);
497 struct atmel_nand *nand = to_atmel_nand(chip);
498
499 if (cs < 0 || cs >= nand->numcs) {
500 nand->activecs = NULL;
501 chip->dev_ready = NULL;
502 return;
503 }
504
505 nand->activecs = &nand->cs[cs];
506
507 if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB)
508 chip->dev_ready = atmel_nand_dev_ready;
509 }
510
511 static int atmel_hsmc_nand_dev_ready(struct mtd_info *mtd)
512 {
513 struct nand_chip *chip = mtd_to_nand(mtd);
514 struct atmel_nand *nand = to_atmel_nand(chip);
515 struct atmel_hsmc_nand_controller *nc;
516 u32 status;
517
518 nc = to_hsmc_nand_controller(chip->controller);
519
520 regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &status);
521
522 return status & ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
523 }
524
/*
 * HSMC flavor of select_chip: on top of the generic selection, disable
 * the NFC on deselect, or reconfigure (page/spare size, spare read) and
 * enable it for the newly selected CS.
 */
static void atmel_hsmc_nand_select_chip(struct mtd_info *mtd, int cs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;

	nc = to_hsmc_nand_controller(chip->controller);

	atmel_nand_select_chip(mtd, cs);

	if (!nand->activecs) {
		/* Nothing selected: turn the NFC off. */
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
			     ATMEL_HSMC_NFC_CTRL_DIS);
		return;
	}

	if (nand->activecs->rb.type == ATMEL_NAND_NATIVE_RB)
		chip->dev_ready = atmel_hsmc_nand_dev_ready;

	/* Spare area is read (RSPARE) but not auto-written (no WSPARE). */
	regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
			   ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
			   ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
			   ATMEL_HSMC_NFC_CFG_RSPARE |
			   ATMEL_HSMC_NFC_CFG_WSPARE,
			   ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
			   ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
			   ATMEL_HSMC_NFC_CFG_RSPARE);
	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
		     ATMEL_HSMC_NFC_CTRL_EN);
}
555
/*
 * Execute the operation built up in nc->op: encode the command cycles,
 * address cycles, CS and data phase, trigger the NFC and wait for it.
 *
 * Returns 0 on success or a negative error code. nc->op is reset in both
 * cases, ready for the next operation.
 */
static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
{
	u8 *addrs = nc->op.addrs;
	unsigned int op = 0;
	u32 addr, val;
	int i, ret;

	nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;

	for (i = 0; i < nc->op.ncmds; i++)
		op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);

	/*
	 * Only 4 address cycles fit in the command trigger; a 5th one
	 * must be written to the ADDR register beforehand.
	 */
	if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);

	op |= ATMEL_NFC_CSID(nc->op.cs) |
	      ATMEL_NFC_ACYCLE(nc->op.naddrs);

	if (nc->op.ncmds > 1)
		op |= ATMEL_NFC_VCMD2;

	addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
	       (addrs[3] << 24);

	if (nc->op.data != ATMEL_NFC_NO_DATA) {
		op |= ATMEL_NFC_DATAEN;
		/* A data phase also raises XFRDONE when it completes. */
		nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;

		if (nc->op.data == ATMEL_NFC_WRITE_DATA)
			op |= ATMEL_NFC_NFCWR;
	}

	/* Clear all flags (reading SR acknowledges pending events). */
	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);

	/*
	 * Send the command: the encoded command word is the *offset*
	 * written through the NFC command regmap, the address cycles
	 * are the value.
	 */
	regmap_write(nc->io, op, addr);

	ret = atmel_nfc_wait(nc, poll, 0);
	if (ret)
		dev_err(nc->base.dev,
			"Failed to send NAND command (err = %d)!",
			ret);

	/* Reset the op state. */
	memset(&nc->op, 0, sizeof(nc->op));

	return ret;
}
605
/*
 * HSMC cmd_ctrl hook: instead of driving ALE/CLE directly, accumulate
 * command and address cycles into nc->op, and fire the whole operation
 * when the core signals the end of the sequence (NAND_CMD_NONE).
 */
static void atmel_hsmc_nand_cmd_ctrl(struct mtd_info *mtd, int dat,
				     unsigned int ctrl)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;

	nc = to_hsmc_nand_controller(chip->controller);

	if (ctrl & NAND_ALE) {
		/* Silently drop address cycles beyond the NFC's limit. */
		if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
			return;

		nc->op.addrs[nc->op.naddrs++] = dat;
	} else if (ctrl & NAND_CLE) {
		/* The NFC supports at most two command cycles. */
		if (nc->op.ncmds > 1)
			return;

		nc->op.cmds[nc->op.ncmds++] = dat;
	}

	if (dat == NAND_CMD_NONE) {
		/* End of sequence: execute what we accumulated (polling). */
		nc->op.cs = nand->activecs->id;
		atmel_nfc_exec_op(nc, true);
	}
}
632
633 static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
634 unsigned int ctrl)
635 {
636 struct nand_chip *chip = mtd_to_nand(mtd);
637 struct atmel_nand *nand = to_atmel_nand(chip);
638 struct atmel_nand_controller *nc;
639
640 nc = to_nand_controller(chip->controller);
641
642 if ((ctrl & NAND_CTRL_CHANGE) && nand->activecs->csgpio) {
643 if (ctrl & NAND_NCE)
644 gpiod_set_value(nand->activecs->csgpio, 0);
645 else
646 gpiod_set_value(nand->activecs->csgpio, 1);
647 }
648
649 if (ctrl & NAND_ALE)
650 writeb(cmd, nand->activecs->io.virt + nc->caps->ale_offs);
651 else if (ctrl & NAND_CLE)
652 writeb(cmd, nand->activecs->io.virt + nc->caps->cle_offs);
653 }
654
655 static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
656 bool oob_required)
657 {
658 struct mtd_info *mtd = nand_to_mtd(chip);
659 struct atmel_hsmc_nand_controller *nc;
660 int ret = -EIO;
661
662 nc = to_hsmc_nand_controller(chip->controller);
663
664 if (nc->base.dmac)
665 ret = atmel_nand_dma_transfer(&nc->base, (void *)buf,
666 nc->sram.dma, mtd->writesize,
667 DMA_TO_DEVICE);
668
669 /* Falling back to CPU copy. */
670 if (ret)
671 memcpy_toio(nc->sram.virt, buf, mtd->writesize);
672
673 if (oob_required)
674 memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
675 mtd->oobsize);
676 }
677
678 static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
679 bool oob_required)
680 {
681 struct mtd_info *mtd = nand_to_mtd(chip);
682 struct atmel_hsmc_nand_controller *nc;
683 int ret = -EIO;
684
685 nc = to_hsmc_nand_controller(chip->controller);
686
687 if (nc->base.dmac)
688 ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
689 mtd->writesize, DMA_FROM_DEVICE);
690
691 /* Falling back to CPU copy. */
692 if (ret)
693 memcpy_fromio(buf, nc->sram.virt, mtd->writesize);
694
695 if (oob_required)
696 memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
697 mtd->oobsize);
698 }
699
/*
 * Queue the column and/or page address cycles of the next NFC operation.
 * Pass a negative @page or @column to skip that part of the address.
 */
static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_hsmc_nand_controller *nc;

	nc = to_hsmc_nand_controller(chip->controller);

	if (column >= 0) {
		nc->op.addrs[nc->op.naddrs++] = column;

		/*
		 * 2 address cycles for the column offset on large page NANDs.
		 */
		if (mtd->writesize > 512)
			nc->op.addrs[nc->op.naddrs++] = column >> 8;
	}

	if (page >= 0) {
		nc->op.addrs[nc->op.naddrs++] = page;
		nc->op.addrs[nc->op.naddrs++] = page >> 8;

		/* Third row-address cycle for large chips. */
		if (chip->options & NAND_ROW_ADDR_3)
			nc->op.addrs[nc->op.naddrs++] = page >> 16;
	}
}
725
726 static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
727 {
728 struct atmel_nand *nand = to_atmel_nand(chip);
729 struct atmel_nand_controller *nc;
730 int ret;
731
732 nc = to_nand_controller(chip->controller);
733
734 if (raw)
735 return 0;
736
737 ret = atmel_pmecc_enable(nand->pmecc, op);
738 if (ret)
739 dev_err(nc->dev,
740 "Failed to enable ECC engine (err = %d)\n", ret);
741
742 return ret;
743 }
744
745 static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
746 {
747 struct atmel_nand *nand = to_atmel_nand(chip);
748
749 if (!raw)
750 atmel_pmecc_disable(nand->pmecc);
751 }
752
/*
 * Wait for the PMECC engine and copy the ECC bytes it generated for each
 * sector into the OOB buffer (at the layout's ECC region offset).
 *
 * No-op returning 0 for raw accesses. Returns a negative error code if
 * the engine never became ready.
 */
static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	struct mtd_oob_region oobregion;
	void *eccbuf;
	int ret, i;

	nc = to_nand_controller(chip->controller);

	if (raw)
		return 0;

	ret = atmel_pmecc_wait_rdy(nand->pmecc);
	if (ret) {
		dev_err(nc->dev,
			"Failed to transfer NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* ECC bytes go into the OOB area, at the layout's ECC offset. */
	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	eccbuf = chip->oob_poi + oobregion.offset;

	for (i = 0; i < chip->ecc.steps; i++) {
		atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
						   eccbuf);
		eccbuf += chip->ecc.bytes;
	}

	return 0;
}
786
/*
 * Run PMECC correction on freshly read page data.
 *
 * Returns the maximum number of bitflips found in any ECC sector, or a
 * negative error code if the engine never became ready. Uncorrectable
 * sectors bump mtd->ecc_stats.failed instead of failing the whole read.
 * No-op returning 0 for raw accesses.
 */
static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
					 bool raw)
{
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	struct mtd_oob_region oobregion;
	int ret, i, max_bitflips = 0;
	void *databuf, *eccbuf;

	nc = to_nand_controller(chip->controller);

	if (raw)
		return 0;

	ret = atmel_pmecc_wait_rdy(nand->pmecc);
	if (ret) {
		dev_err(nc->dev,
			"Failed to read NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* ECC bytes live in the OOB area, at the layout's ECC offset. */
	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	eccbuf = chip->oob_poi + oobregion.offset;
	databuf = buf;

	for (i = 0; i < chip->ecc.steps; i++) {
		ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
						 eccbuf);
		/*
		 * If the engine cannot flag erased chunks itself, check
		 * whether this "uncorrectable" sector is actually erased.
		 */
		if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
			ret = nand_check_erased_ecc_chunk(databuf,
							  chip->ecc.size,
							  eccbuf,
							  chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);

		if (ret >= 0)
			max_bitflips = max(ret, max_bitflips);
		else
			mtd->ecc_stats.failed++;

		databuf += chip->ecc.size;
		eccbuf += chip->ecc.bytes;
	}

	return max_bitflips;
}
836
837 static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
838 bool oob_required, int page, bool raw)
839 {
840 struct mtd_info *mtd = nand_to_mtd(chip);
841 struct atmel_nand *nand = to_atmel_nand(chip);
842 int ret;
843
844 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
845 if (ret)
846 return ret;
847
848 atmel_nand_write_buf(mtd, buf, mtd->writesize);
849
850 ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
851 if (ret) {
852 atmel_pmecc_disable(nand->pmecc);
853 return ret;
854 }
855
856 atmel_nand_pmecc_disable(chip, raw);
857
858 atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
859
860 return 0;
861 }
862
863 static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
864 struct nand_chip *chip, const u8 *buf,
865 int oob_required, int page)
866 {
867 return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
868 }
869
870 static int atmel_nand_pmecc_write_page_raw(struct mtd_info *mtd,
871 struct nand_chip *chip,
872 const u8 *buf, int oob_required,
873 int page)
874 {
875 return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
876 }
877
878 static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
879 bool oob_required, int page, bool raw)
880 {
881 struct mtd_info *mtd = nand_to_mtd(chip);
882 int ret;
883
884 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
885 if (ret)
886 return ret;
887
888 atmel_nand_read_buf(mtd, buf, mtd->writesize);
889 atmel_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
890
891 ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
892
893 atmel_nand_pmecc_disable(chip, raw);
894
895 return ret;
896 }
897
898 static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
899 struct nand_chip *chip, u8 *buf,
900 int oob_required, int page)
901 {
902 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
903 }
904
905 static int atmel_nand_pmecc_read_page_raw(struct mtd_info *mtd,
906 struct nand_chip *chip, u8 *buf,
907 int oob_required, int page)
908 {
909 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
910 }
911
/*
 * HSMC/NFC-optimized PMECC page write: push the page into the NFC SRAM,
 * run SEQIN + data phase through the NFC (with the ECC engine enabled),
 * append the generated ECC bytes to the OOB, write the OOB by PIO, then
 * issue PAGEPROG and check the chip status.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
					  const u8 *buf, bool oob_required,
					  int page, bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	int ret, status;

	nc = to_hsmc_nand_controller(chip->controller);

	/* OOB is written separately below, so only stage the data area. */
	atmel_nfc_copy_to_sram(chip, buf, false);

	nc->op.cmds[0] = NAND_CMD_SEQIN;
	nc->op.ncmds = 1;
	atmel_nfc_set_op_addr(chip, page, 0x0);
	nc->op.cs = nand->activecs->id;
	nc->op.data = ATMEL_NFC_WRITE_DATA;

	/* The ECC engine must be running while the NFC streams the data. */
	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
	if (ret)
		return ret;

	ret = atmel_nfc_exec_op(nc, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		dev_err(nc->base.dev,
			"Failed to transfer NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);

	atmel_nand_pmecc_disable(chip, raw);

	if (ret)
		return ret;

	/* OOB (now containing the ECC bytes) is written by PIO. */
	atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);

	nc->op.cmds[0] = NAND_CMD_PAGEPROG;
	nc->op.ncmds = 1;
	nc->op.cs = nand->activecs->id;
	ret = atmel_nfc_exec_op(nc, false);
	if (ret)
		dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n",
			ret);

	/* Let the chip report program failures via its status register. */
	status = chip->waitfunc(mtd, chip);
	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return ret;
}
967
968 static int atmel_hsmc_nand_pmecc_write_page(struct mtd_info *mtd,
969 struct nand_chip *chip,
970 const u8 *buf, int oob_required,
971 int page)
972 {
973 return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
974 false);
975 }
976
977 static int atmel_hsmc_nand_pmecc_write_page_raw(struct mtd_info *mtd,
978 struct nand_chip *chip,
979 const u8 *buf,
980 int oob_required, int page)
981 {
982 return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
983 true);
984 }
985
/*
 * HSMC/NFC-optimized PMECC page read: issue READ0(+READSTART) with a data
 * phase through the NFC (ECC engine enabled), pull data + OOB out of the
 * NFC SRAM, then run correction.
 *
 * Returns max bitflips or a negative error code.
 */
static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
					 bool oob_required, int page,
					 bool raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_hsmc_nand_controller *nc;
	int ret;

	nc = to_hsmc_nand_controller(chip->controller);

	/*
	 * Optimized read page accessors only work when the NAND R/B pin is
	 * connected to a native SoC R/B pin. If that's not the case, fallback
	 * to the non-optimized one.
	 */
	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
		chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

		return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
						raw);
	}

	nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;

	/* Large-page devices need a second READSTART command cycle. */
	if (mtd->writesize > 512)
		nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;

	atmel_nfc_set_op_addr(chip, page, 0x0);
	nc->op.cs = nand->activecs->id;
	nc->op.data = ATMEL_NFC_READ_DATA;

	/* The ECC engine must be running while the NFC streams the data. */
	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
	if (ret)
		return ret;

	ret = atmel_nfc_exec_op(nc, false);
	if (ret) {
		atmel_nand_pmecc_disable(chip, raw);
		dev_err(nc->base.dev,
			"Failed to load NAND page data (err = %d)\n",
			ret);
		return ret;
	}

	/* OOB is needed too: it holds the ECC bytes used for correction. */
	atmel_nfc_copy_from_sram(chip, buf, true);

	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);

	atmel_nand_pmecc_disable(chip, raw);

	return ret;
}
1039
1040 static int atmel_hsmc_nand_pmecc_read_page(struct mtd_info *mtd,
1041 struct nand_chip *chip, u8 *buf,
1042 int oob_required, int page)
1043 {
1044 return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
1045 false);
1046 }
1047
1048 static int atmel_hsmc_nand_pmecc_read_page_raw(struct mtd_info *mtd,
1049 struct nand_chip *chip,
1050 u8 *buf, int oob_required,
1051 int page)
1052 {
1053 return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
1054 true);
1055 }
1056
/*
 * Configure the PMECC engine for this chip: resolve strength/sector-size
 * requirements (DT, datasheet or auto), create a PMECC user and fill in
 * the generic ECC fields accordingly.
 *
 * Returns 0 on success, -ENOTSUPP if the SoC has no PMECC, or the error
 * returned by atmel_pmecc_create_user().
 */
static int atmel_nand_pmecc_init(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand *nand = to_atmel_nand(chip);
	struct atmel_nand_controller *nc;
	struct atmel_pmecc_user_req req;

	nc = to_nand_controller(chip->controller);

	if (!nc->pmecc) {
		dev_err(nc->dev, "HW ECC not supported\n");
		return -ENOTSUPP;
	}

	/* Legacy bindings put the ECC requirements on the controller node. */
	if (nc->caps->legacy_of_bindings) {
		u32 val;

		if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
					  &val))
			chip->ecc.strength = val;

		if (!of_property_read_u32(nc->dev->of_node,
					  "atmel,pmecc-sector-size",
					  &val))
			chip->ecc.size = val;
	}

	/* Strength: NAND_ECC_MAXIMIZE > explicit > datasheet > maximize. */
	if (chip->ecc.options & NAND_ECC_MAXIMIZE)
		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
	else if (chip->ecc.strength)
		req.ecc.strength = chip->ecc.strength;
	else if (chip->ecc_strength_ds)
		req.ecc.strength = chip->ecc_strength_ds;
	else
		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;

	/* Sector size: explicit > datasheet > let the PMECC core choose. */
	if (chip->ecc.size)
		req.ecc.sectorsize = chip->ecc.size;
	else if (chip->ecc_step_ds)
		req.ecc.sectorsize = chip->ecc_step_ds;
	else
		req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;

	req.pagesize = mtd->writesize;
	req.oobsize = mtd->oobsize;

	if (mtd->writesize <= 512) {
		req.ecc.bytes = 4;
		req.ecc.ooboffset = 0;
	} else {
		/* Keep 2 bytes for the bad-block marker. */
		req.ecc.bytes = mtd->oobsize - 2;
		req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
	}

	nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
	if (IS_ERR(nand->pmecc))
		return PTR_ERR(nand->pmecc);

	/* Publish what the PMECC core actually selected. */
	chip->ecc.algo = NAND_ECC_BCH;
	chip->ecc.size = req.ecc.sectorsize;
	chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
	chip->ecc.strength = req.ecc.strength;

	chip->options |= NAND_NO_SUBPAGE_WRITE;

	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

	return 0;
}
1126
/*
 * Initialize ECC for a NAND chip according to chip->ecc.mode: nothing to
 * do for none/soft (the core handles it), PMECC hooks for HW ECC, and
 * -ENOTSUPP for anything else.
 */
static int atmel_nand_ecc_init(struct atmel_nand *nand)
{
	struct nand_chip *chip = &nand->base;
	struct atmel_nand_controller *nc;
	int ret;

	nc = to_nand_controller(chip->controller);

	switch (chip->ecc.mode) {
	case NAND_ECC_NONE:
	case NAND_ECC_SOFT:
		/*
		 * Nothing to do, the core will initialize everything for us.
		 */
		break;

	case NAND_ECC_HW:
		ret = atmel_nand_pmecc_init(chip);
		if (ret)
			return ret;

		/* Generic (PIO) PMECC accessors; HSMC overrides these. */
		chip->ecc.read_page = atmel_nand_pmecc_read_page;
		chip->ecc.write_page = atmel_nand_pmecc_write_page;
		chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
		chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
		break;

	default:
		/* Other modes are not supported. */
		dev_err(nc->dev, "Unsupported ECC mode: %d\n",
			chip->ecc.mode);
		return -ENOTSUPP;
	}

	return 0;
}
1163
1164 static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
1165 {
1166 struct nand_chip *chip = &nand->base;
1167 int ret;
1168
1169 ret = atmel_nand_ecc_init(nand);
1170 if (ret)
1171 return ret;
1172
1173 if (chip->ecc.mode != NAND_ECC_HW)
1174 return 0;
1175
1176 /* Adjust the ECC operations for the HSMC IP. */
1177 chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
1178 chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
1179 chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
1180 chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
1181 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
1182
1183 return 0;
1184 }
1185
/*
 * Translate a generic SDR NAND timing description (@conf, timings in
 * picoseconds) into SMC setup/pulse/cycle/timing register values stored in
 * @smcconf.  Nothing is written to the hardware here; the caller applies
 * @smcconf afterwards.
 *
 * Returns 0 on success, -ENOTSUPP when the requested interface cannot be
 * supported by this controller, or an error propagated from the
 * atmel_smc_cs_conf_set_*() helpers (e.g. -ERANGE when a value does not
 * fit in its register field).
 */
static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
					const struct nand_data_interface *conf,
					struct atmel_smc_cs_conf *smcconf)
{
	u32 ncycles, totalcycles, timeps, mckperiodps;
	struct atmel_nand_controller *nc;
	int ret;

	nc = to_nand_controller(nand->base.controller);

	/* DDR interface not supported. */
	if (conf->type != NAND_SDR_IFACE)
		return -ENOTSUPP;

	/*
	 * tRC < 30ns implies EDO mode. This controller does not support this
	 * mode.
	 */
	if (conf->timings.sdr.tRC_min < 30000)
		return -ENOTSUPP;

	atmel_smc_cs_conf_init(smcconf);

	/* Master clock period in picoseconds, to match the timing unit. */
	mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
	mckperiodps *= 1000;

	/*
	 * Set write pulse timing. This one is easy to extract:
	 *
	 * NWE_PULSE = tWP
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
	totalcycles = ncycles;
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * The write setup timing depends on the operation done on the NAND.
	 * All operations goes through the same data bus, but the operation
	 * type depends on the address we are writing to (ALE/CLE address
	 * lines).
	 * Since we have no way to differentiate the different operations at
	 * the SMC level, we must consider the worst case (the biggest setup
	 * time among all operation types):
	 *
	 * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
	 */
	timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
		      conf->timings.sdr.tALS_min);
	timeps = max(timeps, conf->timings.sdr.tDS_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
	totalcycles += ncycles;
	ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * As for the write setup timing, the write hold timing depends on the
	 * operation done on the NAND:
	 *
	 * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
	 */
	timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
		      conf->timings.sdr.tALH_min);
	timeps = max3(timeps, conf->timings.sdr.tDH_min,
		      conf->timings.sdr.tWH_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	totalcycles += ncycles;

	/*
	 * The write cycle timing is directly matching tWC, but is also
	 * dependent on the setup and hold timings we calculated earlier,
	 * which gives:
	 *
	 * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
	ncycles = max(totalcycles, ncycles);
	ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * We don't want the CS line to be toggled between each byte/word
	 * transfer to the NAND. The only way to guarantee that is to have the
	 * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
	 *
	 * NCS_WR_PULSE = NWE_CYCLE
	 */
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * As for the write setup timing, the read hold timing depends on the
	 * operation done on the NAND:
	 *
	 * NRD_HOLD = max(tREH, tRHOH)
	 */
	timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
	totalcycles = ncycles;

	/*
	 * TDF = tRHZ - NRD_HOLD
	 *
	 * NOTE(review): this subtraction is unsigned; if tRHZ_max were ever
	 * smaller than NRD_HOLD the result would wrap and then be clamped to
	 * ATMEL_SMC_MODE_TDF_MAX below — presumably acceptable, but worth
	 * confirming.
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
	ncycles -= totalcycles;

	/*
	 * In ONFI 4.0 specs, tRHZ has been increased to support EDO NANDs and
	 * we might end up with a config that does not fit in the TDF field.
	 * Just take the max value in this case and hope that the NAND is more
	 * tolerant than advertised.
	 */
	if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
		ncycles = ATMEL_SMC_MODE_TDF_MAX;
	else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
		ncycles = ATMEL_SMC_MODE_TDF_MIN;

	smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
			 ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;

	/*
	 * Read pulse timing directly matches tRP:
	 *
	 * NRD_PULSE = tRP
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
	totalcycles += ncycles;
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * The read cycle timing is directly matching tRC, but is also
	 * dependent on the setup and hold timings we calculated earlier,
	 * which gives:
	 *
	 * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
	 *
	 * NRD_SETUP is always 0.
	 */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
	ncycles = max(totalcycles, ncycles);
	ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/*
	 * We don't want the CS line to be toggled between each byte/word
	 * transfer from the NAND. The only way to guarantee that is to have
	 * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
	 *
	 * NCS_RD_PULSE = NRD_CYCLE
	 */
	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
					  ncycles);
	if (ret)
		return ret;

	/* Txxx timings are directly matching tXXX ones. */
	ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TADL_SHIFT,
					   ncycles);
	/*
	 * Version 4 of the ONFI spec mandates that tADL be at least 400
	 * nanoseconds, but, depending on the master clock rate, 400 ns may not
	 * fit in the tADL field of the SMC reg. We need to relax the check and
	 * accept the -ERANGE return code.
	 *
	 * Note that previous versions of the ONFI spec had a lower tADL_min
	 * (100 or 200 ns). It's not clear why this timing constraint got
	 * increased but it seems most NANDs are fine with values lower than
	 * 400ns, so we should be safe.
	 */
	if (ret && ret != -ERANGE)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TAR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TRR_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
	ret = atmel_smc_cs_conf_set_timing(smcconf,
					   ATMEL_HSMC_TIMINGS_TWB_SHIFT,
					   ncycles);
	if (ret)
		return ret;

	/* Attach the CS line to the NFC logic. */
	smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;

	/* Set the appropriate data bus width. */
	if (nand->base.options & NAND_BUSWIDTH_16)
		smcconf->mode |= ATMEL_SMC_MODE_DBW_16;

	/* Operate in NRD/NWE READ/WRITEMODE. */
	smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
			 ATMEL_SMC_MODE_WRITEMODE_NWE;

	return 0;
}
1415
1416 static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand,
1417 int csline,
1418 const struct nand_data_interface *conf)
1419 {
1420 struct atmel_nand_controller *nc;
1421 struct atmel_smc_cs_conf smcconf;
1422 struct atmel_nand_cs *cs;
1423 int ret;
1424
1425 nc = to_nand_controller(nand->base.controller);
1426
1427 ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1428 if (ret)
1429 return ret;
1430
1431 if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1432 return 0;
1433
1434 cs = &nand->cs[csline];
1435 cs->smcconf = smcconf;
1436 atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
1437
1438 return 0;
1439 }
1440
1441 static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand,
1442 int csline,
1443 const struct nand_data_interface *conf)
1444 {
1445 struct atmel_hsmc_nand_controller *nc;
1446 struct atmel_smc_cs_conf smcconf;
1447 struct atmel_nand_cs *cs;
1448 int ret;
1449
1450 nc = to_hsmc_nand_controller(nand->base.controller);
1451
1452 ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1453 if (ret)
1454 return ret;
1455
1456 if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1457 return 0;
1458
1459 cs = &nand->cs[csline];
1460 cs->smcconf = smcconf;
1461
1462 if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
1463 cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
1464
1465 atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
1466 &cs->smcconf);
1467
1468 return 0;
1469 }
1470
1471 static int atmel_nand_setup_data_interface(struct mtd_info *mtd, int csline,
1472 const struct nand_data_interface *conf)
1473 {
1474 struct nand_chip *chip = mtd_to_nand(mtd);
1475 struct atmel_nand *nand = to_atmel_nand(chip);
1476 struct atmel_nand_controller *nc;
1477
1478 nc = to_nand_controller(nand->base.controller);
1479
1480 if (csline >= nand->numcs ||
1481 (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
1482 return -EINVAL;
1483
1484 return nc->caps->ops->setup_data_interface(nand, csline, conf);
1485 }
1486
/*
 * Generic chip initialization shared by the SMC and HSMC paths: hook up the
 * low-level accessors and pick defaults before nand_scan_ident() runs.
 */
static void atmel_nand_init(struct atmel_nand_controller *nc,
			    struct atmel_nand *nand)
{
	struct nand_chip *chip = &nand->base;
	struct mtd_info *mtd = nand_to_mtd(chip);

	mtd->dev.parent = nc->dev;
	nand->base.controller = &nc->base;

	chip->cmd_ctrl = atmel_nand_cmd_ctrl;
	chip->read_byte = atmel_nand_read_byte;
	chip->read_word = atmel_nand_read_word;
	chip->write_byte = atmel_nand_write_byte;
	chip->read_buf = atmel_nand_read_buf;
	chip->write_buf = atmel_nand_write_buf;
	chip->select_chip = atmel_nand_select_chip;

	/* Timing tuning needs the master clock and a controller-level hook. */
	if (nc->mck && nc->caps->ops->setup_data_interface)
		chip->setup_data_interface = atmel_nand_setup_data_interface;

	/* Some NANDs require a longer delay than the default one (20us). */
	chip->chip_delay = 40;

	/*
	 * Use a bounce buffer when the buffer passed by the MTD user is not
	 * suitable for DMA.
	 */
	if (nc->dmac)
		chip->options |= NAND_USE_BOUNCE_BUFFER;

	/* Default to HW ECC if pmecc is available. */
	if (nc->pmecc)
		chip->ecc.mode = NAND_ECC_HW;
}
1521
/*
 * SMC-specific chip init: run the generic init, then, when a bus matrix
 * regmap is available, route the chip-select lines to the NAND Flash logic.
 */
static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
				struct atmel_nand *nand)
{
	struct nand_chip *chip = &nand->base;
	struct atmel_smc_nand_controller *smc_nc;
	int i;

	atmel_nand_init(nc, nand);

	/*
	 * The matrix regmap may be absent (legacy bindings, or no
	 * atmel,matrix phandle) — see atmel_smc_nand_controller_init().
	 */
	smc_nc = to_smc_nand_controller(chip->controller);
	if (!smc_nc->matrix)
		return;

	/* Attach the CS to the NAND Flash logic. */
	for (i = 0; i < nand->numcs; i++)
		regmap_update_bits(smc_nc->matrix, smc_nc->ebi_csa_offs,
				   BIT(nand->cs[i].id), BIT(nand->cs[i].id));
}
1540
/*
 * HSMC-specific chip init: generic init plus NFC-aware command and
 * chip-select handlers.
 */
static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
				 struct atmel_nand *nand)
{
	struct nand_chip *chip = &nand->base;

	atmel_nand_init(nc, nand);

	/* Overload some methods for the HSMC controller. */
	chip->cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
	chip->select_chip = atmel_hsmc_nand_select_chip;
}
1552
1553 static int atmel_nand_detect(struct atmel_nand *nand)
1554 {
1555 struct nand_chip *chip = &nand->base;
1556 struct mtd_info *mtd = nand_to_mtd(chip);
1557 struct atmel_nand_controller *nc;
1558 int ret;
1559
1560 nc = to_nand_controller(chip->controller);
1561
1562 ret = nand_scan_ident(mtd, nand->numcs, NULL);
1563 if (ret)
1564 dev_err(nc->dev, "nand_scan_ident() failed: %d\n", ret);
1565
1566 return ret;
1567 }
1568
1569 static int atmel_nand_unregister(struct atmel_nand *nand)
1570 {
1571 struct nand_chip *chip = &nand->base;
1572 struct mtd_info *mtd = nand_to_mtd(chip);
1573 int ret;
1574
1575 ret = mtd_device_unregister(mtd);
1576 if (ret)
1577 return ret;
1578
1579 nand_cleanup(chip);
1580 list_del(&nand->node);
1581
1582 return 0;
1583 }
1584
/*
 * Finish bringing up a detected NAND: pick the MTD name, run
 * nand_scan_tail(), register the MTD device and track the chip on the
 * controller's list.  On registration failure the core resources are
 * released again.
 */
static int atmel_nand_register(struct atmel_nand *nand)
{
	struct nand_chip *chip = &nand->base;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct atmel_nand_controller *nc;
	int ret;

	nc = to_nand_controller(chip->controller);

	if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
		/*
		 * We keep the MTD name unchanged to avoid breaking platforms
		 * where the MTD cmdline parser is used and the bootloader
		 * has not been updated to use the new naming scheme.
		 */
		mtd->name = "atmel_nand";
	} else if (!mtd->name) {
		/*
		 * If the new bindings are used and the bootloader has not been
		 * updated to pass a new mtdparts parameter on the cmdline, you
		 * should define the following property in your nand node:
		 *
		 * label = "atmel_nand";
		 *
		 * This way, mtd->name will be set by the core when
		 * nand_set_flash_node() is called.
		 */
		mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
					   "%s:nand.%d", dev_name(nc->dev),
					   nand->cs[0].id);
		if (!mtd->name) {
			dev_err(nc->dev, "Failed to allocate mtd->name\n");
			return -ENOMEM;
		}
	}

	ret = nand_scan_tail(mtd);
	if (ret) {
		dev_err(nc->dev, "nand_scan_tail() failed: %d\n", ret);
		return ret;
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&nand->node, &nc->chips);

	return 0;
}
1638
/*
 * Allocate an atmel_nand object from the "reg"-described chip-selects of
 * device-tree node @np, resolving per-CS IO ranges, ready/busy lines
 * (native or GPIO) and optional CS/detect GPIOs.
 *
 * @reg_cells: number of u32 cells per "reg" entry (#address-cells +
 *             #size-cells of the parent).
 *
 * Returns the new object (devm-allocated, freed with the controller) or an
 * ERR_PTR on failure.
 */
static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
					    struct device_node *np,
					    int reg_cells)
{
	struct atmel_nand *nand;
	struct gpio_desc *gpio;
	int numcs, ret, i;

	numcs = of_property_count_elems_of_size(np, "reg",
						reg_cells * sizeof(u32));
	if (numcs < 1) {
		dev_err(nc->dev, "Missing or invalid reg property\n");
		return ERR_PTR(-EINVAL);
	}

	/* Trailing flexible array: one atmel_nand_cs per chip-select. */
	nand = devm_kzalloc(nc->dev,
			    sizeof(*nand) + (numcs * sizeof(*nand->cs)),
			    GFP_KERNEL);
	if (!nand) {
		dev_err(nc->dev, "Failed to allocate NAND object\n");
		return ERR_PTR(-ENOMEM);
	}

	nand->numcs = numcs;

	/* Optional SmartMedia card-detect GPIO (-ENOENT means "not wired"). */
	gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0,
						      &np->fwnode, GPIOD_IN,
						      "nand-det");
	if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
		dev_err(nc->dev,
			"Failed to get detect gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return ERR_CAST(gpio);
	}

	if (!IS_ERR(gpio))
		nand->cdgpio = gpio;

	for (i = 0; i < numcs; i++) {
		struct resource res;
		u32 val;

		/*
		 * NOTE(review): the resource index here is always 0 even
		 * though we iterate over numcs "reg" entries — it looks like
		 * it should be i so each CS gets its own IO range; confirm
		 * against the DT binding before changing.
		 */
		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
				ret);
			return ERR_PTR(ret);
		}

		/* First cell of entry i is the CS id. */
		ret = of_property_read_u32_index(np, "reg", i * reg_cells,
						 &val);
		if (ret) {
			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
				ret);
			return ERR_PTR(ret);
		}

		nand->cs[i].id = val;

		nand->cs[i].io.dma = res.start;
		nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
		if (IS_ERR(nand->cs[i].io.virt))
			return ERR_CAST(nand->cs[i].io.virt);

		/* Ready/busy: prefer a native RB line, else fall back to GPIO. */
		if (!of_property_read_u32(np, "atmel,rb", &val)) {
			if (val > ATMEL_NFC_MAX_RB_ID)
				return ERR_PTR(-EINVAL);

			nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
			nand->cs[i].rb.id = val;
		} else {
			gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev,
							"rb", i, &np->fwnode,
							GPIOD_IN, "nand-rb");
			if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
				dev_err(nc->dev,
					"Failed to get R/B gpio (err = %ld)\n",
					PTR_ERR(gpio));
				return ERR_CAST(gpio);
			}

			if (!IS_ERR(gpio)) {
				nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
				nand->cs[i].rb.gpio = gpio;
			}
		}

		/* Optional GPIO-driven chip-select. */
		gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs",
							      i, &np->fwnode,
							      GPIOD_OUT_HIGH,
							      "nand-cs");
		if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
			dev_err(nc->dev,
				"Failed to get CS gpio (err = %ld)\n",
				PTR_ERR(gpio));
			return ERR_CAST(gpio);
		}

		if (!IS_ERR(gpio))
			nand->cs[i].csgpio = gpio;
	}

	nand_set_flash_node(&nand->base, np);

	return nand;
}
1745
1746 static int
1747 atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
1748 struct atmel_nand *nand)
1749 {
1750 int ret;
1751
1752 /* No card inserted, skip this NAND. */
1753 if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
1754 dev_info(nc->dev, "No SmartMedia card inserted.\n");
1755 return 0;
1756 }
1757
1758 nc->caps->ops->nand_init(nc, nand);
1759
1760 ret = atmel_nand_detect(nand);
1761 if (ret)
1762 return ret;
1763
1764 ret = nc->caps->ops->ecc_init(nand);
1765 if (ret)
1766 return ret;
1767
1768 return atmel_nand_register(nand);
1769 }
1770
1771 static int
1772 atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
1773 {
1774 struct atmel_nand *nand, *tmp;
1775 int ret;
1776
1777 list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
1778 ret = atmel_nand_unregister(nand);
1779 if (ret)
1780 return ret;
1781 }
1782
1783 return 0;
1784 }
1785
/*
 * Instantiate the single NAND described by the legacy (pre-subnode) DT
 * bindings: one chip, one CS, resources taken straight from the platform
 * device and the first three unnamed GPIOs (R/B, CS, card-detect).
 */
static int
atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
{
	struct device *dev = nc->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct atmel_nand *nand;
	struct gpio_desc *gpio;
	struct resource *res;

	/*
	 * Legacy bindings only allow connecting a single NAND with a unique CS
	 * line to the controller.
	 */
	nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
			    GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->numcs = 1;

	/* devm_ioremap_resource() handles a NULL res by returning -EINVAL. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
	if (IS_ERR(nand->cs[0].io.virt))
		return PTR_ERR(nand->cs[0].io.virt);

	nand->cs[0].io.dma = res->start;

	/*
	 * The old driver was hardcoding the CS id to 3 for all sama5
	 * controllers. Since this id is only meaningful for the sama5
	 * controller we can safely assign this id to 3 no matter the
	 * controller.
	 * If one wants to connect a NAND to a different CS line, he will
	 * have to use the new bindings.
	 */
	nand->cs[0].id = 3;

	/* R/B GPIO (optional: NULL when not described). */
	gpio = devm_gpiod_get_index_optional(dev, NULL, 0, GPIOD_IN);
	if (IS_ERR(gpio)) {
		dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	if (gpio) {
		nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
		nand->cs[0].rb.gpio = gpio;
	}

	/* CS GPIO (optional). */
	gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
	if (IS_ERR(gpio)) {
		dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	nand->cs[0].csgpio = gpio;

	/* Card detect GPIO (optional). */
	gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
	if (IS_ERR(gpio)) {
		dev_err(dev,
			"Failed to get detect gpio (err = %ld)\n",
			PTR_ERR(gpio));
		return PTR_ERR(gpio);
	}

	nand->cdgpio = gpio;

	nand_set_flash_node(&nand->base, nc->dev->of_node);

	return atmel_nand_controller_add_nand(nc, nand);
}
1861
1862 static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
1863 {
1864 struct device_node *np, *nand_np;
1865 struct device *dev = nc->dev;
1866 int ret, reg_cells;
1867 u32 val;
1868
1869 /* We do not retrieve the SMC syscon when parsing old DTs. */
1870 if (nc->caps->legacy_of_bindings)
1871 return atmel_nand_controller_legacy_add_nands(nc);
1872
1873 np = dev->of_node;
1874
1875 ret = of_property_read_u32(np, "#address-cells", &val);
1876 if (ret) {
1877 dev_err(dev, "missing #address-cells property\n");
1878 return ret;
1879 }
1880
1881 reg_cells = val;
1882
1883 ret = of_property_read_u32(np, "#size-cells", &val);
1884 if (ret) {
1885 dev_err(dev, "missing #address-cells property\n");
1886 return ret;
1887 }
1888
1889 reg_cells += val;
1890
1891 for_each_child_of_node(np, nand_np) {
1892 struct atmel_nand *nand;
1893
1894 nand = atmel_nand_create(nc, nand_np, reg_cells);
1895 if (IS_ERR(nand)) {
1896 ret = PTR_ERR(nand);
1897 goto err;
1898 }
1899
1900 ret = atmel_nand_controller_add_nand(nc, nand);
1901 if (ret)
1902 goto err;
1903 }
1904
1905 return 0;
1906
1907 err:
1908 atmel_nand_controller_remove_nands(nc);
1909
1910 return ret;
1911 }
1912
/* Release the resources grabbed in atmel_nand_controller_init(). */
static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
{
	if (nc->dmac)
		dma_release_channel(nc->dmac);

	clk_put(nc->mck);
}
1920
/*
 * Bus-matrix compatibles; the driver data carries the offset of the
 * EBI_CSA register used to assign chip-select lines to the NAND logic
 * (consumed by atmel_smc_nand_controller_init()).
 */
static const struct of_device_id atmel_matrix_of_ids[] = {
	{
		.compatible = "atmel,at91sam9260-matrix",
		.data = (void *)AT91SAM9260_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9261-matrix",
		.data = (void *)AT91SAM9261_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9263-matrix",
		.data = (void *)AT91SAM9263_MATRIX_EBI0CSA,
	},
	{
		.compatible = "atmel,at91sam9rl-matrix",
		.data = (void *)AT91SAM9RL_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9g45-matrix",
		.data = (void *)AT91SAM9G45_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9n12-matrix",
		.data = (void *)AT91SAM9N12_MATRIX_EBICSA,
	},
	{
		.compatible = "atmel,at91sam9x5-matrix",
		.data = (void *)AT91SAM9X5_MATRIX_EBICSA,
	},
	{ /* sentinel */ },
};
1952
/*
 * Common controller init: base structures, PMECC handle, optional memcpy
 * DMA channel, and — for new bindings only — the master clock and the SMC
 * syscon regmap.
 *
 * Returns 0 on success or a negative errno (-EPROBE_DEFER is propagated
 * silently for the PMECC lookup).
 */
static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
				      struct platform_device *pdev,
				      const struct atmel_nand_controller_caps *caps)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	nand_hw_control_init(&nc->base);
	INIT_LIST_HEAD(&nc->chips);
	nc->dev = dev;
	nc->caps = caps;

	platform_set_drvdata(pdev, nc);

	nc->pmecc = devm_atmel_pmecc_get(dev);
	if (IS_ERR(nc->pmecc)) {
		ret = PTR_ERR(nc->pmecc);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Could not get PMECC object (err = %d)\n",
				ret);
		return ret;
	}

	if (nc->caps->has_dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* DMA is an optimization: carry on in PIO mode on failure. */
		nc->dmac = dma_request_channel(mask, NULL, NULL);
		if (!nc->dmac)
			dev_err(nc->dev, "Failed to request DMA channel\n");
	}

	/* We do not retrieve the SMC syscon when parsing old DTs. */
	if (nc->caps->legacy_of_bindings)
		return 0;

	nc->mck = of_clk_get(dev->parent->of_node, 0);
	if (IS_ERR(nc->mck)) {
		dev_err(dev, "Failed to retrieve MCK clk\n");
		return PTR_ERR(nc->mck);
	}

	/*
	 * NOTE(review): nc->mck is only released in
	 * atmel_nand_controller_cleanup(); the error paths below return
	 * without putting it — confirm whether every caller reaches cleanup
	 * on failure.
	 */
	np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
	if (!np) {
		dev_err(dev, "Missing or invalid atmel,smc property\n");
		return -EINVAL;
	}

	nc->smc = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(nc->smc)) {
		ret = PTR_ERR(nc->smc);
		dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
		return ret;
	}

	return 0;
}
2014
2015 static int
2016 atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
2017 {
2018 struct device *dev = nc->base.dev;
2019 const struct of_device_id *match;
2020 struct device_node *np;
2021 int ret;
2022
2023 /* We do not retrieve the matrix syscon when parsing old DTs. */
2024 if (nc->base.caps->legacy_of_bindings)
2025 return 0;
2026
2027 np = of_parse_phandle(dev->parent->of_node, "atmel,matrix", 0);
2028 if (!np)
2029 return 0;
2030
2031 match = of_match_node(atmel_matrix_of_ids, np);
2032 if (!match) {
2033 of_node_put(np);
2034 return 0;
2035 }
2036
2037 nc->matrix = syscon_node_to_regmap(np);
2038 of_node_put(np);
2039 if (IS_ERR(nc->matrix)) {
2040 ret = PTR_ERR(nc->matrix);
2041 dev_err(dev, "Could not get Matrix regmap (err = %d)\n", ret);
2042 return ret;
2043 }
2044
2045 nc->ebi_csa_offs = (unsigned int)match->data;
2046
2047 /*
2048 * The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
2049 * add 4 to ->ebi_csa_offs.
2050 */
2051 if (of_device_is_compatible(dev->parent->of_node,
2052 "atmel,at91sam9263-ebi1"))
2053 nc->ebi_csa_offs += 4;
2054
2055 return 0;
2056 }
2057
2058 static int
2059 atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
2060 {
2061 struct regmap_config regmap_conf = {
2062 .reg_bits = 32,
2063 .val_bits = 32,
2064 .reg_stride = 4,
2065 };
2066
2067 struct device *dev = nc->base.dev;
2068 struct device_node *nand_np, *nfc_np;
2069 void __iomem *iomem;
2070 struct resource res;
2071 int ret;
2072
2073 nand_np = dev->of_node;
2074 nfc_np = of_find_compatible_node(dev->of_node, NULL,
2075 "atmel,sama5d3-nfc");
2076
2077 nc->clk = of_clk_get(nfc_np, 0);
2078 if (IS_ERR(nc->clk)) {
2079 ret = PTR_ERR(nc->clk);
2080 dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
2081 ret);
2082 goto out;
2083 }
2084
2085 ret = clk_prepare_enable(nc->clk);
2086 if (ret) {
2087 dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
2088 ret);
2089 goto out;
2090 }
2091
2092 nc->irq = of_irq_get(nand_np, 0);
2093 if (nc->irq <= 0) {
2094 ret = nc->irq ?: -ENXIO;
2095 if (ret != -EPROBE_DEFER)
2096 dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2097 ret);
2098 goto out;
2099 }
2100
2101 ret = of_address_to_resource(nfc_np, 0, &res);
2102 if (ret) {
2103 dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
2104 ret);
2105 goto out;
2106 }
2107
2108 iomem = devm_ioremap_resource(dev, &res);
2109 if (IS_ERR(iomem)) {
2110 ret = PTR_ERR(iomem);
2111 goto out;
2112 }
2113
2114 regmap_conf.name = "nfc-io";
2115 regmap_conf.max_register = resource_size(&res) - 4;
2116 nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2117 if (IS_ERR(nc->io)) {
2118 ret = PTR_ERR(nc->io);
2119 dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2120 ret);
2121 goto out;
2122 }
2123
2124 ret = of_address_to_resource(nfc_np, 1, &res);
2125 if (ret) {
2126 dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
2127 ret);
2128 goto out;
2129 }
2130
2131 iomem = devm_ioremap_resource(dev, &res);
2132 if (IS_ERR(iomem)) {
2133 ret = PTR_ERR(iomem);
2134 goto out;
2135 }
2136
2137 regmap_conf.name = "smc";
2138 regmap_conf.max_register = resource_size(&res) - 4;
2139 nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2140 if (IS_ERR(nc->base.smc)) {
2141 ret = PTR_ERR(nc->base.smc);
2142 dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2143 ret);
2144 goto out;
2145 }
2146
2147 ret = of_address_to_resource(nfc_np, 2, &res);
2148 if (ret) {
2149 dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
2150 ret);
2151 goto out;
2152 }
2153
2154 nc->sram.virt = devm_ioremap_resource(dev, &res);
2155 if (IS_ERR(nc->sram.virt)) {
2156 ret = PTR_ERR(nc->sram.virt);
2157 goto out;
2158 }
2159
2160 nc->sram.dma = res.start;
2161
2162 out:
2163 of_node_put(nfc_np);
2164
2165 return ret;
2166 }
2167
/*
 * New-bindings init for the HSMC controller: resolve the HSMC register
 * layout and IRQ from the "atmel,smc" phandle, the NFC IO regmap from
 * "atmel,nfc-io", and carve the NFC command/data buffer out of the
 * "atmel,nfc-sram" gen_pool.
 */
static int
atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
{
	struct device *dev = nc->base.dev;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
	if (!np) {
		dev_err(dev, "Missing or invalid atmel,smc property\n");
		return -EINVAL;
	}

	nc->hsmc_layout = atmel_hsmc_get_reg_layout(np);

	nc->irq = of_irq_get(np, 0);
	of_node_put(np);
	if (nc->irq <= 0) {
		/* of_irq_get() returns 0 for "no mapping": turn it into -ENXIO. */
		ret = nc->irq ?: -ENXIO;
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get IRQ number (err = %d)\n",
				ret);
		return ret;
	}

	np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
	if (!np) {
		dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
		return -EINVAL;
	}

	nc->io = syscon_node_to_regmap(np);
	of_node_put(np);
	if (IS_ERR(nc->io)) {
		ret = PTR_ERR(nc->io);
		dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
		return ret;
	}

	nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
					 "atmel,nfc-sram", 0);
	if (!nc->sram.pool) {
		dev_err(nc->base.dev, "Missing SRAM\n");
		return -ENOMEM;
	}

	/* Freed in atmel_hsmc_nand_controller_remove(). */
	nc->sram.virt = gen_pool_dma_alloc(nc->sram.pool,
					    ATMEL_NFC_SRAM_SIZE,
					    &nc->sram.dma);
	if (!nc->sram.virt) {
		dev_err(nc->base.dev,
			"Could not allocate memory from the NFC SRAM pool\n");
		return -ENOMEM;
	}

	return 0;
}
2225
/*
 * Tear down an HSMC controller: unregister all NANDs, return the NFC SRAM
 * to its pool, stop the HSMC clock and release the common resources.
 */
static int
atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
{
	struct atmel_hsmc_nand_controller *hsmc_nc;
	int ret;

	ret = atmel_nand_controller_remove_nands(nc);
	if (ret)
		return ret;

	hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
	/* sram.pool is only set by the non-legacy init path. */
	if (hsmc_nc->sram.pool)
		gen_pool_free(hsmc_nc->sram.pool,
			      (unsigned long)hsmc_nc->sram.virt,
			      ATMEL_NFC_SRAM_SIZE);

	/* clk is only set by the legacy init path. */
	if (hsmc_nc->clk) {
		clk_disable_unprepare(hsmc_nc->clk);
		clk_put(hsmc_nc->clk);
	}

	atmel_nand_controller_cleanup(nc);

	return 0;
}
2251
/*
 * Probe an HSMC (sama5-class) controller: common init, binding-specific
 * init, IRQ registration (with all NFC interrupts masked first), initial
 * NFC config, then NAND enumeration.
 */
static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
				const struct atmel_nand_controller_caps *caps)
{
	struct device *dev = &pdev->dev;
	struct atmel_hsmc_nand_controller *nc;
	int ret;

	nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
	if (!nc)
		return -ENOMEM;

	ret = atmel_nand_controller_init(&nc->base, pdev, caps);
	if (ret)
		return ret;

	if (caps->legacy_of_bindings)
		ret = atmel_hsmc_nand_controller_legacy_init(nc);
	else
		ret = atmel_hsmc_nand_controller_init(nc);

	if (ret)
		return ret;

	/* Make sure all irqs are masked before registering our IRQ handler. */
	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
	ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
			       IRQF_SHARED, "nfc", nc);
	if (ret) {
		dev_err(dev,
			"Could not get register NFC interrupt handler (err = %d)\n",
			ret);
		goto err;
	}

	/* Initial NFC configuration. */
	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
		     ATMEL_HSMC_NFC_CFG_DTO_MAX);

	ret = atmel_nand_controller_add_nands(&nc->base);
	if (ret)
		goto err;

	return 0;

err:
	atmel_hsmc_nand_controller_remove(&nc->base);

	return ret;
}
2301
/*
 * Controller hooks for NAND chips sitting behind the HSMC/NFC block
 * (sama5 SoCs and later).
 */
static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
	.probe = atmel_hsmc_nand_controller_probe,
	.remove = atmel_hsmc_nand_controller_remove,
	.ecc_init = atmel_hsmc_nand_ecc_init,
	.nand_init = atmel_hsmc_nand_init,
	.setup_data_interface = atmel_hsmc_nand_setup_data_interface,
};
2309
/* sama5 HSMC controller: DMA capable, ALE on A21, CLE on A22. */
static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_hsmc_nc_ops,
};
2316
/* Only used to parse old bindings. */
static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_hsmc_nc_ops,
	/* Tells the generic probe to run the legacy DT fixups. */
	.legacy_of_bindings = true,
};
2325
2326 static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
2327 const struct atmel_nand_controller_caps *caps)
2328 {
2329 struct device *dev = &pdev->dev;
2330 struct atmel_smc_nand_controller *nc;
2331 int ret;
2332
2333 nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2334 if (!nc)
2335 return -ENOMEM;
2336
2337 ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2338 if (ret)
2339 return ret;
2340
2341 ret = atmel_smc_nand_controller_init(nc);
2342 if (ret)
2343 return ret;
2344
2345 return atmel_nand_controller_add_nands(&nc->base);
2346 }
2347
/*
 * Tear down an SMC NAND controller: detach the NAND chips, then run the
 * common controller cleanup. Returns 0 on success or the error propagated
 * from atmel_nand_controller_remove_nands().
 */
static int
atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
{
	int err = atmel_nand_controller_remove_nands(nc);

	if (err)
		return err;

	atmel_nand_controller_cleanup(nc);
	return 0;
}
2361
/*
 * The SMC reg layout of at91rm9200 is completely different which prevents us
 * from re-using atmel_smc_nand_setup_data_interface() for the
 * ->setup_data_interface() hook.
 * At this point, there's no support for the at91rm9200 SMC IP, so we leave
 * ->setup_data_interface() unassigned.
 */
static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
	.probe = atmel_smc_nand_controller_probe,
	.remove = atmel_smc_nand_controller_remove,
	.ecc_init = atmel_nand_ecc_init,
	.nand_init = atmel_smc_nand_init,
	/* No .setup_data_interface: see the comment above. */
};
2375
/* at91rm9200: no DMA, ALE on A21, CLE on A22, timing setup unsupported. */
static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &at91rm9200_nc_ops,
};
2381
/*
 * Controller hooks for NAND chips behind the plain SMC interface
 * (at91sam9-class SoCs).
 */
static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
	.probe = atmel_smc_nand_controller_probe,
	.remove = atmel_smc_nand_controller_remove,
	.ecc_init = atmel_nand_ecc_init,
	.nand_init = atmel_smc_nand_init,
	.setup_data_interface = atmel_smc_nand_setup_data_interface,
};
2389
/* at91sam9260: ALE on A21, CLE on A22, no DMA. */
static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_smc_nc_ops,
};
2395
/* at91sam9261 is the odd one out: ALE on A22, CLE on A21 (swapped). */
static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
	.ale_offs = BIT(22),
	.cle_offs = BIT(21),
	.ops = &atmel_smc_nc_ops,
};
2401
/* at91sam9g45 and later SMC controllers gain DMA support. */
static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_smc_nc_ops,
};
2408
/* Only used to parse old bindings. */
static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_smc_nc_ops,
	.legacy_of_bindings = true,
};
2416
/* Legacy-binding variant of the at91sam9261 caps (swapped ALE/CLE). */
static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
	.ale_offs = BIT(22),
	.cle_offs = BIT(21),
	.ops = &atmel_smc_nc_ops,
	.legacy_of_bindings = true,
};
2423
/* Legacy-binding variant of the at91sam9g45 caps (DMA capable). */
static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
	.has_dma = true,
	.ale_offs = BIT(21),
	.cle_offs = BIT(22),
	.ops = &atmel_smc_nc_ops,
	.legacy_of_bindings = true,
};
2431
/*
 * DT match table. The "-nand-controller" compatibles use the current
 * bindings; the plain "-nand" ones are kept for old/deprecated bindings and
 * map to caps with .legacy_of_bindings set.
 */
static const struct of_device_id atmel_nand_controller_of_ids[] = {
	{
		.compatible = "atmel,at91rm9200-nand-controller",
		.data = &atmel_rm9200_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9260-nand-controller",
		.data = &atmel_sam9260_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9261-nand-controller",
		.data = &atmel_sam9261_nc_caps,
	},
	{
		.compatible = "atmel,at91sam9g45-nand-controller",
		.data = &atmel_sam9g45_nc_caps,
	},
	{
		.compatible = "atmel,sama5d3-nand-controller",
		.data = &atmel_sama5_nc_caps,
	},
	/* Support for old/deprecated bindings: */
	{
		.compatible = "atmel,at91rm9200-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{
		.compatible = "atmel,sama5d4-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{
		.compatible = "atmel,sama5d2-nand",
		.data = &atmel_rm9200_nand_caps,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
2469
/*
 * Generic probe entry point: resolve the controller capabilities (from the
 * platform id_entry or the OF match data), apply legacy-binding fixups when
 * needed, and delegate to the capability-specific ->probe() hook.
 *
 * Returns 0 on success or a negative error code.
 */
static int atmel_nand_controller_probe(struct platform_device *pdev)
{
	const struct atmel_nand_controller_caps *caps;

	/* A platform id_entry takes precedence over OF match data. */
	if (pdev->id_entry)
		caps = (void *)pdev->id_entry->driver_data;
	else
		caps = of_device_get_match_data(&pdev->dev);

	if (!caps) {
		dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
		return -EINVAL;
	}

	if (caps->legacy_of_bindings) {
		/* Default matches all SoCs except at91sam9261 (see below). */
		u32 ale_offs = 21;

		/*
		 * If we are parsing legacy DT props and the DT contains a
		 * valid NFC node, forward the request to the sama5 logic.
		 */
		if (of_find_compatible_node(pdev->dev.of_node, NULL,
					    "atmel,sama5d3-nfc"))
			caps = &atmel_sama5_nand_caps;

		/*
		 * Even if the compatible says we are dealing with an
		 * at91rm9200 controller, the atmel,nand-has-dma specify that
		 * this controller supports DMA, which means we are in fact
		 * dealing with an at91sam9g45+ controller.
		 */
		if (!caps->has_dma &&
		    of_property_read_bool(pdev->dev.of_node,
					  "atmel,nand-has-dma"))
			caps = &atmel_sam9g45_nand_caps;

		/*
		 * All SoCs except the at91sam9261 are assigning ALE to A21 and
		 * CLE to A22. If atmel,nand-addr-offset != 21 this means we're
		 * actually dealing with an at91sam9261 controller.
		 */
		of_property_read_u32(pdev->dev.of_node,
				     "atmel,nand-addr-offset", &ale_offs);
		if (ale_offs != 21)
			caps = &atmel_sam9261_nand_caps;
	}

	return caps->ops->probe(pdev, caps);
}
2519
2520 static int atmel_nand_controller_remove(struct platform_device *pdev)
2521 {
2522 struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
2523
2524 return nc->caps->ops->remove(nc);
2525 }
2526
/*
 * System resume handler: reset the PMECC engine (when this controller uses
 * one) and issue a NAND RESET on every chip-select of every registered chip
 * (presumably because chip state is lost across suspend — the code here only
 * shows the re-initialization itself).
 */
static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
{
	struct atmel_nand_controller *nc = dev_get_drvdata(dev);
	struct atmel_nand *nand;

	if (nc->pmecc)
		atmel_pmecc_reset(nc->pmecc);

	list_for_each_entry(nand, &nc->chips, node) {
		int i;

		/* One nand_reset() per CS line attached to this chip. */
		for (i = 0; i < nand->numcs; i++)
			nand_reset(&nand->base, i);
	}

	return 0;
}
2544
/* No suspend hook (NULL); only ->resume() re-initializes the hardware. */
static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
			 atmel_nand_controller_resume);
2547
/* Platform driver glue: OF matching, PM ops, probe/remove dispatch. */
static struct platform_driver atmel_nand_controller_driver = {
	.driver = {
		.name = "atmel-nand-controller",
		.of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
		.pm = &atmel_nand_controller_pm_ops,
	},
	.probe = atmel_nand_controller_probe,
	.remove = atmel_nand_controller_remove,
};
module_platform_driver(atmel_nand_controller_driver);
2558
/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
MODULE_ALIAS("platform:atmel-nand-controller");