]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/mtd/nand/lpc32xx_mlc.c
mtd: nand: jz4780: switch to mtd_ooblayout_ops
[mirror_ubuntu-focal-kernel.git] / drivers / mtd / nand / lpc32xx_mlc.c
CommitLineData
70f7cb78
RS
1/*
2 * Driver for NAND MLC Controller in LPC32xx
3 *
4 * Author: Roland Stigge <stigge@antcom.de>
5 *
6 * Copyright © 2011 WORK Microwave GmbH
7 * Copyright © 2011, 2012 Roland Stigge
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 *
20 * NAND Flash Controller Operation:
21 * - Read: Auto Decode
22 * - Write: Auto Encode
23 * - Tested Page Sizes: 2048, 4096
24 */
25
26#include <linux/slab.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/nand.h>
31#include <linux/mtd/partitions.h>
32#include <linux/clk.h>
33#include <linux/err.h>
34#include <linux/delay.h>
35#include <linux/completion.h>
36#include <linux/interrupt.h>
37#include <linux/of.h>
70f7cb78 38#include <linux/of_gpio.h>
9c6f62a7 39#include <linux/mtd/lpc32xx_mlc.h>
70f7cb78
RS
40#include <linux/io.h>
41#include <linux/mm.h>
42#include <linux/dma-mapping.h>
43#include <linux/dmaengine.h>
44#include <linux/mtd/nand_ecc.h>
45
#define DRV_NAME "lpc32xx_mlc"

/**********************************************************************
* MLC NAND controller register offsets
*
* Each macro yields the address of a register given the (virtual or
* physical) base address of the MLC register window.  The argument is
* parenthesized so the macros expand safely for any expression.
**********************************************************************/

#define MLC_BUFF(x)			((x) + 0x00000)
#define MLC_DATA(x)			((x) + 0x08000)
#define MLC_CMD(x)			((x) + 0x10000)
#define MLC_ADDR(x)			((x) + 0x10004)
#define MLC_ECC_ENC_REG(x)		((x) + 0x10008)
#define MLC_ECC_DEC_REG(x)		((x) + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)		((x) + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)		((x) + 0x10014)
#define MLC_RPR(x)			((x) + 0x10018)
#define MLC_WPR(x)			((x) + 0x1001C)
#define MLC_RUBP(x)			((x) + 0x10020)
#define MLC_ROBP(x)			((x) + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)		((x) + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)		((x) + 0x1002C)
#define MLC_ICR(x)			((x) + 0x10030)
#define MLC_TIME_REG(x)			((x) + 0x10034)
#define MLC_IRQ_MR(x)			((x) + 0x10038)
#define MLC_IRQ_SR(x)			((x) + 0x1003C)
#define MLC_LOCK_PR(x)			((x) + 0x10044)
#define MLC_ISR(x)			((x) + 0x10048)
#define MLC_CEH(x)			((x) + 0x1004C)

/**********************************************************************
* MLC_CMD bit definitions
**********************************************************************/
#define MLCCMD_RESET			0xFF

/**********************************************************************
* MLC_ICR bit definitions
**********************************************************************/
#define MLCICR_WPROT			(1 << 3)
#define MLCICR_LARGEBLOCK		(1 << 2)
#define MLCICR_LONGADDR			(1 << 1)
#define MLCICR_16BIT			(1 << 0)  /* unsupported by LPC32x0! */

/**********************************************************************
* MLC_TIME_REG bit definitions
**********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)		(((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)		(((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)		(((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)		(((n) & 0x0F) << 0)

/**********************************************************************
* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
**********************************************************************/
#define MLCIRQ_NAND_READY		(1 << 5)
#define MLCIRQ_CONTROLLER_READY		(1 << 4)
#define MLCIRQ_DECODE_FAILURE		(1 << 3)
#define MLCIRQ_DECODE_ERROR		(1 << 2)
#define MLCIRQ_ECC_READY		(1 << 1)
#define MLCIRQ_WRPROT_FAULT		(1 << 0)

/**********************************************************************
* MLC_LOCK_PR bit definitions
**********************************************************************/
#define MLCLOCKPR_MAGIC			0xA25E

/**********************************************************************
* MLC_ISR bit definitions
**********************************************************************/
#define MLCISR_DECODER_FAILURE		(1 << 6)
#define MLCISR_ERRORS			((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED		(1 << 3)
#define MLCISR_ECC_READY		(1 << 2)
#define MLCISR_CONTROLLER_READY		(1 << 1)
#define MLCISR_NAND_READY		(1 << 0)

/**********************************************************************
* MLC_CEH bit definitions
**********************************************************************/
#define MLCCEH_NORMAL			(1 << 0)

/*
 * Per-chip configuration, parsed from the device tree (see
 * lpc32xx_parse_dt()).  The seven timing values are divisors applied to
 * the MLC block clock rate when computing the MLC_TIME_REG fields in
 * lpc32xx_nand_setup().
 */
struct lpc32xx_nand_cfg_mlc {
	uint32_t tcea_delay;		/* from "nxp,tcea-delay" */
	uint32_t busy_delay;		/* from "nxp,busy-delay" */
	uint32_t nand_ta;		/* from "nxp,nand-ta" */
	uint32_t rd_high;		/* from "nxp,rd-high" */
	uint32_t rd_low;		/* from "nxp,rd-low" */
	uint32_t wr_high;		/* from "nxp,wr-high" */
	uint32_t wr_low;		/* from "nxp,wr-low" */
	int wp_gpio;			/* write-protect GPIO ("gpios" prop) */
	struct mtd_partition *parts;	/* static partitions, if any */
	unsigned num_parts;		/* number of entries in parts */
};
140
/*
 * OOB layout for a 64-byte spare area: four 16-byte regions, one per
 * 512-byte subpage, each holding 6 free bytes followed by 10 ECC bytes
 * (40 ECC bytes total, matching the controller's auto encode/decode).
 */
static struct nand_ecclayout lpc32xx_nand_oob = {
	.eccbytes = 40,
	.eccpos = { 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
		   22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
		   38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
		   54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
	.oobfree = {
		{ .offset = 0,
		  .length = 6, },
		{ .offset = 16,
		  .length = 6, },
		{ .offset = 32,
		  .length = 6, },
		{ .offset = 48,
		  .length = 6, },
	},
};
158
/*
 * Bad-block table descriptors: the main and mirror BBTs live at fixed
 * absolute pages (NAND_BBT_ABSPAGE) and are stored in the data area,
 * not OOB (NAND_BBT_NO_OOB), since the MLC controller owns the OOB.
 */
static struct nand_bbt_descr lpc32xx_nand_bbt = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};
170
/* Driver state for one MLC NAND controller instance. */
struct lpc32xx_nand_host {
	struct nand_chip	nand_chip;
	struct lpc32xx_mlc_platform_data *pdata; /* provides dma_filter */
	struct clk		*clk;
	void __iomem		*io_base;	/* mapped MLC register window */
	int			irq;
	struct lpc32xx_nand_cfg_mlc	*ncfg;	/* DT timings / WP GPIO */
	struct completion	comp_nand;	/* MLCIRQ_NAND_READY seen */
	struct completion	comp_controller; /* MLCIRQ_CONTROLLER_READY seen */
	uint32_t llptr;
	/*
	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	dma_addr_t		oob_buf_phy;
	/*
	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	uint8_t			*oob_buf;
	/* Physical address of DMA base address */
	dma_addr_t		io_base_phy;

	struct completion	comp_dma;	/* DMA transfer finished */
	struct dma_chan		*dma_chan;
	struct dma_slave_config	dma_slave_config;
	struct scatterlist	sgl;
	uint8_t			*dma_buf;	/* bounce buffer for DMA I/O */
	uint8_t			*dummy_buf;	/* scratch page for read_oob */
	int			mlcsubpages;	/* number of 512bytes-subpages */
};
200
/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA Controller for transferring the 512 byte subpages
 * instead of doing readl() / writel() in a loop slows it down significantly.
 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * This applies to the transfer itself. In the DMA case: only the
 * wait_for_completion() (DMA setup _not_ included).
 *
 * Note that the 512 bytes subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
 * 2048 bytes page) is spent waiting for the NAND IRQ, anyway. (The NAND
 * controller transferring data between its internal buffer to/from the NAND
 * chip.)
 *
 * Therefore, using the PL080 DMA is disabled by default, for now.
 */
static int use_dma;	/* 0 = PIO (default); non-zero enables DMA paths */
225
/*
 * Reset and configure the MLC controller: geometry (large block, 5-byte
 * address), bus timings derived from the block clock, IRQ mask and nCE
 * handling.  Called from probe and on resume.
 */
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset MLC controller */
	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
	udelay(1000);

	/* Get base clock for MLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = 104000000;	/* fallback when clk framework reports 0 */

	/* Unlock MLC_ICR
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Configure MLC Controller: Large Block, 5 Byte Address */
	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
	writel(tmp, MLC_ICR(host->io_base));

	/* Unlock MLC_TIME_REG
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Compute clock setup values, see LPC and NAND manual */
	tmp = 0;
	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
	writel(tmp, MLC_TIME_REG(host->io_base));

	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
	       MLC_IRQ_MR(host->io_base));

	/* Normal nCE operation: nCE controlled by controller */
	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}
269
270/*
271 * Hardware specific access to control lines
272 */
273static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
274 unsigned int ctrl)
275{
4bd4ebcc 276 struct nand_chip *nand_chip = mtd_to_nand(mtd);
d699ed25 277 struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
70f7cb78
RS
278
279 if (cmd != NAND_CMD_NONE) {
280 if (ctrl & NAND_CLE)
281 writel(cmd, MLC_CMD(host->io_base));
282 else
283 writel(cmd, MLC_ADDR(host->io_base));
284 }
285}
286
287/*
288 * Read Device Ready (NAND device _and_ controller ready)
289 */
290static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
291{
4bd4ebcc 292 struct nand_chip *nand_chip = mtd_to_nand(mtd);
d699ed25 293 struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
70f7cb78
RS
294
295 if ((readb(MLC_ISR(host->io_base)) &
296 (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
297 (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
298 return 1;
299
300 return 0;
301}
302
303static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
304{
305 uint8_t sr;
306
307 /* Clear interrupt flag by reading status */
308 sr = readb(MLC_IRQ_SR(host->io_base));
309 if (sr & MLCIRQ_NAND_READY)
310 complete(&host->comp_nand);
311 if (sr & MLCIRQ_CONTROLLER_READY)
312 complete(&host->comp_controller);
313
314 return IRQ_HANDLED;
315}
316
317static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
318{
d699ed25 319 struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
70f7cb78
RS
320
321 if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
322 goto exit;
323
324 wait_for_completion(&host->comp_nand);
325
326 while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
327 /* Seems to be delayed sometimes by controller */
328 dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
329 cpu_relax();
330 }
331
332exit:
333 return NAND_STATUS_READY;
334}
335
336static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
337 struct nand_chip *chip)
338{
d699ed25 339 struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
70f7cb78
RS
340
341 if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
342 goto exit;
343
344 wait_for_completion(&host->comp_controller);
345
346 while (!(readb(MLC_ISR(host->io_base)) &
347 MLCISR_CONTROLLER_READY)) {
348 dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
349 cpu_relax();
350 }
351
352exit:
353 return NAND_STATUS_READY;
354}
355
/*
 * Chip wait function: wait for both the NAND device and the MLC
 * controller to become ready (in that order).
 */
static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	lpc32xx_waitfunc_nand(mtd, chip);
	lpc32xx_waitfunc_controller(mtd, chip);

	return NAND_STATUS_READY;
}
363
364/*
365 * Enable NAND write protect
366 */
367static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
368{
369 if (gpio_is_valid(host->ncfg->wp_gpio))
370 gpio_set_value(host->ncfg->wp_gpio, 0);
371}
372
373/*
374 * Disable NAND write protect
375 */
376static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
377{
378 if (gpio_is_valid(host->ncfg->wp_gpio))
379 gpio_set_value(host->ncfg->wp_gpio, 1);
380}
381
/* DMA completion callback: wakes the waiter in lpc32xx_xmit_dma(). */
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}
386
387static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
388 enum dma_transfer_direction dir)
389{
4bd4ebcc 390 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 391 struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
70f7cb78
RS
392 struct dma_async_tx_descriptor *desc;
393 int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
394 int res;
395
396 sg_init_one(&host->sgl, mem, len);
397
398 res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
399 DMA_BIDIRECTIONAL);
400 if (res != 1) {
401 dev_err(mtd->dev.parent, "Failed to map sg list\n");
402 return -ENXIO;
403 }
404 desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
405 flags);
406 if (!desc) {
407 dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
408 goto out1;
409 }
410
411 init_completion(&host->comp_dma);
412 desc->callback = lpc32xx_dma_complete_func;
413 desc->callback_param = &host->comp_dma;
414
415 dmaengine_submit(desc);
416 dma_async_issue_pending(host->dma_chan);
417
418 wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
419
420 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
421 DMA_BIDIRECTIONAL);
422 return 0;
423out1:
424 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
425 DMA_BIDIRECTIONAL);
426 return -ENXIO;
427}
428
/*
 * Read one page (data + OOB) using the controller's auto-decode ECC.
 *
 * Each 512-byte subpage is decoded by the hardware and then drained from
 * the MLC_BUFF FIFO, followed by 16 OOB bytes into chip->oob_poi.  ECC
 * statistics are updated from MLC_ISR per subpage.  Always returns 0;
 * uncorrectable errors are reported via mtd->ecc_stats.failed.
 */
static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i, j;
	uint8_t *oobbuf = chip->oob_poi;
	uint32_t mlc_isr;
	int res;
	uint8_t *dma_buf;
	bool dma_mapped;

	/*
	 * DMA directly into 'buf' when it is addressable for DMA;
	 * otherwise bounce through host->dma_buf and copy at the end.
	 * NOTE(review): the '<= high_memory' lowmem test looks like it
	 * should exclude vmalloc/highmem buffers - confirm against other
	 * lpc32xx drivers.
	 */
	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->dma_buf;
		dma_mapped = false;
	}

	/* Writing Command and Address */
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	/* For all sub-pages */
	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Auto Decode Command */
		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);

		/* Check ECC Error status */
		mlc_isr = readl(MLC_ISR(host->io_base));
		if (mlc_isr & MLCISR_DECODER_FAILURE) {
			mtd->ecc_stats.failed++;
			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
			/* MLC_ISR bits 5:4 encode the symbol error count - 1 */
			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
		}

		/* Read 512 + 16 Bytes */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_DEV_TO_MEM);
			if (res)
				return res;
		} else {
			/* PIO: drain the subpage, advancing 'buf' */
			for (j = 0; j < (512 >> 2); j++) {
				*((uint32_t *)(buf)) =
					readl(MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		/* OOB bytes follow the data in the FIFO */
		for (j = 0; j < (16 >> 2); j++) {
			*((uint32_t *)(oobbuf)) =
				readl(MLC_BUFF(host->io_base));
			oobbuf += 4;
		}
	}

	/* Bounce-buffer case: 'buf' was not advanced in the DMA path */
	if (use_dma && !dma_mapped)
		memcpy(buf, dma_buf, mtd->writesize);

	return 0;
}
493
/*
 * Write one page (data + user OOB bytes) using the controller's
 * auto-encode ECC.  For each 512-byte subpage, 512 data bytes plus
 * 6 user OOB bytes are pushed into MLC_BUFF; the controller appends
 * the remaining serial/ECC bytes itself on auto-encode.  Returns 0 on
 * success or a negative error from the DMA path.
 */
static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
				       struct nand_chip *chip,
				       const uint8_t *buf, int oob_required,
				       int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	const uint8_t *oobbuf = chip->oob_poi;
	uint8_t *dma_buf = (uint8_t *)buf;
	int res;
	int i, j;

	/* Bounce non-lowmem buffers through host->dma_buf for DMA */
	if (use_dma && (void *)buf >= high_memory) {
		dma_buf = host->dma_buf;
		memcpy(dma_buf, buf, mtd->writesize);
	}

	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Encode */
		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

		/* Write 512 + 6 Bytes to Buffer */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_MEM_TO_DEV);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				writel(*((uint32_t *)(buf)),
				       MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		/* 6 user OOB bytes (4 + 2); skip the 10 hw ECC bytes */
		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 4;
		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 12;

		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);
	}
	return 0;
}
540
70f7cb78
RS
/*
 * Read OOB for a page.  The MLC controller can only auto-decode whole
 * subpages, so the full page is read; the data lands in the throwaway
 * dummy_buf while the OOB bytes end up in chip->oob_poi (filled by
 * lpc32xx_read_page()).
 */
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Read whole page - necessary with MLC controller! */
	lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);

	return 0;
}
551
/*
 * Intentional no-op: a raw OOB write would corrupt the hardware ECC
 * bytes that the automatic LPC MLC encoder maintains.
 */
static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			     int page)
{
	/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
	return 0;
}
558
/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
	/* Always enabled! */
}
564
70f7cb78
RS
/*
 * Request and configure the dmaengine channel used for MLC_BUFF
 * transfers.  Requires platform data with a dma_filter callback.
 * Returns 0 on success, -ENOENT/-EBUSY/-ENXIO on failure.
 */
static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-mlc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	/*
	 * Set direction to a sensible value even if the dmaengine driver
	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
	 * driver criticizes it as "alien transfer direction".
	 */
	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 128;
	host->dma_slave_config.dst_maxburst = 128;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	/* Both directions target the MLC data FIFO */
	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		goto out1;
	}

	return 0;
out1:
	dma_release_channel(host->dma_chan);
	return -ENXIO;
}
608
70f7cb78
RS
609static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
610{
62beee20 611 struct lpc32xx_nand_cfg_mlc *ncfg;
70f7cb78
RS
612 struct device_node *np = dev->of_node;
613
62beee20 614 ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
3479c9dc 615 if (!ncfg)
70f7cb78 616 return NULL;
70f7cb78 617
62beee20
RS
618 of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
619 of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
620 of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
621 of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
622 of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
623 of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
624 of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
625
626 if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
627 !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
628 !ncfg->wr_low) {
70f7cb78
RS
629 dev_err(dev, "chip parameters not specified correctly\n");
630 return NULL;
631 }
632
62beee20 633 ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
70f7cb78 634
62beee20 635 return ncfg;
70f7cb78 636}
70f7cb78
RS
637
/*
 * Probe for NAND controller: map registers, parse DT config, claim the
 * WP GPIO and clock, initialize the controller and nand_chip hooks,
 * optionally set up DMA, scan the chip and register the MTD device.
 * Resources acquired after each failure point are unwound via the
 * err_exit* goto ladder below.
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *rc;
	int res;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	host->io_base_phy = rc->start;

	nand_chip = &host->nand_chip;
	mtd = nand_to_mtd(nand_chip);
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) &&
			gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	/* link the private data structures */
	nand_set_controller_data(nand_chip, host);
	nand_set_flash_node(nand_chip, pdev->dev.of_node);
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock initialization failure\n");
		res = -ENOENT;
		goto err_exit1;
	}
	clk_prepare_enable(host->clk);

	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	nand_chip->dev_ready = lpc32xx_nand_device_ready;
	nand_chip->chip_delay = 25; /* us */
	nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
	nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* Initialize function pointers */
	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
	nand_chip->ecc.read_page = lpc32xx_read_page;
	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_oob = lpc32xx_write_oob;
	nand_chip->ecc.read_oob = lpc32xx_read_oob;
	nand_chip->ecc.strength = 4;
	nand_chip->waitfunc = lpc32xx_waitfunc;

	nand_chip->options = NAND_NO_SUBPAGE_WRITE;
	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	nand_chip->bbt_td = &lpc32xx_nand_bbt;
	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

	if (use_dma) {
		res = lpc32xx_dma_setup(host);
		if (res) {
			res = -EIO;
			goto err_exit2;
		}
	}

	/*
	 * Scan to find existence of the device and
	 * get the type of NAND device: SMALL block or LARGE block.
	 */
	if (nand_scan_ident(mtd, 1, NULL)) {
		res = -ENXIO;
		goto err_exit3;
	}

	/* Buffers sized from the now-known page size */
	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dma_buf) {
		res = -ENOMEM;
		goto err_exit3;
	}

	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dummy_buf) {
		res = -ENOMEM;
		goto err_exit3;
	}

	nand_chip->ecc.mode = NAND_ECC_HW;
	nand_chip->ecc.size = 512;
	nand_chip->ecc.layout = &lpc32xx_nand_oob;
	host->mlcsubpages = mtd->writesize / 512;

	/* initially clear interrupt status */
	readb(MLC_IRQ_SR(host->io_base));

	init_completion(&host->comp_nand);
	init_completion(&host->comp_controller);

	host->irq = platform_get_irq(pdev, 0);
	if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
		dev_err(&pdev->dev, "failed to get platform irq\n");
		res = -EINVAL;
		goto err_exit3;
	}

	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
		res = -ENXIO;
		goto err_exit3;
	}

	/*
	 * Fills out all the uninitialized function pointers with the defaults
	 * And scans for a bad block table if appropriate.
	 */
	if (nand_scan_tail(mtd)) {
		res = -ENXIO;
		goto err_exit4;
	}

	mtd->name = DRV_NAME;

	res = mtd_device_register(mtd, host->ncfg->parts,
				  host->ncfg->num_parts);
	if (!res)
		return res;

	nand_release(mtd);

err_exit4:
	free_irq(host->irq, host);
err_exit3:
	/* DMA channel only exists when use_dma and setup succeeded */
	if (use_dma)
		dma_release_channel(host->dma_chan);
err_exit2:
	clk_disable_unprepare(host->clk);
	clk_put(host->clk);
err_exit1:
	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return res;
}
808
/*
 * Remove NAND device: unregister the MTD, release IRQ/DMA/clock, then
 * re-assert write protect and free the WP GPIO.
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);

	nand_release(mtd);
	free_irq(host->irq, host);
	if (use_dma)
		dma_release_channel(host->dma_chan);

	clk_disable_unprepare(host->clk);
	clk_put(host->clk);

	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return 0;
}
830
#ifdef CONFIG_PM
/*
 * Resume: re-enable the clock, fully re-initialize the controller
 * (its register state is lost across suspend) and lift write protect.
 */
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Re-enable NAND clock */
	clk_prepare_enable(host->clk);

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}

/*
 * Suspend: assert write protect so the array cannot be modified while
 * down, then gate the clock.
 */
static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Disable clock */
	clk_disable_unprepare(host->clk);
	return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif
864
70f7cb78
RS
/* Device-tree match table for this controller. */
static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-mlc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,	/* NULL when !CONFIG_PM */
	.suspend	= lpc32xx_nand_suspend,	/* NULL when !CONFIG_PM */
	.driver		= {
		.name	= DRV_NAME,
		.of_match_table = lpc32xx_nand_match,
	},
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");