]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/mtd/nand/pxa3xx_nand.c
Merge remote-tracking branches 'asoc/topic/tas6424', 'asoc/topic/tfa9879', 'asoc...
[mirror_ubuntu-focal-kernel.git] / drivers / mtd / nand / pxa3xx_nand.c
CommitLineData
fe69af00 1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
de484a38
EG
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
fe69af00 12 */
13
a88bdbb5 14#include <linux/kernel.h>
fe69af00 15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
8f5ba31a 18#include <linux/dmaengine.h>
fe69af00 19#include <linux/dma-mapping.h>
8f5ba31a 20#include <linux/dma/pxa-dma.h>
fe69af00 21#include <linux/delay.h>
22#include <linux/clk.h>
23#include <linux/mtd/mtd.h>
d4092d76 24#include <linux/mtd/rawnand.h>
fe69af00 25#include <linux/mtd/partitions.h>
a1c06ee1 26#include <linux/io.h>
afca11ec 27#include <linux/iopoll.h>
a1c06ee1 28#include <linux/irq.h>
5a0e3ad6 29#include <linux/slab.h>
1e7ba630
DM
30#include <linux/of.h>
31#include <linux/of_device.h>
293b2da1 32#include <linux/platform_data/mtd-nand-pxa3xx.h>
fc256f57
MR
33#include <linux/mfd/syscon.h>
34#include <linux/regmap.h>
fe69af00 35
e5860c18
NMG
36#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
37#define NAND_STOP_DELAY msecs_to_jiffies(40)
4eb2da89 38#define PAGE_CHUNK_SIZE (2048)
fe69af00 39
62e8b851
EG
40/*
41 * Define a buffer size for the initial command that detects the flash device:
c1634097
EG
42 * STATUS, READID and PARAM.
43 * ONFI param page is 256 bytes, and there are three redundant copies
44 * to be read. JEDEC param page is 512 bytes, and there are also three
45 * redundant copies to be read.
46 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
62e8b851 47 */
c1634097 48#define INIT_BUFFER_SIZE 2048
62e8b851 49
fc256f57
MR
50/* System control register and bit to enable NAND on some SoCs */
51#define GENCONF_SOC_DEVICE_MUX 0x208
52#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
53
fe69af00 54/* registers and bit definitions */
55#define NDCR (0x00) /* Control register */
56#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
57#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
58#define NDSR (0x14) /* Status Register */
59#define NDPCR (0x18) /* Page Count Register */
60#define NDBDR0 (0x1C) /* Bad Block Register 0 */
61#define NDBDR1 (0x20) /* Bad Block Register 1 */
43bcfd2b 62#define NDECCCTRL (0x28) /* ECC control */
fe69af00 63#define NDDB (0x40) /* Data Buffer */
64#define NDCB0 (0x48) /* Command Buffer0 */
65#define NDCB1 (0x4C) /* Command Buffer1 */
66#define NDCB2 (0x50) /* Command Buffer2 */
67
68#define NDCR_SPARE_EN (0x1 << 31)
69#define NDCR_ECC_EN (0x1 << 30)
70#define NDCR_DMA_EN (0x1 << 29)
71#define NDCR_ND_RUN (0x1 << 28)
72#define NDCR_DWIDTH_C (0x1 << 27)
73#define NDCR_DWIDTH_M (0x1 << 26)
74#define NDCR_PAGE_SZ (0x1 << 24)
75#define NDCR_NCSX (0x1 << 23)
76#define NDCR_ND_MODE (0x3 << 21)
77#define NDCR_NAND_MODE (0x0)
78#define NDCR_CLR_PG_CNT (0x1 << 20)
e971affa
RJ
79#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
80#define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
fe69af00 81#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
82#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
83
84#define NDCR_RA_START (0x1 << 15)
85#define NDCR_PG_PER_BLK (0x1 << 14)
86#define NDCR_ND_ARB_EN (0x1 << 12)
f8155a40 87#define NDCR_INT_MASK (0xFFF)
fe69af00 88
89#define NDSR_MASK (0xfff)
87f5336e
EG
90#define NDSR_ERR_CNT_OFF (16)
91#define NDSR_ERR_CNT_MASK (0x1f)
92#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
f8155a40
LW
93#define NDSR_RDY (0x1 << 12)
94#define NDSR_FLASH_RDY (0x1 << 11)
fe69af00 95#define NDSR_CS0_PAGED (0x1 << 10)
96#define NDSR_CS1_PAGED (0x1 << 9)
97#define NDSR_CS0_CMDD (0x1 << 8)
98#define NDSR_CS1_CMDD (0x1 << 7)
99#define NDSR_CS0_BBD (0x1 << 6)
100#define NDSR_CS1_BBD (0x1 << 5)
87f5336e
EG
101#define NDSR_UNCORERR (0x1 << 4)
102#define NDSR_CORERR (0x1 << 3)
fe69af00 103#define NDSR_WRDREQ (0x1 << 2)
104#define NDSR_RDDREQ (0x1 << 1)
105#define NDSR_WRCMDREQ (0x1)
106
41a63430 107#define NDCB0_LEN_OVRD (0x1 << 28)
4eb2da89 108#define NDCB0_ST_ROW_EN (0x1 << 26)
fe69af00 109#define NDCB0_AUTO_RS (0x1 << 25)
110#define NDCB0_CSEL (0x1 << 24)
70ed8523
EG
111#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
112#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
fe69af00 113#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
114#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
115#define NDCB0_NC (0x1 << 20)
116#define NDCB0_DBC (0x1 << 19)
117#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
118#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
119#define NDCB0_CMD2_MASK (0xff << 8)
120#define NDCB0_CMD1_MASK (0xff)
121#define NDCB0_ADDR_CYC_SHIFT (16)
122
70ed8523
EG
123#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
124#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
125#define EXT_CMD_TYPE_READ 4 /* Read */
126#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
127#define EXT_CMD_TYPE_FINAL 3 /* Final command */
128#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
129#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
130
b226eca2
EG
131/*
132 * This should be large enough to read 'ONFI' and 'JEDEC'.
133 * Let's use 7 bytes, which is the maximum ID count supported
134 * by the controller (see NDCR_RD_ID_CNT_MASK).
135 */
136#define READ_ID_BYTES 7
137
fe69af00 138/* macros for registers read/write */
26d072e3
RJ
139#define nand_writel(info, off, val) \
140 do { \
141 dev_vdbg(&info->pdev->dev, \
142 "%s():%d nand_writel(0x%x, 0x%04x)\n", \
143 __func__, __LINE__, (val), (off)); \
144 writel_relaxed((val), (info)->mmio_base + (off)); \
145 } while (0)
fe69af00 146
26d072e3
RJ
147#define nand_readl(info, off) \
148 ({ \
149 unsigned int _v; \
150 _v = readl_relaxed((info)->mmio_base + (off)); \
151 dev_vdbg(&info->pdev->dev, \
152 "%s():%d nand_readl(0x%04x) = 0x%x\n", \
153 __func__, __LINE__, (off), _v); \
154 _v; \
155 })
fe69af00 156
157/* error code and state */
158enum {
159 ERR_NONE = 0,
160 ERR_DMABUSERR = -1,
161 ERR_SENDCMD = -2,
87f5336e 162 ERR_UNCORERR = -3,
fe69af00 163 ERR_BBERR = -4,
87f5336e 164 ERR_CORERR = -5,
fe69af00 165};
166
167enum {
f8155a40 168 STATE_IDLE = 0,
d456882b 169 STATE_PREPARED,
fe69af00 170 STATE_CMD_HANDLE,
171 STATE_DMA_READING,
172 STATE_DMA_WRITING,
173 STATE_DMA_DONE,
174 STATE_PIO_READING,
175 STATE_PIO_WRITING,
f8155a40
LW
176 STATE_CMD_DONE,
177 STATE_READY,
fe69af00 178};
179
c0f3b864
EG
180enum pxa3xx_nand_variant {
181 PXA3XX_NAND_VARIANT_PXA,
182 PXA3XX_NAND_VARIANT_ARMADA370,
fc256f57 183 PXA3XX_NAND_VARIANT_ARMADA_8K,
c0f3b864
EG
184};
185
d456882b
LW
186struct pxa3xx_nand_host {
187 struct nand_chip chip;
d456882b
LW
188 void *info_data;
189
190 /* page size of attached chip */
d456882b 191 int use_ecc;
f3c8cfc2 192 int cs;
fe69af00 193
d456882b
LW
194 /* calculated from pxa3xx_nand_flash data */
195 unsigned int col_addr_cycles;
196 unsigned int row_addr_cycles;
d456882b
LW
197};
198
199struct pxa3xx_nand_info {
401e67e2 200 struct nand_hw_control controller;
fe69af00 201 struct platform_device *pdev;
fe69af00 202
203 struct clk *clk;
204 void __iomem *mmio_base;
8638fac8 205 unsigned long mmio_phys;
55d9fd6e 206 struct completion cmd_complete, dev_ready;
fe69af00 207
208 unsigned int buf_start;
209 unsigned int buf_count;
62e8b851 210 unsigned int buf_size;
fa543bef
EG
211 unsigned int data_buff_pos;
212 unsigned int oob_buff_pos;
fe69af00 213
214 /* DMA information */
8f5ba31a
RJ
215 struct scatterlist sg;
216 enum dma_data_direction dma_dir;
217 struct dma_chan *dma_chan;
218 dma_cookie_t dma_cookie;
fe69af00 219 int drcmr_dat;
fe69af00 220
221 unsigned char *data_buff;
18c81b18 222 unsigned char *oob_buff;
fe69af00 223 dma_addr_t data_buff_phys;
fe69af00 224 int data_dma_ch;
fe69af00 225
f3c8cfc2 226 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
fe69af00 227 unsigned int state;
228
c0f3b864
EG
229 /*
230 * This driver supports NFCv1 (as found in PXA SoC)
231 * and NFCv2 (as found in Armada 370/XP SoC).
232 */
233 enum pxa3xx_nand_variant variant;
234
f3c8cfc2 235 int cs;
fe69af00 236 int use_ecc; /* use HW ECC ? */
43bcfd2b 237 int ecc_bch; /* using BCH ECC? */
fe69af00 238 int use_dma; /* use DMA ? */
5bb653e8 239 int use_spare; /* use spare ? */
55d9fd6e 240 int need_wait;
fe69af00 241
c2cdace7
TP
242 /* Amount of real data per full chunk */
243 unsigned int chunk_size;
244
245 /* Amount of spare data per full chunk */
43bcfd2b 246 unsigned int spare_size;
c2cdace7
TP
247
248 /* Number of full chunks (i.e chunk_size + spare_size) */
249 unsigned int nfullchunks;
250
251 /*
252 * Total number of chunks. If equal to nfullchunks, then there
253 * are only full chunks. Otherwise, there is one last chunk of
254 * size (last_chunk_size + last_spare_size)
255 */
256 unsigned int ntotalchunks;
257
258 /* Amount of real data in the last chunk */
259 unsigned int last_chunk_size;
260
261 /* Amount of spare data in the last chunk */
262 unsigned int last_spare_size;
263
43bcfd2b 264 unsigned int ecc_size;
87f5336e
EG
265 unsigned int ecc_err_cnt;
266 unsigned int max_bitflips;
fe69af00 267 int retcode;
fe69af00 268
c2cdace7
TP
269 /*
270 * Variables only valid during command
271 * execution. step_chunk_size and step_spare_size is the
272 * amount of real data and spare data in the current
273 * chunk. cur_chunk is the current chunk being
274 * read/programmed.
275 */
276 unsigned int step_chunk_size;
277 unsigned int step_spare_size;
278 unsigned int cur_chunk;
279
48cf7efa
EG
280 /* cached register value */
281 uint32_t reg_ndcr;
282 uint32_t ndtr0cs0;
283 uint32_t ndtr1cs0;
284
fe69af00 285 /* generated NDCBx register values */
286 uint32_t ndcb0;
287 uint32_t ndcb1;
288 uint32_t ndcb2;
3a1a344a 289 uint32_t ndcb3;
fe69af00 290};
291
90ab5ee9 292static bool use_dma = 1;
fe69af00 293module_param(use_dma, bool, 0444);
25985edc 294MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
fe69af00 295
a9cadf72
EG
296struct pxa3xx_nand_timing {
297 unsigned int tCH; /* Enable signal hold time */
298 unsigned int tCS; /* Enable signal setup time */
299 unsigned int tWH; /* ND_nWE high duration */
300 unsigned int tWP; /* ND_nWE pulse time */
301 unsigned int tRH; /* ND_nRE high duration */
302 unsigned int tRP; /* ND_nRE pulse width */
303 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
304 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
305 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
306};
307
308struct pxa3xx_nand_flash {
a9cadf72 309 uint32_t chip_id;
a9cadf72
EG
310 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
311 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
a9cadf72
EG
312 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
313};
314
c1f82478 315static struct pxa3xx_nand_timing timing[] = {
227a886c
LW
316 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
317 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
318 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
319 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
d3490dfd
HZ
320};
321
c1f82478 322static struct pxa3xx_nand_flash builtin_flash_types[] = {
89c1702d
AT
323 { 0x46ec, 16, 16, &timing[1] },
324 { 0xdaec, 8, 8, &timing[1] },
325 { 0xd7ec, 8, 8, &timing[1] },
326 { 0xa12c, 8, 8, &timing[2] },
327 { 0xb12c, 16, 16, &timing[2] },
328 { 0xdc2c, 8, 8, &timing[2] },
329 { 0xcc2c, 16, 16, &timing[2] },
330 { 0xba20, 16, 16, &timing[3] },
d3490dfd
HZ
331};
332
39980c56
BB
333static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section,
334 struct mtd_oob_region *oobregion)
335{
336 struct nand_chip *chip = mtd_to_nand(mtd);
337 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
338 struct pxa3xx_nand_info *info = host->info_data;
339 int nchunks = mtd->writesize / info->chunk_size;
340
341 if (section >= nchunks)
342 return -ERANGE;
343
344 oobregion->offset = ((info->ecc_size + info->spare_size) * section) +
345 info->spare_size;
346 oobregion->length = info->ecc_size;
347
348 return 0;
349}
350
351static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section,
352 struct mtd_oob_region *oobregion)
353{
354 struct nand_chip *chip = mtd_to_nand(mtd);
355 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
356 struct pxa3xx_nand_info *info = host->info_data;
357 int nchunks = mtd->writesize / info->chunk_size;
358
359 if (section >= nchunks)
360 return -ERANGE;
361
362 if (!info->spare_size)
363 return 0;
364
365 oobregion->offset = section * (info->ecc_size + info->spare_size);
366 oobregion->length = info->spare_size;
367 if (!section) {
368 /*
369 * Bootrom looks in bytes 0 & 5 for bad blocks for the
370 * 4KB page / 4bit BCH combination.
371 */
372 if (mtd->writesize == 4096 && info->chunk_size == 2048) {
373 oobregion->offset += 6;
374 oobregion->length -= 6;
375 } else {
376 oobregion->offset += 2;
377 oobregion->length -= 2;
378 }
379 }
380
381 return 0;
382}
383
384static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = {
385 .ecc = pxa3xx_ooblayout_ecc,
386 .free = pxa3xx_ooblayout_free,
387};
388
776f265e
EG
389static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
390static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
391
392static struct nand_bbt_descr bbt_main_descr = {
393 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
394 | NAND_BBT_2BIT | NAND_BBT_VERSION,
395 .offs = 8,
396 .len = 6,
397 .veroffs = 14,
398 .maxblocks = 8, /* Last 8 blocks in each chip */
399 .pattern = bbt_pattern
400};
401
402static struct nand_bbt_descr bbt_mirror_descr = {
403 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
404 | NAND_BBT_2BIT | NAND_BBT_VERSION,
405 .offs = 8,
406 .len = 6,
407 .veroffs = 14,
408 .maxblocks = 8, /* Last 8 blocks in each chip */
409 .pattern = bbt_mirror_pattern
410};
411
fe69af00 412#define NDTR0_tCH(c) (min((c), 7) << 19)
413#define NDTR0_tCS(c) (min((c), 7) << 16)
414#define NDTR0_tWH(c) (min((c), 7) << 11)
415#define NDTR0_tWP(c) (min((c), 7) << 8)
416#define NDTR0_tRH(c) (min((c), 7) << 3)
417#define NDTR0_tRP(c) (min((c), 7) << 0)
418
419#define NDTR1_tR(c) (min((c), 65535) << 16)
420#define NDTR1_tWHR(c) (min((c), 15) << 4)
421#define NDTR1_tAR(c) (min((c), 15) << 0)
422
423/* convert nano-seconds to nand flash controller clock cycles */
93b352fc 424#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
fe69af00 425
17754ad6 426static const struct of_device_id pxa3xx_nand_dt_ids[] = {
c7e9c7e7
EG
427 {
428 .compatible = "marvell,pxa3xx-nand",
429 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
430 },
1963ff97
EG
431 {
432 .compatible = "marvell,armada370-nand",
433 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
434 },
fc256f57
MR
435 {
436 .compatible = "marvell,armada-8k-nand",
437 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
438 },
c7e9c7e7
EG
439 {}
440};
441MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
442
443static enum pxa3xx_nand_variant
444pxa3xx_nand_get_variant(struct platform_device *pdev)
445{
446 const struct of_device_id *of_id =
447 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
448 if (!of_id)
449 return PXA3XX_NAND_VARIANT_PXA;
450 return (enum pxa3xx_nand_variant)of_id->data;
451}
452
d456882b 453static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
7dad482e 454 const struct pxa3xx_nand_timing *t)
fe69af00 455{
d456882b 456 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 457 unsigned long nand_clk = clk_get_rate(info->clk);
458 uint32_t ndtr0, ndtr1;
459
460 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
461 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
462 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
463 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
464 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
465 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
466
467 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
468 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
469 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
470
48cf7efa
EG
471 info->ndtr0cs0 = ndtr0;
472 info->ndtr1cs0 = ndtr1;
fe69af00 473 nand_writel(info, NDTR0CS0, ndtr0);
474 nand_writel(info, NDTR1CS0, ndtr1);
475}
476
3f225b7f
AT
477static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
478 const struct nand_sdr_timings *t)
479{
480 struct pxa3xx_nand_info *info = host->info_data;
481 struct nand_chip *chip = &host->chip;
482 unsigned long nand_clk = clk_get_rate(info->clk);
483 uint32_t ndtr0, ndtr1;
484
485 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
486 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
487 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
488 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
489 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
490 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
491 u32 tR = chip->chip_delay * 1000;
492 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
493 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
494
495 /* fallback to a default value if tR = 0 */
496 if (!tR)
497 tR = 20000;
498
499 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
500 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
501 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
502 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
503 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
504 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
505
506 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
507 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
508 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
509
510 info->ndtr0cs0 = ndtr0;
511 info->ndtr1cs0 = ndtr1;
512 nand_writel(info, NDTR0CS0, ndtr0);
513 nand_writel(info, NDTR1CS0, ndtr1);
514}
515
516static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
517 unsigned int *flash_width,
518 unsigned int *dfc_width)
519{
520 struct nand_chip *chip = &host->chip;
521 struct pxa3xx_nand_info *info = host->info_data;
522 const struct pxa3xx_nand_flash *f = NULL;
063294a3 523 struct mtd_info *mtd = nand_to_mtd(&host->chip);
3f225b7f
AT
524 int i, id, ntypes;
525
526 ntypes = ARRAY_SIZE(builtin_flash_types);
527
063294a3 528 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3f225b7f 529
063294a3
BB
530 id = chip->read_byte(mtd);
531 id |= chip->read_byte(mtd) << 0x8;
3f225b7f
AT
532
533 for (i = 0; i < ntypes; i++) {
534 f = &builtin_flash_types[i];
535
536 if (f->chip_id == id)
537 break;
538 }
539
540 if (i == ntypes) {
541 dev_err(&info->pdev->dev, "Error: timings not found\n");
542 return -EINVAL;
543 }
544
545 pxa3xx_nand_set_timing(host, f->timing);
546
547 *flash_width = f->flash_width;
548 *dfc_width = f->dfc_width;
549
550 return 0;
551}
552
/*
 * ONFI timing setup: pick the fastest supported asynchronous timing
 * mode from the @mode bitmask (highest set bit, defaulting to mode 0)
 * and program the controller accordingly.
 */
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *sdr;

	mode = fls(mode) - 1;
	if (mode < 0)
		mode = 0;

	sdr = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	pxa3xx_nand_set_sdr_timing(host, sdr);

	return 0;
}
570
571static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
572{
573 struct nand_chip *chip = &host->chip;
574 struct pxa3xx_nand_info *info = host->info_data;
575 unsigned int flash_width = 0, dfc_width = 0;
576 int mode, err;
577
578 mode = onfi_get_async_timing_mode(chip);
579 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
580 err = pxa3xx_nand_init_timings_compat(host, &flash_width,
581 &dfc_width);
582 if (err)
583 return err;
584
585 if (flash_width == 16) {
586 info->reg_ndcr |= NDCR_DWIDTH_M;
587 chip->options |= NAND_BUSWIDTH_16;
588 }
589
590 info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
591 } else {
592 err = pxa3xx_nand_init_timings_onfi(host, mode);
593 if (err)
594 return err;
595 }
596
597 return 0;
598}
599
f8155a40
LW
600/**
601 * NOTE: it is a must to set ND_RUN firstly, then write
602 * command buffer, otherwise, it does not work.
603 * We enable all the interrupt at the same time, and
604 * let pxa3xx_nand_irq to handle all logic.
605 */
606static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
607{
608 uint32_t ndcr;
609
48cf7efa 610 ndcr = info->reg_ndcr;
cd9d1182 611
43bcfd2b 612 if (info->use_ecc) {
cd9d1182 613 ndcr |= NDCR_ECC_EN;
43bcfd2b
EG
614 if (info->ecc_bch)
615 nand_writel(info, NDECCCTRL, 0x1);
616 } else {
cd9d1182 617 ndcr &= ~NDCR_ECC_EN;
43bcfd2b
EG
618 if (info->ecc_bch)
619 nand_writel(info, NDECCCTRL, 0x0);
620 }
cd9d1182
EG
621
622 if (info->use_dma)
623 ndcr |= NDCR_DMA_EN;
624 else
625 ndcr &= ~NDCR_DMA_EN;
626
5bb653e8
EG
627 if (info->use_spare)
628 ndcr |= NDCR_SPARE_EN;
629 else
630 ndcr &= ~NDCR_SPARE_EN;
631
f8155a40
LW
632 ndcr |= NDCR_ND_RUN;
633
634 /* clear status bits and run */
f8155a40 635 nand_writel(info, NDSR, NDSR_MASK);
0b14392d 636 nand_writel(info, NDCR, 0);
f8155a40
LW
637 nand_writel(info, NDCR, ndcr);
638}
639
640static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
641{
642 uint32_t ndcr;
643 int timeout = NAND_STOP_DELAY;
644
645 /* wait RUN bit in NDCR become 0 */
646 ndcr = nand_readl(info, NDCR);
647 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
648 ndcr = nand_readl(info, NDCR);
649 udelay(1);
650 }
651
652 if (timeout <= 0) {
653 ndcr &= ~NDCR_ND_RUN;
654 nand_writel(info, NDCR, ndcr);
655 }
8f5ba31a
RJ
656 if (info->dma_chan)
657 dmaengine_terminate_all(info->dma_chan);
658
f8155a40
LW
659 /* clear status bits */
660 nand_writel(info, NDSR, NDSR_MASK);
661}
662
57ff88f0
EG
663static void __maybe_unused
664enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
fe69af00 665{
666 uint32_t ndcr;
667
668 ndcr = nand_readl(info, NDCR);
669 nand_writel(info, NDCR, ndcr & ~int_mask);
670}
671
672static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
673{
674 uint32_t ndcr;
675
676 ndcr = nand_readl(info, NDCR);
677 nand_writel(info, NDCR, ndcr | int_mask);
678}
679
8dad0386
MR
680static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
681{
682 if (info->ecc_bch) {
afca11ec
MR
683 u32 val;
684 int ret;
8dad0386
MR
685
686 /*
687 * According to the datasheet, when reading from NDDB
688 * with BCH enabled, after each 32 bytes reads, we
689 * have to make sure that the NDSR.RDDREQ bit is set.
690 *
691 * Drain the FIFO 8 32 bits reads at a time, and skip
692 * the polling on the last read.
693 */
694 while (len > 8) {
ab53a571 695 ioread32_rep(info->mmio_base + NDDB, data, 8);
8dad0386 696
afca11ec
MR
697 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
698 val & NDSR_RDDREQ, 1000, 5000);
699 if (ret) {
700 dev_err(&info->pdev->dev,
701 "Timeout on RDDREQ while draining the FIFO\n");
702 return;
8dad0386
MR
703 }
704
705 data += 32;
706 len -= 8;
707 }
708 }
709
ab53a571 710 ioread32_rep(info->mmio_base + NDDB, data, len);
8dad0386
MR
711}
712
f8155a40 713static void handle_data_pio(struct pxa3xx_nand_info *info)
fe69af00 714{
fe69af00 715 switch (info->state) {
716 case STATE_PIO_WRITING:
c2cdace7
TP
717 if (info->step_chunk_size)
718 writesl(info->mmio_base + NDDB,
719 info->data_buff + info->data_buff_pos,
720 DIV_ROUND_UP(info->step_chunk_size, 4));
fa543bef 721
c2cdace7 722 if (info->step_spare_size)
ce914e6b
RH
723 writesl(info->mmio_base + NDDB,
724 info->oob_buff + info->oob_buff_pos,
c2cdace7 725 DIV_ROUND_UP(info->step_spare_size, 4));
fe69af00 726 break;
727 case STATE_PIO_READING:
c2cdace7
TP
728 if (info->step_chunk_size)
729 drain_fifo(info,
730 info->data_buff + info->data_buff_pos,
731 DIV_ROUND_UP(info->step_chunk_size, 4));
fa543bef 732
c2cdace7 733 if (info->step_spare_size)
8dad0386
MR
734 drain_fifo(info,
735 info->oob_buff + info->oob_buff_pos,
c2cdace7 736 DIV_ROUND_UP(info->step_spare_size, 4));
fe69af00 737 break;
738 default:
da675b4e 739 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
fe69af00 740 info->state);
f8155a40 741 BUG();
fe69af00 742 }
fa543bef
EG
743
744 /* Update buffer pointers for multi-page read/write */
c2cdace7
TP
745 info->data_buff_pos += info->step_chunk_size;
746 info->oob_buff_pos += info->step_spare_size;
fe69af00 747}
748
8f5ba31a 749static void pxa3xx_nand_data_dma_irq(void *data)
fe69af00 750{
8f5ba31a
RJ
751 struct pxa3xx_nand_info *info = data;
752 struct dma_tx_state state;
753 enum dma_status status;
fe69af00 754
8f5ba31a
RJ
755 status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
756 if (likely(status == DMA_COMPLETE)) {
757 info->state = STATE_DMA_DONE;
758 } else {
759 dev_err(&info->pdev->dev, "DMA error on data channel\n");
760 info->retcode = ERR_DMABUSERR;
761 }
762 dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
763
764 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
765 enable_int(info, NDCR_INT_MASK);
766}
767
768static void start_data_dma(struct pxa3xx_nand_info *info)
769{
770 enum dma_transfer_direction direction;
771 struct dma_async_tx_descriptor *tx;
fe69af00 772
f8155a40
LW
773 switch (info->state) {
774 case STATE_DMA_WRITING:
8f5ba31a
RJ
775 info->dma_dir = DMA_TO_DEVICE;
776 direction = DMA_MEM_TO_DEV;
f8155a40
LW
777 break;
778 case STATE_DMA_READING:
8f5ba31a
RJ
779 info->dma_dir = DMA_FROM_DEVICE;
780 direction = DMA_DEV_TO_MEM;
f8155a40
LW
781 break;
782 default:
da675b4e 783 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
f8155a40
LW
784 info->state);
785 BUG();
fe69af00 786 }
c2cdace7
TP
787 info->sg.length = info->chunk_size;
788 if (info->use_spare)
789 info->sg.length += info->spare_size + info->ecc_size;
8f5ba31a
RJ
790 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
791
792 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
793 DMA_PREP_INTERRUPT);
794 if (!tx) {
795 dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
796 return;
fe69af00 797 }
8f5ba31a
RJ
798 tx->callback = pxa3xx_nand_data_dma_irq;
799 tx->callback_param = info;
800 info->dma_cookie = dmaengine_submit(tx);
801 dma_async_issue_pending(info->dma_chan);
802 dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
803 __func__, direction, info->dma_cookie, info->sg.length);
fe69af00 804}
805
24542257
RJ
806static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
807{
808 struct pxa3xx_nand_info *info = data;
809
810 handle_data_pio(info);
811
812 info->state = STATE_CMD_DONE;
813 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
814
815 return IRQ_HANDLED;
816}
817
fe69af00 818static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
819{
820 struct pxa3xx_nand_info *info = devid;
55d9fd6e 821 unsigned int status, is_completed = 0, is_ready = 0;
f3c8cfc2 822 unsigned int ready, cmd_done;
24542257 823 irqreturn_t ret = IRQ_HANDLED;
f3c8cfc2
LW
824
825 if (info->cs == 0) {
826 ready = NDSR_FLASH_RDY;
827 cmd_done = NDSR_CS0_CMDD;
828 } else {
829 ready = NDSR_RDY;
830 cmd_done = NDSR_CS1_CMDD;
831 }
fe69af00 832
833 status = nand_readl(info, NDSR);
834
87f5336e
EG
835 if (status & NDSR_UNCORERR)
836 info->retcode = ERR_UNCORERR;
837 if (status & NDSR_CORERR) {
838 info->retcode = ERR_CORERR;
fc256f57
MR
839 if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
840 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
87f5336e
EG
841 info->ecc_bch)
842 info->ecc_err_cnt = NDSR_ERR_CNT(status);
843 else
844 info->ecc_err_cnt = 1;
845
846 /*
847 * Each chunk composing a page is corrected independently,
848 * and we need to store maximum number of corrected bitflips
849 * to return it to the MTD layer in ecc.read_page().
850 */
851 info->max_bitflips = max_t(unsigned int,
852 info->max_bitflips,
853 info->ecc_err_cnt);
854 }
f8155a40
LW
855 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
856 /* whether use dma to transfer data */
fe69af00 857 if (info->use_dma) {
f8155a40
LW
858 disable_int(info, NDCR_INT_MASK);
859 info->state = (status & NDSR_RDDREQ) ?
860 STATE_DMA_READING : STATE_DMA_WRITING;
861 start_data_dma(info);
862 goto NORMAL_IRQ_EXIT;
fe69af00 863 } else {
f8155a40
LW
864 info->state = (status & NDSR_RDDREQ) ?
865 STATE_PIO_READING : STATE_PIO_WRITING;
24542257
RJ
866 ret = IRQ_WAKE_THREAD;
867 goto NORMAL_IRQ_EXIT;
fe69af00 868 }
fe69af00 869 }
f3c8cfc2 870 if (status & cmd_done) {
f8155a40
LW
871 info->state = STATE_CMD_DONE;
872 is_completed = 1;
fe69af00 873 }
f3c8cfc2 874 if (status & ready) {
f8155a40 875 info->state = STATE_READY;
55d9fd6e 876 is_ready = 1;
401e67e2 877 }
fe69af00 878
21fc0ef9
RJ
879 /*
880 * Clear all status bit before issuing the next command, which
881 * can and will alter the status bits and will deserve a new
882 * interrupt on its own. This lets the controller exit the IRQ
883 */
884 nand_writel(info, NDSR, status);
885
f8155a40 886 if (status & NDSR_WRCMDREQ) {
f8155a40
LW
887 status &= ~NDSR_WRCMDREQ;
888 info->state = STATE_CMD_HANDLE;
3a1a344a
EG
889
890 /*
891 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
892 * must be loaded by writing directly either 12 or 16
893 * bytes directly to NDCB0, four bytes at a time.
894 *
895 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
896 * but each NDCBx register can be read.
897 */
f8155a40
LW
898 nand_writel(info, NDCB0, info->ndcb0);
899 nand_writel(info, NDCB0, info->ndcb1);
900 nand_writel(info, NDCB0, info->ndcb2);
3a1a344a
EG
901
902 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
fc256f57
MR
903 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
904 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
3a1a344a 905 nand_writel(info, NDCB0, info->ndcb3);
fe69af00 906 }
907
f8155a40
LW
908 if (is_completed)
909 complete(&info->cmd_complete);
55d9fd6e
EG
910 if (is_ready)
911 complete(&info->dev_ready);
f8155a40 912NORMAL_IRQ_EXIT:
24542257 913 return ret;
fe69af00 914}
915
/* Return 1 when all @len bytes of @buf hold the erased-flash value 0xff. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (buf[i] != 0xff)
			return 0;

	return 1;
}
923
86beebae
EG
924static void set_command_address(struct pxa3xx_nand_info *info,
925 unsigned int page_size, uint16_t column, int page_addr)
926{
927 /* small page addr setting */
928 if (page_size < PAGE_CHUNK_SIZE) {
929 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
930 | (column & 0xFF);
931
932 info->ndcb2 = 0;
933 } else {
934 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
935 | (column & 0xFFFF);
936
937 if (page_addr & 0xFF0000)
938 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
939 else
940 info->ndcb2 = 0;
941 }
942}
943
c39ff03a 944static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
fe69af00 945{
39f83d15 946 struct pxa3xx_nand_host *host = info->host[info->cs];
063294a3 947 struct mtd_info *mtd = nand_to_mtd(&host->chip);
39f83d15 948
4eb2da89 949 /* reset data and oob column point to handle data */
401e67e2
LW
950 info->buf_start = 0;
951 info->buf_count = 0;
fa543bef
EG
952 info->data_buff_pos = 0;
953 info->oob_buff_pos = 0;
c2cdace7
TP
954 info->step_chunk_size = 0;
955 info->step_spare_size = 0;
956 info->cur_chunk = 0;
4eb2da89 957 info->use_ecc = 0;
5bb653e8 958 info->use_spare = 1;
4eb2da89 959 info->retcode = ERR_NONE;
87f5336e 960 info->ecc_err_cnt = 0;
f0e6a32e 961 info->ndcb3 = 0;
d20d0a6c 962 info->need_wait = 0;
fe69af00 963
964 switch (command) {
4eb2da89 965 case NAND_CMD_READ0:
fee4380f 966 case NAND_CMD_READOOB:
4eb2da89
LW
967 case NAND_CMD_PAGEPROG:
968 info->use_ecc = 1;
fe69af00 969 break;
41a63430
EG
970 case NAND_CMD_PARAM:
971 info->use_spare = 0;
972 break;
4eb2da89
LW
973 default:
974 info->ndcb1 = 0;
975 info->ndcb2 = 0;
976 break;
977 }
39f83d15
EG
978
979 /*
980 * If we are about to issue a read command, or about to set
981 * the write address, then clean the data buffer.
982 */
983 if (command == NAND_CMD_READ0 ||
984 command == NAND_CMD_READOOB ||
985 command == NAND_CMD_SEQIN) {
986
987 info->buf_count = mtd->writesize + mtd->oobsize;
988 memset(info->data_buff, 0xFF, info->buf_count);
989 }
990
c39ff03a
EG
991}
992
/*
 * Translate an MTD command into the NDCB0..NDCB3 command-buffer words.
 *
 * @ext_cmd_type selects the extended (naked/dispatch/last) sub-command
 * used by the chunked path on large-page controllers.
 *
 * Returns 1 if the command must actually be started on the controller,
 * 0 if it is a no-op at this stage (e.g. SEQIN on small pages merely
 * records the address for the following PAGEPROG).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	/* Chip-select 1 is flagged in NDCB0 */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB reads are plain page reads with the cursor past data */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* Explicit transfer length for this chunk */
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Writing an all-0xFF page would only wear the flash */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small page: single SEQIN+PAGEPROG double-byte cmd */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* ERASE1 and ERASE2 issued as one double-byte command */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* Already folded into ERASE1 above */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
1181
5cbbdc6a
EG
/*
 * ->cmdfunc() for configurations whose page fits in one controller
 * transaction (writesize <= PAGE_CHUNK_SIZE): prepare the command
 * buffers, start the controller, and wait for command completion.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		/* Completions are signalled from the controller IRQ path */
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
1229
5cbbdc6a
EG
/*
 * ->cmdfunc() for pages larger than the controller FIFO: a single MTD
 * command is split into a sequence of chunked transactions driven by
 * the 'extended command type' field (monolithic / naked / last /
 * dispatch), stepping info->cur_chunk until the sequence completes.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;

		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* Nothing to run: release any waitfunc() waiter */
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0    &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1349
fdbad98d 1350static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
45aaeff9
BB
1351 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1352 int page)
f8155a40
LW
1353{
1354 chip->write_buf(mtd, buf, mtd->writesize);
1355 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
fdbad98d
JW
1356
1357 return 0;
f8155a40
LW
1358}
1359
/*
 * ->ecc.read_page(): copy page data and OOB out of the driver buffer
 * and fold the controller's ECC outcome into the MTD statistics.
 * Returns the maximum number of bitflips seen, as tracked by the IRQ
 * path in info->max_bitflips.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		/* Correctable errors were fixed by the hardware engine */
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1387
1388static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1389{
4bd4ebcc 1390 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1391 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1392 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1393 char retval = 0xFF;
1394
1395 if (info->buf_start < info->buf_count)
1396 /* Has just send a new command? */
1397 retval = info->data_buff[info->buf_start++];
1398
1399 return retval;
1400}
1401
1402static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1403{
4bd4ebcc 1404 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1405 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1406 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1407 u16 retval = 0xFFFF;
1408
1409 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1410 retval = *((u16 *)(info->data_buff+info->buf_start));
1411 info->buf_start += 2;
1412 }
1413 return retval;
1414}
1415
1416static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1417{
4bd4ebcc 1418 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1419 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1420 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1421 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1422
1423 memcpy(buf, info->data_buff + info->buf_start, real_len);
1424 info->buf_start += real_len;
1425}
1426
1427static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1428 const uint8_t *buf, int len)
1429{
4bd4ebcc 1430 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1431 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1432 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1433 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1434
1435 memcpy(info->data_buff + info->buf_start, buf, real_len);
1436 info->buf_start += real_len;
1437}
1438
fe69af00 1439static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1440{
1441 return;
1442}
1443
/*
 * ->waitfunc(): wait for the device-ready completion armed by the
 * cmdfunc paths, then translate the controller outcome into NAND
 * status bits for the core.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		/* Consume the wait flag set when the command was started */
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1469
66e8e47e 1470static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
fe69af00 1471{
b1e48577 1472 struct pxa3xx_nand_host *host = info->host[info->cs];
fe69af00 1473 struct platform_device *pdev = info->pdev;
453810b7 1474 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
b1e48577 1475 const struct nand_sdr_timings *timings;
fe69af00 1476
66e8e47e
EG
1477 /* Configure default flash values */
1478 info->chunk_size = PAGE_CHUNK_SIZE;
f19fe983
AT
1479 info->reg_ndcr = 0x0; /* enable all interrupts */
1480 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1481 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
66e8e47e
EG
1482 info->reg_ndcr |= NDCR_SPARE_EN;
1483
b1e48577
EG
1484 /* use the common timing to make a try */
1485 timings = onfi_async_timing_mode_to_sdr_timings(0);
1486 if (IS_ERR(timings))
1487 return PTR_ERR(timings);
1488
1489 pxa3xx_nand_set_sdr_timing(host, timings);
66e8e47e
EG
1490 return 0;
1491}
1492
1493static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1494{
1495 struct pxa3xx_nand_host *host = info->host[info->cs];
063294a3
BB
1496 struct nand_chip *chip = &host->chip;
1497 struct mtd_info *mtd = nand_to_mtd(chip);
66e8e47e 1498
f19fe983
AT
1499 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1500 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1501 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
fe69af00 1502}
1503
154f50fb 1504static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
f271049e 1505{
66e8e47e
EG
1506 struct platform_device *pdev = info->pdev;
1507 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
f271049e 1508 uint32_t ndcr = nand_readl(info, NDCR);
f271049e 1509
70ed8523 1510 /* Set an initial chunk size */
b226eca2 1511 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
e971affa
RJ
1512 info->reg_ndcr = ndcr &
1513 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
66e8e47e 1514 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
48cf7efa
EG
1515 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1516 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
f271049e
MR
1517}
1518
fe69af00 1519static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1520{
1521 struct platform_device *pdev = info->pdev;
8f5ba31a
RJ
1522 struct dma_slave_config config;
1523 dma_cap_mask_t mask;
1524 struct pxad_param param;
1525 int ret;
fe69af00 1526
8f5ba31a
RJ
1527 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1528 if (info->data_buff == NULL)
1529 return -ENOMEM;
1530 if (use_dma == 0)
fe69af00 1531 return 0;
fe69af00 1532
8f5ba31a
RJ
1533 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1534 if (ret)
1535 return ret;
fe69af00 1536
8f5ba31a
RJ
1537 sg_init_one(&info->sg, info->data_buff, info->buf_size);
1538 dma_cap_zero(mask);
1539 dma_cap_set(DMA_SLAVE, mask);
1540 param.prio = PXAD_PRIO_LOWEST;
1541 param.drcmr = info->drcmr_dat;
1542 info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1543 &param, &pdev->dev,
1544 "data");
1545 if (!info->dma_chan) {
1546 dev_err(&pdev->dev, "unable to request data dma channel\n");
1547 return -ENODEV;
1548 }
fe69af00 1549
8f5ba31a
RJ
1550 memset(&config, 0, sizeof(config));
1551 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1552 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1553 config.src_addr = info->mmio_phys + NDDB;
1554 config.dst_addr = info->mmio_phys + NDDB;
1555 config.src_maxburst = 32;
1556 config.dst_maxburst = 32;
1557 ret = dmaengine_slave_config(info->dma_chan, &config);
1558 if (ret < 0) {
1559 dev_err(&info->pdev->dev,
1560 "dma channel configuration failed: %d\n",
1561 ret);
1562 return ret;
fe69af00 1563 }
1564
95b26563
EG
1565 /*
1566 * Now that DMA buffers are allocated we turn on
1567 * DMA proper for I/O operations.
1568 */
1569 info->use_dma = 1;
fe69af00 1570 return 0;
1571}
1572
498b6145
EG
/*
 * Release the resources acquired by pxa3xx_nand_init_buff(): the DMA
 * channel (only if DMA was actually brought up) and the data buffer.
 */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}
498b6145 1581
43bcfd2b 1582static int pxa_ecc_init(struct pxa3xx_nand_info *info,
39980c56 1583 struct mtd_info *mtd,
30b2afc8 1584 int strength, int ecc_stepsize, int page_size)
43bcfd2b 1585{
39980c56
BB
1586 struct nand_chip *chip = mtd_to_nand(mtd);
1587 struct nand_ecc_ctrl *ecc = &chip->ecc;
1588
30b2afc8 1589 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
c2cdace7
TP
1590 info->nfullchunks = 1;
1591 info->ntotalchunks = 1;
70ed8523 1592 info->chunk_size = 2048;
43bcfd2b
EG
1593 info->spare_size = 40;
1594 info->ecc_size = 24;
1595 ecc->mode = NAND_ECC_HW;
1596 ecc->size = 512;
1597 ecc->strength = 1;
43bcfd2b 1598
30b2afc8 1599 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
c2cdace7
TP
1600 info->nfullchunks = 1;
1601 info->ntotalchunks = 1;
70ed8523 1602 info->chunk_size = 512;
43bcfd2b
EG
1603 info->spare_size = 8;
1604 info->ecc_size = 8;
1605 ecc->mode = NAND_ECC_HW;
1606 ecc->size = 512;
1607 ecc->strength = 1;
43bcfd2b 1608
6033a949
BN
1609 /*
1610 * Required ECC: 4-bit correction per 512 bytes
1611 * Select: 16-bit correction per 2048 bytes
1612 */
3db227b6
RG
1613 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1614 info->ecc_bch = 1;
c2cdace7
TP
1615 info->nfullchunks = 1;
1616 info->ntotalchunks = 1;
3db227b6
RG
1617 info->chunk_size = 2048;
1618 info->spare_size = 32;
1619 info->ecc_size = 32;
1620 ecc->mode = NAND_ECC_HW;
1621 ecc->size = info->chunk_size;
39980c56 1622 mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
3db227b6 1623 ecc->strength = 16;
3db227b6 1624
30b2afc8 1625 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523 1626 info->ecc_bch = 1;
c2cdace7
TP
1627 info->nfullchunks = 2;
1628 info->ntotalchunks = 2;
70ed8523
EG
1629 info->chunk_size = 2048;
1630 info->spare_size = 32;
1631 info->ecc_size = 32;
1632 ecc->mode = NAND_ECC_HW;
1633 ecc->size = info->chunk_size;
39980c56 1634 mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
70ed8523 1635 ecc->strength = 16;
70ed8523 1636
6033a949
BN
1637 /*
1638 * Required ECC: 8-bit correction per 512 bytes
1639 * Select: 16-bit correction per 1024 bytes
1640 */
1641 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523 1642 info->ecc_bch = 1;
c2cdace7
TP
1643 info->nfullchunks = 4;
1644 info->ntotalchunks = 5;
70ed8523
EG
1645 info->chunk_size = 1024;
1646 info->spare_size = 0;
c2cdace7
TP
1647 info->last_chunk_size = 0;
1648 info->last_spare_size = 64;
70ed8523
EG
1649 info->ecc_size = 32;
1650 ecc->mode = NAND_ECC_HW;
1651 ecc->size = info->chunk_size;
39980c56 1652 mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
70ed8523 1653 ecc->strength = 16;
eee0166d
EG
1654 } else {
1655 dev_err(&info->pdev->dev,
1656 "ECC strength %d at page size %d is not supported\n",
1657 strength, page_size);
1658 return -ENODEV;
70ed8523 1659 }
eee0166d
EG
1660
1661 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1662 ecc->strength, ecc->size);
43bcfd2b
EG
1663 return 0;
1664}
1665
401e67e2 1666static int pxa3xx_nand_scan(struct mtd_info *mtd)
fe69af00 1667{
4bd4ebcc 1668 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1669 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1670 struct pxa3xx_nand_info *info = host->info_data;
401e67e2 1671 struct platform_device *pdev = info->pdev;
453810b7 1672 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
f19fe983 1673 int ret;
30b2afc8 1674 uint16_t ecc_strength, ecc_step;
401e67e2 1675
154f50fb
EG
1676 if (pdata->keep_config) {
1677 pxa3xx_nand_detect_config(info);
1678 } else {
1679 ret = pxa3xx_nand_config_ident(info);
1680 if (ret)
1681 return ret;
401e67e2
LW
1682 }
1683
48cf7efa 1684 if (info->reg_ndcr & NDCR_DWIDTH_M)
d456882b
LW
1685 chip->options |= NAND_BUSWIDTH_16;
1686
43bcfd2b 1687 /* Device detection must be done with ECC disabled */
fc256f57
MR
1688 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
1689 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
43bcfd2b
EG
1690 nand_writel(info, NDECCCTRL, 0x0);
1691
44ccb64f
BB
1692 if (pdata->flash_bbt)
1693 chip->bbt_options |= NAND_BBT_USE_FLASH;
1694
1695 chip->ecc.strength = pdata->ecc_strength;
1696 chip->ecc.size = pdata->ecc_step_size;
1697
133fe8fa
MY
1698 ret = nand_scan_ident(mtd, 1, NULL);
1699 if (ret)
1700 return ret;
776f265e 1701
f19fe983
AT
1702 if (!pdata->keep_config) {
1703 ret = pxa3xx_nand_init(host);
1704 if (ret) {
1705 dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
1706 ret);
1707 return ret;
1708 }
1709 }
1710
44ccb64f 1711 if (chip->bbt_options & NAND_BBT_USE_FLASH) {
776f265e
EG
1712 /*
1713 * We'll use a bad block table stored in-flash and don't
1714 * allow writing the bad block marker to the flash.
1715 */
44ccb64f 1716 chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
776f265e
EG
1717 chip->bbt_td = &bbt_main_descr;
1718 chip->bbt_md = &bbt_mirror_descr;
1719 }
1720
5cbbdc6a
EG
1721 /*
1722 * If the page size is bigger than the FIFO size, let's check
1723 * we are given the right variant and then switch to the extended
1724 * (aka splitted) command handling,
1725 */
1726 if (mtd->writesize > PAGE_CHUNK_SIZE) {
fc256f57
MR
1727 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
1728 info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
5cbbdc6a
EG
1729 chip->cmdfunc = nand_cmdfunc_extended;
1730 } else {
1731 dev_err(&info->pdev->dev,
1732 "unsupported page size on this variant\n");
1733 return -ENODEV;
1734 }
1735 }
1736
44ccb64f
BB
1737 ecc_strength = chip->ecc.strength;
1738 ecc_step = chip->ecc.size;
1739 if (!ecc_strength || !ecc_step) {
5b3e5078
EG
1740 ecc_strength = chip->ecc_strength_ds;
1741 ecc_step = chip->ecc_step_ds;
1742 }
30b2afc8
EG
1743
1744 /* Set default ECC strength requirements on non-ONFI devices */
1745 if (ecc_strength < 1 && ecc_step < 1) {
1746 ecc_strength = 1;
1747 ecc_step = 512;
1748 }
1749
39980c56 1750 ret = pxa_ecc_init(info, mtd, ecc_strength,
30b2afc8 1751 ecc_step, mtd->writesize);
eee0166d
EG
1752 if (ret)
1753 return ret;
43bcfd2b 1754
4332c116 1755 /* calculate addressing information */
d456882b
LW
1756 if (mtd->writesize >= 2048)
1757 host->col_addr_cycles = 2;
1758 else
1759 host->col_addr_cycles = 1;
1760
62e8b851
EG
1761 /* release the initial buffer */
1762 kfree(info->data_buff);
1763
1764 /* allocate the real data + oob buffer */
1765 info->buf_size = mtd->writesize + mtd->oobsize;
1766 ret = pxa3xx_nand_init_buff(info);
1767 if (ret)
1768 return ret;
4332c116 1769 info->oob_buff = info->data_buff + mtd->writesize;
62e8b851 1770
4332c116 1771 if ((mtd->size >> chip->page_shift) > 65536)
d456882b 1772 host->row_addr_cycles = 3;
4332c116 1773 else
d456882b 1774 host->row_addr_cycles = 2;
66e8e47e
EG
1775
1776 if (!pdata->keep_config)
1777 pxa3xx_nand_config_tail(info);
1778
401e67e2 1779 return nand_scan_tail(mtd);
fe69af00 1780}
1781
d456882b 1782static int alloc_nand_resource(struct platform_device *pdev)
fe69af00 1783{
a61ae81a 1784 struct device_node *np = pdev->dev.of_node;
f3c8cfc2 1785 struct pxa3xx_nand_platform_data *pdata;
fe69af00 1786 struct pxa3xx_nand_info *info;
d456882b 1787 struct pxa3xx_nand_host *host;
6e308f87 1788 struct nand_chip *chip = NULL;
fe69af00 1789 struct mtd_info *mtd;
1790 struct resource *r;
f3c8cfc2 1791 int ret, irq, cs;
fe69af00 1792
453810b7 1793 pdata = dev_get_platdata(&pdev->dev);
8bd8fbd8
UKK
1794 if (pdata->num_cs <= 0) {
1795 dev_err(&pdev->dev, "invalid number of chip selects\n");
e423c90a 1796 return -ENODEV;
8bd8fbd8
UKK
1797 }
1798
063294a3
BB
1799 info = devm_kzalloc(&pdev->dev,
1800 sizeof(*info) + sizeof(*host) * pdata->num_cs,
1801 GFP_KERNEL);
4c073cd2 1802 if (!info)
d456882b 1803 return -ENOMEM;
fe69af00 1804
fe69af00 1805 info->pdev = pdev;
c7e9c7e7 1806 info->variant = pxa3xx_nand_get_variant(pdev);
f3c8cfc2 1807 for (cs = 0; cs < pdata->num_cs; cs++) {
063294a3
BB
1808 host = (void *)&info[1] + sizeof(*host) * cs;
1809 chip = &host->chip;
d699ed25 1810 nand_set_controller_data(chip, host);
063294a3 1811 mtd = nand_to_mtd(chip);
f3c8cfc2 1812 info->host[cs] = host;
f3c8cfc2
LW
1813 host->cs = cs;
1814 host->info_data = info;
550dab5b 1815 mtd->dev.parent = &pdev->dev;
a61ae81a
BN
1816 /* FIXME: all chips use the same device tree partitions */
1817 nand_set_flash_node(chip, np);
f3c8cfc2 1818
d699ed25 1819 nand_set_controller_data(chip, host);
f3c8cfc2
LW
1820 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1821 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1822 chip->controller = &info->controller;
1823 chip->waitfunc = pxa3xx_nand_waitfunc;
1824 chip->select_chip = pxa3xx_nand_select_chip;
f3c8cfc2
LW
1825 chip->read_word = pxa3xx_nand_read_word;
1826 chip->read_byte = pxa3xx_nand_read_byte;
1827 chip->read_buf = pxa3xx_nand_read_buf;
1828 chip->write_buf = pxa3xx_nand_write_buf;
664c7f5e 1829 chip->options |= NAND_NO_SUBPAGE_WRITE;
5cbbdc6a 1830 chip->cmdfunc = nand_cmdfunc;
4a78cc64
BB
1831 chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
1832 chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
f3c8cfc2 1833 }
401e67e2 1834
d45bc58d 1835 nand_hw_control_init(chip->controller);
9ca7944d 1836 info->clk = devm_clk_get(&pdev->dev, NULL);
fe69af00 1837 if (IS_ERR(info->clk)) {
8bd8fbd8
UKK
1838 ret = PTR_ERR(info->clk);
1839 dev_err(&pdev->dev, "failed to get nand clock: %d\n", ret);
1840 return ret;
fe69af00 1841 }
1f8eaff2
EG
1842 ret = clk_prepare_enable(info->clk);
1843 if (ret < 0)
1844 return ret;
fe69af00 1845
9097103f 1846 if (!np && use_dma) {
8f5ba31a
RJ
1847 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1848 if (r == NULL) {
1849 dev_err(&pdev->dev,
1850 "no resource defined for data DMA\n");
1851 ret = -ENXIO;
1852 goto fail_disable_clk;
1e7ba630 1853 }
8f5ba31a 1854 info->drcmr_dat = r->start;
fe69af00 1855 }
fe69af00 1856
1857 irq = platform_get_irq(pdev, 0);
1858 if (irq < 0) {
1859 dev_err(&pdev->dev, "no IRQ resource defined\n");
1860 ret = -ENXIO;
9ca7944d 1861 goto fail_disable_clk;
fe69af00 1862 }
1863
1864 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0ddd846f
EG
1865 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1866 if (IS_ERR(info->mmio_base)) {
1867 ret = PTR_ERR(info->mmio_base);
8bd8fbd8 1868 dev_err(&pdev->dev, "failed to map register space: %d\n", ret);
9ca7944d 1869 goto fail_disable_clk;
fe69af00 1870 }
8638fac8 1871 info->mmio_phys = r->start;
fe69af00 1872
62e8b851
EG
1873 /* Allocate a buffer to allow flash detection */
1874 info->buf_size = INIT_BUFFER_SIZE;
1875 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1876 if (info->data_buff == NULL) {
1877 ret = -ENOMEM;
9ca7944d 1878 goto fail_disable_clk;
62e8b851 1879 }
fe69af00 1880
346e1259
HZ
1881 /* initialize all interrupts to be disabled */
1882 disable_int(info, NDSR_MASK);
1883
24542257
RJ
1884 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1885 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1886 pdev->name, info);
fe69af00 1887 if (ret < 0) {
8bd8fbd8 1888 dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
fe69af00 1889 goto fail_free_buf;
1890 }
1891
e353a20a 1892 platform_set_drvdata(pdev, info);
fe69af00 1893
d456882b 1894 return 0;
fe69af00 1895
fe69af00 1896fail_free_buf:
401e67e2 1897 free_irq(irq, info);
62e8b851 1898 kfree(info->data_buff);
9ca7944d 1899fail_disable_clk:
fb32061f 1900 clk_disable_unprepare(info->clk);
d456882b 1901 return ret;
fe69af00 1902}
1903
/*
 * Driver removal: release the IRQ and buffers, hand the shared DFI bus
 * back to the SMC, stop the clock, and unregister every chip-select's
 * MTD device. Always returns 0.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to SMC upon driver removal. This is done by
	 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
	 * access to the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(nand_to_mtd(&info->host[cs]->chip));
	return 0;
}
1936
1e7ba630
DM
1937static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1938{
1939 struct pxa3xx_nand_platform_data *pdata;
1940 struct device_node *np = pdev->dev.of_node;
1941 const struct of_device_id *of_id =
1942 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1943
1944 if (!of_id)
1945 return 0;
1946
fc256f57
MR
1947 /*
1948 * Some SoCs like A7k/A8k need to enable manually the NAND
1949 * controller to avoid being bootloader dependent. This is done
1950 * through the use of a single bit in the System Functions registers.
1951 */
1952 if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1953 struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
1954 pdev->dev.of_node, "marvell,system-controller");
1955 u32 reg;
1956
1957 if (IS_ERR(sysctrl_base))
1958 return PTR_ERR(sysctrl_base);
1959
1960 regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
1961 reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
1962 regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
1963 }
1964
1e7ba630
DM
1965 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1966 if (!pdata)
1967 return -ENOMEM;
1968
1969 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1970 pdata->enable_arbiter = 1;
1971 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1972 pdata->keep_config = 1;
1973 of_property_read_u32(np, "num-cs", &pdata->num_cs);
5b3e5078 1974
1e7ba630
DM
1975 pdev->dev.platform_data = pdata;
1976
1977 return 0;
1978}
1e7ba630 1979
e353a20a
LW
1980static int pxa3xx_nand_probe(struct platform_device *pdev)
1981{
1982 struct pxa3xx_nand_platform_data *pdata;
1983 struct pxa3xx_nand_info *info;
8f5ba31a 1984 int ret, cs, probe_success, dma_available;
e353a20a 1985
8f5ba31a
RJ
1986 dma_available = IS_ENABLED(CONFIG_ARM) &&
1987 (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1988 if (use_dma && !dma_available) {
f4db2e3a
EG
1989 use_dma = 0;
1990 dev_warn(&pdev->dev,
1991 "This platform can't do DMA on this device\n");
1992 }
8f5ba31a 1993
1e7ba630
DM
1994 ret = pxa3xx_nand_probe_dt(pdev);
1995 if (ret)
1996 return ret;
1997
453810b7 1998 pdata = dev_get_platdata(&pdev->dev);
e353a20a
LW
1999 if (!pdata) {
2000 dev_err(&pdev->dev, "no platform data defined\n");
2001 return -ENODEV;
2002 }
2003
d456882b 2004 ret = alloc_nand_resource(pdev);
8bd8fbd8 2005 if (ret)
d456882b 2006 return ret;
e353a20a 2007
d456882b 2008 info = platform_get_drvdata(pdev);
f3c8cfc2
LW
2009 probe_success = 0;
2010 for (cs = 0; cs < pdata->num_cs; cs++) {
063294a3 2011 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
f455578d 2012
18a84e93
EG
2013 /*
2014 * The mtd name matches the one used in 'mtdparts' kernel
2015 * parameter. This name cannot be changed or otherwise
2016 * user's mtd partitions configuration would get broken.
2017 */
2018 mtd->name = "pxa3xx_nand-0";
f3c8cfc2 2019 info->cs = cs;
b7655bcb 2020 ret = pxa3xx_nand_scan(mtd);
f3c8cfc2
LW
2021 if (ret) {
2022 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
2023 cs);
2024 continue;
2025 }
2026
a61ae81a
BN
2027 ret = mtd_device_register(mtd, pdata->parts[cs],
2028 pdata->nr_parts[cs]);
f3c8cfc2
LW
2029 if (!ret)
2030 probe_success = 1;
2031 }
2032
2033 if (!probe_success) {
e353a20a
LW
2034 pxa3xx_nand_remove(pdev);
2035 return -ENODEV;
2036 }
2037
f3c8cfc2 2038 return 0;
e353a20a
LW
2039}
2040
fe69af00 2041#ifdef CONFIG_PM
d3e94f3f 2042static int pxa3xx_nand_suspend(struct device *dev)
fe69af00 2043{
d3e94f3f 2044 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
fe69af00 2045
f8155a40 2046 if (info->state) {
d3e94f3f 2047 dev_err(dev, "driver busy, state = %d\n", info->state);
fe69af00 2048 return -EAGAIN;
2049 }
2050
d55d31a6 2051 clk_disable(info->clk);
fe69af00 2052 return 0;
2053}
2054
d3e94f3f 2055static int pxa3xx_nand_resume(struct device *dev)
fe69af00 2056{
d3e94f3f 2057 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
d55d31a6
EG
2058 int ret;
2059
2060 ret = clk_enable(info->clk);
2061 if (ret < 0)
2062 return ret;
051fc41c
LW
2063
2064 /* We don't want to handle interrupt without calling mtd routine */
2065 disable_int(info, NDCR_INT_MASK);
fe69af00 2066
f3c8cfc2
LW
2067 /*
2068 * Directly set the chip select to a invalid value,
2069 * then the driver would reset the timing according
2070 * to current chip select at the beginning of cmdfunc
2071 */
2072 info->cs = 0xff;
fe69af00 2073
051fc41c
LW
2074 /*
2075 * As the spec says, the NDSR would be updated to 0x1800 when
2076 * doing the nand_clk disable/enable.
2077 * To prevent it damaging state machine of the driver, clear
2078 * all status before resume
2079 */
2080 nand_writel(info, NDSR, NDSR_MASK);
f3c8cfc2 2081
18c81b18 2082 return 0;
fe69af00 2083}
#else
/* No CONFIG_PM: leave the dev_pm_ops callbacks unpopulated. */
#define pxa3xx_nand_suspend NULL
#define pxa3xx_nand_resume NULL
#endif
2088
d3e94f3f
BN
/* PM callbacks; both are NULL when CONFIG_PM is disabled (see above). */
static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend = pxa3xx_nand_suspend,
	.resume = pxa3xx_nand_resume,
};
2093
/* Platform driver bound by name ("pxa3xx-nand") or via the DT match table. */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name = "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm = &pxa3xx_nand_pm_ops,
	},
	.probe = pxa3xx_nand_probe,
	.remove = pxa3xx_nand_remove,
};
2103
/* Registers the driver and generates module init/exit boilerplate. */
module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");