/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>

#define CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel_relaxed((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl_relaxed((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;

	/* page size of attached chip */
	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	*pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	struct completion	cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	struct scatterlist	sg;
	enum dma_data_direction	dma_dir;
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");

struct pxa3xx_nand_timing {
	unsigned int	tCH;  /* Enable signal hold time */
	unsigned int	tCS;  /* Enable signal setup time */
	unsigned int	tWH;  /* ND_nWE high duration */
	unsigned int	tWP;  /* ND_nWE pulse time */
	unsigned int	tRH;  /* ND_nRE high duration */
	unsigned int	tRP;  /* ND_nRE pulse width */
	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
};

struct pxa3xx_nand_flash {
	uint32_t	chip_id;
	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
	unsigned int	dfc_width;	/* Width of flash controller(DWIDTH_C) */
	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nanoseconds to NAND flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
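
/*
 * Worked example (illustrative; a 156 MHz controller clock is assumed):
 * ns2cycle(40, 156000000) = 40 * 156 / 1000 = 6 after integer truncation,
 * so a 40 ns constraint such as tCH becomes 6 controller clock cycles
 * before being packed into a timing field via NDTR0_tCH().
 */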

static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);

static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
	if (!of_id)
		return PXA3XX_NAND_VARIANT_PXA;
	return (enum pxa3xx_nand_variant)of_id->data;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fall back to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
					   unsigned int *flash_width,
					   unsigned int *dfc_width)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int i, id, ntypes;

	ntypes = ARRAY_SIZE(builtin_flash_types);

	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

	id = chip->read_byte(mtd);
	id |= chip->read_byte(mtd) << 0x8;

	for (i = 0; i < ntypes; i++) {
		f = &builtin_flash_types[i];

		if (f->chip_id == id)
			break;
	}

	if (i == ntypes) {
		dev_err(&info->pdev->dev, "Error: timings not found\n");
		return -EINVAL;
	}

	pxa3xx_nand_set_timing(host, f->timing);

	*flash_width = f->flash_width;
	*dfc_width = f->dfc_width;

	return 0;
}

static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;

	mode = fls(mode) - 1;
	if (mode < 0)
		mode = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}

static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned int flash_width = 0, dfc_width = 0;
	int mode, err;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
						      &dfc_width);
		if (err)
			return err;

		if (flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		err = pxa3xx_nand_init_timings_onfi(host, mode);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
				struct mtd_info *mtd)
{
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = mtd->writesize;
	if (!oob_enable)
		return;

	info->oob_size = info->spare_size;
	if (!info->use_ecc)
		info->oob_size += info->ecc_size;
}
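
/*
 * Example (illustrative): with a 2 KiB page, spare enabled and HW ECC on,
 * the BCH4 settings from pxa_ecc_init() give data_size = 2048 and
 * oob_size = spare_size = 32; with ECC off the 32 ECC bytes are exposed
 * too, so oob_size becomes 64.
 */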

/*
 * NOTE: ND_RUN must be set first and the command buffer written
 * afterwards, otherwise the controller does not work.
 * We enable all the interrupts at the same time and let
 * pxa3xx_nand_irq handle all the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;
	/* wait for the RUN bit in NDCR to become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}

static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;
		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read we have
		 * to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			ioread32_rep(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

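			/* len counts 32-bit words: each pass drains 8 words, i.e. 32 bytes */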
			data += 32;
			len -= 8;
		}
	}

	ioread32_rep(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
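
/*
 * Example (illustrative): reading a 4 KiB page with the BCH4 settings
 * (chunk_size = 2048) passes through handle_data_pio() twice, each pass
 * moving 2048 data bytes plus the current oob_size and advancing the
 * buffer positions accordingly.
 */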

static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}

static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}
	info->sg.length = info->data_size +
		(info->oob_size ? info->spare_size + info->ecc_size : 0);
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}
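
/*
 * Example (illustrative): for a 2 KiB BCH4 page read with spare enabled
 * and ECC on, sg.length = 2048 + (32 + 32) = 2112 bytes: the data area
 * plus the full spare + ECC region, even though oob_size itself is 32.
 */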

static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready		= NDSR_FLASH_RDY;
		cmd_done	= NDSR_CS0_CMDD;
	} else {
		ready		= NDSR_RDY;
		cmd_done	= NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether to use DMA for the data transfer */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ.
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}
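
/*
 * Example (illustrative): for a large-page device, page_addr = 0x12345
 * and column = 0 yield ndcb1 = 0x23450000 (the low 16 page address bits
 * in the upper half) and ndcb2 = 0x01 (the remaining high byte).
 */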

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset the data and oob column pointers used to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->oob_size = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->data_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * If this is an x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if it
	 * has, reset the timings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * If this is an x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if it
	 * has, reset the timings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff), the HW will calculate its ECC
		 * as 0, which is different from the ECC information within
		 * OOB; ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Was a new command just sent? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	const struct nand_sdr_timings *timings;

	/* Configure default flash values */
	info->chunk_size = PAGE_CHUNK_SIZE;
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	/* try the common (mode 0) timings first */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);
	return 0;
}

static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
}

static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct dma_slave_config	config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret;

	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	if (use_dma == 0)
		return 0;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sg_init_one(&info->sg, info->data_buff, info->buf_size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = info->drcmr_dat;
	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
							  &param, &pdev->dev,
							  "data");
	if (!info->dma_chan) {
		dev_err(&pdev->dev, "unable to request data dma channel\n");
		return -ENODEV;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = info->mmio_phys + NDDB;
	config.dst_addr = info->mmio_phys + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0) {
		dev_err(&info->pdev->dev,
			"dma channel configuration failed: %d\n",
			ret);
		return ret;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}

static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}

static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
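
/*
 * Example (illustrative): a chip requiring 4-bit correction per 512 bytes
 * with a 4 KiB page matches the strength == 4 / ecc_stepsize == 512 /
 * page_size == 4096 branch above; the controller then runs BCH over two
 * 2048-byte chunks per page and reports a strength of 16 bits per chunk.
 */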

static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
	}

	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init(host);
		if (ret) {
			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
				ret);
			return ret;
		}
	}

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

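	/*
	 * Illustrative example: a 256 MiB chip with 2 KiB pages holds
	 * 131072 pages, which exceeds 65536, so three row address cycles
	 * are needed below; smaller devices fit in two.
	 */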
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}

static int alloc_nand_resource(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	struct resource *r;
	int ret, irq, cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata->num_cs <= 0)
		return -ENODEV;
	info = devm_kzalloc(&pdev->dev,
			    sizeof(*info) + sizeof(*host) * pdata->num_cs,
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->pdev = pdev;
	info->variant = pxa3xx_nand_get_variant(pdev);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		host = (void *)&info[1] + sizeof(*host) * cs;
		chip = &host->chip;
		chip->priv = host;
		mtd = nand_to_mtd(chip);
		info->host[cs] = host;
		host->cs = cs;
		host->info_data = info;
		mtd->dev.parent = &pdev->dev;
		/* FIXME: all chips use the same device tree partitions */
		nand_set_flash_node(chip, np);

		chip->priv		= host;
		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
		chip->controller	= &info->controller;
		chip->waitfunc		= pxa3xx_nand_waitfunc;
		chip->select_chip	= pxa3xx_nand_select_chip;
		chip->read_word		= pxa3xx_nand_read_word;
		chip->read_byte		= pxa3xx_nand_read_byte;
		chip->read_buf		= pxa3xx_nand_read_buf;
		chip->write_buf		= pxa3xx_nand_write_buf;
		chip->options		|= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc		= nand_cmdfunc;
	}

	spin_lock_init(&chip->controller->lock);
	init_waitqueue_head(&chip->controller->wq);
	info->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		return PTR_ERR(info->clk);
	}
	ret = clk_prepare_enable(info->clk);
	if (ret < 0)
		return ret;

	if (use_dma) {
		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (r == NULL) {
			dev_err(&pdev->dev,
				"no resource defined for data DMA\n");
			ret = -ENXIO;
			goto fail_disable_clk;
		}
		info->drcmr_dat = r->start;

		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (r == NULL) {
			dev_err(&pdev->dev,
				"no resource defined for cmd DMA\n");
			ret = -ENXIO;
			goto fail_disable_clk;
		}
		info->drcmr_cmd = r->start;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_disable_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(info->mmio_base)) {
		ret = PTR_ERR(info->mmio_base);
		goto fail_disable_clk;
	}
	info->mmio_phys = r->start;

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
				   pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	platform_set_drvdata(pdev, info);

	return 0;

fail_free_buf:
	free_irq(irq, info);
	kfree(info->data_buff);
fail_disable_clk:
	clk_disable_unprepare(info->clk);
	return ret;
}

static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to SMC upon driver removal. This is done by
	 * setting the x_ARB_CNTL bit, which also prevents the NAND from
	 * having access to the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(nand_to_mtd(&info->host[cs]->chip));
	return 0;
}

static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	if (!of_id)
		return 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);
	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);

	pdata->ecc_strength = of_get_nand_ecc_strength(np);
	if (pdata->ecc_strength < 0)
		pdata->ecc_strength = 0;

	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
	if (pdata->ecc_step_size < 0)
		pdata->ecc_step_size = 0;

	pdev->dev.platform_data = pdata;

	return 0;
}

static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed, or the user's
		 * mtd partition configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		ret = mtd_device_register(mtd, pdata->parts[cs],
					  pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}

#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

	if (info->state) {
		dev_err(dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	clk_disable(info->clk);
	return 0;
}

static int pxa3xx_nand_resume(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(info->clk);
	if (ret < 0)
		return ret;

	/* We don't want to handle an interrupt without calling the mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to an invalid value; the driver
	 * will then reset the timings according to the current chip
	 * select at the beginning of cmdfunc.
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it from damaging the driver's state machine, clear
	 * all status bits before resuming.
	 */
	nand_writel(info, NDSR, NDSR_MASK);

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm	= &pxa3xx_nand_pm_ops,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");