drivers/mtd/nand/sunxi_nand.c
1 /*
2 * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
3 *
4 * Derived from:
5 * https://github.com/yuq/sunxi-nfc-mtd
6 * Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
7 *
8 * https://github.com/hno/Allwinner-Info
9 * Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
10 *
11 * Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
12 * Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 */
24
25 #include <linux/dma-mapping.h>
26 #include <linux/slab.h>
27 #include <linux/module.h>
28 #include <linux/moduleparam.h>
29 #include <linux/platform_device.h>
30 #include <linux/of.h>
31 #include <linux/of_device.h>
32 #include <linux/of_gpio.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/nand.h>
35 #include <linux/mtd/partitions.h>
36 #include <linux/clk.h>
37 #include <linux/delay.h>
38 #include <linux/dmaengine.h>
39 #include <linux/gpio.h>
40 #include <linux/interrupt.h>
41 #include <linux/iopoll.h>
42 #include <linux/reset.h>
43
44 #define NFC_REG_CTL 0x0000
45 #define NFC_REG_ST 0x0004
46 #define NFC_REG_INT 0x0008
47 #define NFC_REG_TIMING_CTL 0x000C
48 #define NFC_REG_TIMING_CFG 0x0010
49 #define NFC_REG_ADDR_LOW 0x0014
50 #define NFC_REG_ADDR_HIGH 0x0018
51 #define NFC_REG_SECTOR_NUM 0x001C
52 #define NFC_REG_CNT 0x0020
53 #define NFC_REG_CMD 0x0024
54 #define NFC_REG_RCMD_SET 0x0028
55 #define NFC_REG_WCMD_SET 0x002C
56 #define NFC_REG_IO_DATA 0x0030
57 #define NFC_REG_ECC_CTL 0x0034
58 #define NFC_REG_ECC_ST 0x0038
59 #define NFC_REG_DEBUG 0x003C
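/*
 * Each NFC_REG_ECC_ERR_CNT register packs four 8-bit per-step error
 * counters; NFC_REG_ECC_ERR_CNT(x) yields the 32-bit register holding
 * step x, and NFC_ECC_ERR_CNT() further down extracts the right byte.
 */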
60 #define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
61 #define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
62 #define NFC_REG_SPARE_AREA 0x00A0
63 #define NFC_REG_PAT_ID 0x00A4
64 #define NFC_RAM0_BASE 0x0400
65 #define NFC_RAM1_BASE 0x0800
66
67 /* bits used in NFC_CTL */
68 #define NFC_EN BIT(0)
69 #define NFC_RESET BIT(1)
70 #define NFC_BUS_WIDTH_MSK BIT(2)
71 #define NFC_BUS_WIDTH_8 (0 << 2)
72 #define NFC_BUS_WIDTH_16 (1 << 2)
73 #define NFC_RB_SEL_MSK BIT(3)
74 #define NFC_RB_SEL(x) ((x) << 3)
75 #define NFC_CE_SEL_MSK GENMASK(26, 24)
76 #define NFC_CE_SEL(x) ((x) << 24)
77 #define NFC_CE_CTL BIT(6)
78 #define NFC_PAGE_SHIFT_MSK GENMASK(11, 8)
79 #define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
80 #define NFC_SAM BIT(12)
81 #define NFC_RAM_METHOD BIT(14)
82 #define NFC_DEBUG_CTL BIT(31)
83
84 /* bits used in NFC_ST */
85 #define NFC_RB_B2R BIT(0)
86 #define NFC_CMD_INT_FLAG BIT(1)
87 #define NFC_DMA_INT_FLAG BIT(2)
88 #define NFC_CMD_FIFO_STATUS BIT(3)
89 #define NFC_STA BIT(4)
90 #define NFC_NATCH_INT_FLAG BIT(5)
91 #define NFC_RB_STATE(x) BIT(x + 8)
92
93 /* bits used in NFC_INT */
94 #define NFC_B2R_INT_ENABLE BIT(0)
95 #define NFC_CMD_INT_ENABLE BIT(1)
96 #define NFC_DMA_INT_ENABLE BIT(2)
97 #define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \
98 NFC_CMD_INT_ENABLE | \
99 NFC_DMA_INT_ENABLE)
100
101 /* bits used in NFC_TIMING_CTL */
102 #define NFC_TIMING_CTL_EDO BIT(8)
103
104 /* define NFC_TIMING_CFG register layout */
105 #define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \
106 (((tWB) & 0x3) | (((tADL) & 0x3) << 2) | \
107 (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \
108 (((tCAD) & 0x7) << 8))
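/*
 * Illustrative example: NFC_TIMING_CFG(1, 2, 1, 1, 7) packs tWB=1, tADL=2,
 * tWHR=1, tRHW=1 and tCAD=7 into the value 0x759.
 */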
109
110 /* bits used in NFC_CMD */
111 #define NFC_CMD_LOW_BYTE_MSK GENMASK(7, 0)
112 #define NFC_CMD_HIGH_BYTE_MSK GENMASK(15, 8)
113 #define NFC_CMD(x) (x)
114 #define NFC_ADR_NUM_MSK GENMASK(18, 16)
115 #define NFC_ADR_NUM(x) (((x) - 1) << 16)
116 #define NFC_SEND_ADR BIT(19)
117 #define NFC_ACCESS_DIR BIT(20)
118 #define NFC_DATA_TRANS BIT(21)
119 #define NFC_SEND_CMD1 BIT(22)
120 #define NFC_WAIT_FLAG BIT(23)
121 #define NFC_SEND_CMD2 BIT(24)
122 #define NFC_SEQ BIT(25)
123 #define NFC_DATA_SWAP_METHOD BIT(26)
124 #define NFC_ROW_AUTO_INC BIT(27)
125 #define NFC_SEND_CMD3 BIT(28)
126 #define NFC_SEND_CMD4 BIT(29)
127 #define NFC_CMD_TYPE_MSK GENMASK(31, 30)
128 #define NFC_NORMAL_OP (0 << 30)
129 #define NFC_ECC_OP (1 << 30)
130 #define NFC_PAGE_OP (2 << 30)
131
132 /* bits used in NFC_RCMD_SET */
133 #define NFC_READ_CMD_MSK GENMASK(7, 0)
134 #define NFC_RND_READ_CMD0_MSK GENMASK(15, 8)
135 #define NFC_RND_READ_CMD1_MSK GENMASK(23, 16)
136
137 /* bits used in NFC_WCMD_SET */
138 #define NFC_PROGRAM_CMD_MSK GENMASK(7, 0)
139 #define NFC_RND_WRITE_CMD_MSK GENMASK(15, 8)
140 #define NFC_READ_CMD0_MSK GENMASK(23, 16)
141 #define NFC_READ_CMD1_MSK GENMASK(31, 24)
142
143 /* bits used in NFC_ECC_CTL */
144 #define NFC_ECC_EN BIT(0)
145 #define NFC_ECC_PIPELINE BIT(3)
146 #define NFC_ECC_EXCEPTION BIT(4)
147 #define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
148 #define NFC_ECC_BLOCK_512 BIT(5)
149 #define NFC_RANDOM_EN BIT(9)
150 #define NFC_RANDOM_DIRECTION BIT(10)
151 #define NFC_ECC_MODE_MSK GENMASK(15, 12)
152 #define NFC_ECC_MODE(x) ((x) << 12)
153 #define NFC_RANDOM_SEED_MSK GENMASK(30, 16)
154 #define NFC_RANDOM_SEED(x) ((x) << 16)
155
156 /* bits used in NFC_ECC_ST */
157 #define NFC_ECC_ERR(x) BIT(x)
158 #define NFC_ECC_ERR_MSK GENMASK(15, 0)
159 #define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
160 #define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff)
161
162 #define NFC_DEFAULT_TIMEOUT_MS 1000
163
164 #define NFC_SRAM_SIZE 1024
165
166 #define NFC_MAX_CS 7
167
168 /*
169 * Ready/Busy detection type: describes the Ready/Busy detection modes
170 *
171 * @RB_NONE: no external detection available, rely on STATUS command
172 * and software timeouts
173 * @RB_NATIVE: use sunxi NAND controller Ready/Busy support. The Ready/Busy
174 * pin of the NAND flash chip must be connected to one of the
175 * native NAND R/B pins (those which can be muxed to the NAND
176 * Controller)
177 * @RB_GPIO: use a simple GPIO to handle Ready/Busy status. The Ready/Busy
178 * pin of the NAND flash chip must be connected to a GPIO capable
179 * pin.
180 */
181 enum sunxi_nand_rb_type {
182 RB_NONE,
183 RB_NATIVE,
184 RB_GPIO,
185 };
186
187 /*
188 * Ready/Busy structure: stores information related to Ready/Busy detection
189 *
190 * @type: the Ready/Busy detection mode
191 * @info: information related to the R/B detection mode. Either a gpio
192 * id or a native R/B id (those supported by the NAND controller).
193 */
194 struct sunxi_nand_rb {
195 enum sunxi_nand_rb_type type;
196 union {
197 int gpio;
198 int nativeid;
199 } info;
200 };
201
202 /*
203 * Chip Select structure: stores information related to NAND Chip Select
204 *
205 * @cs: the NAND CS id used to communicate with a NAND Chip
206 * @rb: the Ready/Busy description
207 */
208 struct sunxi_nand_chip_sel {
209 u8 cs;
210 struct sunxi_nand_rb rb;
211 };
212
213 /*
214 * sunxi HW ECC info: stores information related to HW ECC support
215 *
216 * @mode: the sunxi ECC mode field deduced from ECC requirements
217 */
218 struct sunxi_nand_hw_ecc {
219 int mode;
220 };
221
222 /*
223 * NAND chip structure: stores NAND chip device related information
224 *
225 * @node: used to store NAND chips into a list
226 * @nand: base NAND chip structure
228 * @clk_rate: clk_rate required for this NAND chip
229 * @timing_cfg: TIMING_CFG register value for this NAND chip
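 * @timing_ctl: TIMING_CTL register value (EDO setting) for this NAND chip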
230 * @selected: current active CS
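 * @addr_cycles: number of address cycles queued by ->cmd_ctrl()
 * @addr: queued address bytes, packed into two 32-bit words
 * @cmd_cycles: number of command cycles queued by ->cmd_ctrl()
 * @cmd: queued command opcodes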
231 * @nsels: number of CS lines required by the NAND chip
232 * @sels: array of CS lines descriptions
233 */
234 struct sunxi_nand_chip {
235 struct list_head node;
236 struct nand_chip nand;
237 unsigned long clk_rate;
238 u32 timing_cfg;
239 u32 timing_ctl;
240 int selected;
241 int addr_cycles;
242 u32 addr[2];
243 int cmd_cycles;
244 u8 cmd[2];
245 int nsels;
246 struct sunxi_nand_chip_sel sels[0];
247 };
248
249 static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
250 {
251 return container_of(nand, struct sunxi_nand_chip, nand);
252 }
253
254 /*
255 * NAND Controller structure: stores sunxi NAND controller information
256 *
257 * @controller: base controller structure
258 * @dev: parent device (used to print error messages)
259 * @regs: NAND controller registers
260 * @ahb_clk: NAND Controller AHB clock
261 * @mod_clk: NAND Controller mod clock
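 * @reset: reset control line of the NAND controller, if any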
262 * @assigned_cs: bitmask describing already assigned CS lines
263 * @clk_rate: NAND controller current clock rate
264 * @chips: a list containing all the NAND chips attached to
265 * this NAND controller
266 * @complete: a completion object used to wait for NAND
267 * controller events
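 * @dmac: DMA channel used for DMA-assisted page accesses, if available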
268 */
269 struct sunxi_nfc {
270 struct nand_hw_control controller;
271 struct device *dev;
272 void __iomem *regs;
273 struct clk *ahb_clk;
274 struct clk *mod_clk;
275 struct reset_control *reset;
276 unsigned long assigned_cs;
277 unsigned long clk_rate;
278 struct list_head chips;
279 struct completion complete;
280 struct dma_chan *dmac;
281 };
282
283 static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
284 {
285 return container_of(ctrl, struct sunxi_nfc, controller);
286 }
287
288 static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
289 {
290 struct sunxi_nfc *nfc = dev_id;
291 u32 st = readl(nfc->regs + NFC_REG_ST);
292 u32 ien = readl(nfc->regs + NFC_REG_INT);
293
294 if (!(ien & st))
295 return IRQ_NONE;
296
297 if ((ien & st) == ien)
298 complete(&nfc->complete);
299
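	/*
	 * Clear the status bits we have handled and keep only the requested
	 * events that have not fired yet enabled; sunxi_nfc_wait_events()
	 * re-arms the interrupts it needs.
	 */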
300 writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
301 writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);
302
303 return IRQ_HANDLED;
304 }
305
306 static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
307 bool use_polling, unsigned int timeout_ms)
308 {
309 int ret;
310
311 if (events & ~NFC_INT_MASK)
312 return -EINVAL;
313
314 if (!timeout_ms)
315 timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
316
317 if (!use_polling) {
318 init_completion(&nfc->complete);
319
320 writel(events, nfc->regs + NFC_REG_INT);
321
322 ret = wait_for_completion_timeout(&nfc->complete,
323 msecs_to_jiffies(timeout_ms));
324 if (!ret)
325 ret = -ETIMEDOUT;
326 else
327 ret = 0;
328
329 writel(0, nfc->regs + NFC_REG_INT);
330 } else {
331 u32 status;
332
333 ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
334 (status & events) == events, 1,
335 timeout_ms * 1000);
336 }
337
338 writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
339
340 if (ret)
341 dev_err(nfc->dev, "wait interrupt timed out\n");
342
343 return ret;
344 }
345
346 static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
347 {
348 u32 status;
349 int ret;
350
351 ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
352 !(status & NFC_CMD_FIFO_STATUS), 1,
353 NFC_DEFAULT_TIMEOUT_MS * 1000);
354 if (ret)
355 dev_err(nfc->dev, "wait for empty cmd FIFO timed out\n");
356
357 return ret;
358 }
359
360 static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
361 {
362 u32 ctl;
363 int ret;
364
365 writel(0, nfc->regs + NFC_REG_ECC_CTL);
366 writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
367
368 ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
369 !(ctl & NFC_RESET), 1,
370 NFC_DEFAULT_TIMEOUT_MS * 1000);
371 if (ret)
372 dev_err(nfc->dev, "wait for NAND controller reset timed out\n");
373
374 return ret;
375 }
376
377 static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
378 int chunksize, int nchunks,
379 enum dma_data_direction ddir,
380 struct scatterlist *sg)
381 {
382 struct nand_chip *nand = mtd_to_nand(mtd);
383 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
384 struct dma_async_tx_descriptor *dmad;
385 enum dma_transfer_direction tdir;
386 dma_cookie_t dmat;
387 int ret;
388
389 if (ddir == DMA_FROM_DEVICE)
390 tdir = DMA_DEV_TO_MEM;
391 else
392 tdir = DMA_MEM_TO_DEV;
393
394 sg_init_one(sg, buf, nchunks * chunksize);
395 ret = dma_map_sg(nfc->dev, sg, 1, ddir);
396 if (!ret)
397 return -ENOMEM;
398
399 dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
400 if (!dmad) {
401 ret = -EINVAL;
402 goto err_unmap_buf;
403 }
404
405 writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
406 nfc->regs + NFC_REG_CTL);
407 writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
408 writel(chunksize, nfc->regs + NFC_REG_CNT);
409 dmat = dmaengine_submit(dmad);
410
411 ret = dma_submit_error(dmat);
412 if (ret)
413 goto err_clr_dma_flag;
414
415 return 0;
416
417 err_clr_dma_flag:
418 writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
419 nfc->regs + NFC_REG_CTL);
420
421 err_unmap_buf:
422 dma_unmap_sg(nfc->dev, sg, 1, ddir);
423 return ret;
424 }
425
426 static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
427 enum dma_data_direction ddir,
428 struct scatterlist *sg)
429 {
430 struct nand_chip *nand = mtd_to_nand(mtd);
431 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
432
433 dma_unmap_sg(nfc->dev, sg, 1, ddir);
434 writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
435 nfc->regs + NFC_REG_CTL);
436 }
437
438 static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
439 {
440 struct nand_chip *nand = mtd_to_nand(mtd);
441 struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
442 struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
443 struct sunxi_nand_rb *rb;
444 int ret;
445
446 if (sunxi_nand->selected < 0)
447 return 0;
448
449 rb = &sunxi_nand->sels[sunxi_nand->selected].rb;
450
451 switch (rb->type) {
452 case RB_NATIVE:
453 ret = !!(readl(nfc->regs + NFC_REG_ST) &
454 NFC_RB_STATE(rb->info.nativeid));
455 break;
456 case RB_GPIO:
457 ret = gpio_get_value(rb->info.gpio);
458 break;
459 case RB_NONE:
460 default:
461 ret = 0;
462 dev_err(nfc->dev, "cannot check R/B NAND status!\n");
463 break;
464 }
465
466 return ret;
467 }
468
469 static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
470 {
471 struct nand_chip *nand = mtd_to_nand(mtd);
472 struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
473 struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
474 struct sunxi_nand_chip_sel *sel;
475 u32 ctl;
476
477 if (chip > 0 && chip >= sunxi_nand->nsels)
478 return;
479
480 if (chip == sunxi_nand->selected)
481 return;
482
483 ctl = readl(nfc->regs + NFC_REG_CTL) &
484 ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);
485
486 if (chip >= 0) {
487 sel = &sunxi_nand->sels[chip];
488
489 ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
490 NFC_PAGE_SHIFT(nand->page_shift);
491 if (sel->rb.type == RB_NONE) {
492 nand->dev_ready = NULL;
493 } else {
494 nand->dev_ready = sunxi_nfc_dev_ready;
495 if (sel->rb.type == RB_NATIVE)
496 ctl |= NFC_RB_SEL(sel->rb.info.nativeid);
497 }
498
499 writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
500
501 if (nfc->clk_rate != sunxi_nand->clk_rate) {
502 clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
503 nfc->clk_rate = sunxi_nand->clk_rate;
504 }
505 }
506
507 writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
508 writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
509 writel(ctl, nfc->regs + NFC_REG_CTL);
510
511 sunxi_nand->selected = chip;
512 }
513
514 static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
515 {
516 struct nand_chip *nand = mtd_to_nand(mtd);
517 struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
518 struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
519 int ret;
520 int cnt;
521 int offs = 0;
522 u32 tmp;
523
524 while (len > offs) {
525 bool poll = false;
526
527 cnt = min(len - offs, NFC_SRAM_SIZE);
528
529 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
530 if (ret)
531 break;
532
533 writel(cnt, nfc->regs + NFC_REG_CNT);
534 tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
535 writel(tmp, nfc->regs + NFC_REG_CMD);
536
537 /* Arbitrary limit for polling mode */
538 if (cnt < 64)
539 poll = true;
540
541 ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
542 if (ret)
543 break;
544
545 if (buf)
546 memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
547 cnt);
548 offs += cnt;
549 }
550 }
551
552 static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
553 int len)
554 {
555 struct nand_chip *nand = mtd_to_nand(mtd);
556 struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
557 struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
558 int ret;
559 int cnt;
560 int offs = 0;
561 u32 tmp;
562
563 while (len > offs) {
564 bool poll = false;
565
566 cnt = min(len - offs, NFC_SRAM_SIZE);
567
568 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
569 if (ret)
570 break;
571
572 writel(cnt, nfc->regs + NFC_REG_CNT);
573 memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
574 tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
575 NFC_ACCESS_DIR;
576 writel(tmp, nfc->regs + NFC_REG_CMD);
577
578 /* Arbitrary limit for polling mode */
579 if (cnt < 64)
580 poll = true;
581
582 ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
583 if (ret)
584 break;
585
586 offs += cnt;
587 }
588 }
589
590 static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
591 {
592 uint8_t ret;
593
594 sunxi_nfc_read_buf(mtd, &ret, 1);
595
596 return ret;
597 }
598
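/*
 * The controller sends whole command/address sequences on its own, so
 * ->cmd_ctrl() only queues the CLE/ALE bytes it receives and pushes them to
 * NFC_REG_CMD once the NAND core releases the CLE/ALE lines (dat ==
 * NAND_CMD_NONE with only NCE asserted).
 */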
599 static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
600 unsigned int ctrl)
601 {
602 struct nand_chip *nand = mtd_to_nand(mtd);
603 struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
604 struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
605 int ret;
606
607 if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
608 !(ctrl & (NAND_CLE | NAND_ALE))) {
609 u32 cmd = 0;
610
611 if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
612 return;
613
614 if (sunxi_nand->cmd_cycles--)
615 cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];
616
617 if (sunxi_nand->cmd_cycles--) {
618 cmd |= NFC_SEND_CMD2;
619 writel(sunxi_nand->cmd[1],
620 nfc->regs + NFC_REG_RCMD_SET);
621 }
622
623 sunxi_nand->cmd_cycles = 0;
624
625 if (sunxi_nand->addr_cycles) {
626 cmd |= NFC_SEND_ADR |
627 NFC_ADR_NUM(sunxi_nand->addr_cycles);
628 writel(sunxi_nand->addr[0],
629 nfc->regs + NFC_REG_ADDR_LOW);
630 }
631
632 if (sunxi_nand->addr_cycles > 4)
633 writel(sunxi_nand->addr[1],
634 nfc->regs + NFC_REG_ADDR_HIGH);
635
636 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
637 if (ret)
638 return;
639
640 writel(cmd, nfc->regs + NFC_REG_CMD);
641 sunxi_nand->addr[0] = 0;
642 sunxi_nand->addr[1] = 0;
643 sunxi_nand->addr_cycles = 0;
644 sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
645 }
646
647 if (ctrl & NAND_CLE) {
648 sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
649 } else if (ctrl & NAND_ALE) {
650 sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
651 dat << ((sunxi_nand->addr_cycles % 4) * 8);
652 sunxi_nand->addr_cycles++;
653 }
654 }
655
656 /* These seed values have been extracted from Allwinner's BSP */
657 static const u16 sunxi_nfc_randomizer_page_seeds[] = {
658 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
659 0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
660 0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
661 0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
662 0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
663 0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
664 0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
665 0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
666 0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
667 0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
668 0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
669 0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
670 0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
671 0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
672 0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
673 0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
674 };
675
676 /*
677 * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
678 * have been generated using
679 * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
680 * the randomizer engine does internally before de/scrambling OOB data.
681 *
682 * Those tables are statically defined to avoid calculating randomizer state
683 * at runtime.
684 */
685 static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
686 0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
687 0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
688 0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
689 0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
690 0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
691 0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
692 0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
693 0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
694 0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
695 0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
696 0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
697 0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
698 0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
699 0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
700 0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
701 0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
702 };
703
704 static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
705 0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
706 0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
707 0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
708 0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
709 0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
710 0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
711 0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
712 0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
713 0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
714 0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
715 0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
716 0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
717 0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
718 0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
719 0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
720 0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
721 };
722
723 static u16 sunxi_nfc_randomizer_step(u16 state, int count)
724 {
725 state &= 0x7fff;
726
727 /*
728 * This loop is just a simple implementation of a Fibonacci LFSR using
729 * the x^16 + x^15 + 1 polynomial.
730 */
731 while (count--)
732 state = ((state >> 1) |
733 (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
734
735 return state;
736 }
737
738 static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
739 {
740 const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
741 int mod = mtd_div_by_ws(mtd->erasesize, mtd);
742
743 if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
744 mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
745
746 if (ecc) {
747 if (mtd->ecc_step_size == 512)
748 seeds = sunxi_nfc_randomizer_ecc512_seeds;
749 else
750 seeds = sunxi_nfc_randomizer_ecc1024_seeds;
751 }
752
753 return seeds[page % mod];
754 }
755
756 static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
757 int page, bool ecc)
758 {
759 struct nand_chip *nand = mtd_to_nand(mtd);
760 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
761 u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
762 u16 state;
763
764 if (!(nand->options & NAND_NEED_SCRAMBLING))
765 return;
766
767 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
768 state = sunxi_nfc_randomizer_state(mtd, page, ecc);
769 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
770 writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
771 }
772
773 static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
774 {
775 struct nand_chip *nand = mtd_to_nand(mtd);
776 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
777
778 if (!(nand->options & NAND_NEED_SCRAMBLING))
779 return;
780
781 writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
782 nfc->regs + NFC_REG_ECC_CTL);
783 }
784
785 static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
786 {
787 struct nand_chip *nand = mtd_to_nand(mtd);
788 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
789
790 if (!(nand->options & NAND_NEED_SCRAMBLING))
791 return;
792
793 writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
794 nfc->regs + NFC_REG_ECC_CTL);
795 }
796
797 static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
798 {
799 u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
800
801 bbm[0] ^= state;
802 bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
803 }
804
805 static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
806 const uint8_t *buf, int len,
807 bool ecc, int page)
808 {
809 sunxi_nfc_randomizer_config(mtd, page, ecc);
810 sunxi_nfc_randomizer_enable(mtd);
811 sunxi_nfc_write_buf(mtd, buf, len);
812 sunxi_nfc_randomizer_disable(mtd);
813 }
814
815 static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
816 int len, bool ecc, int page)
817 {
818 sunxi_nfc_randomizer_config(mtd, page, ecc);
819 sunxi_nfc_randomizer_enable(mtd);
820 sunxi_nfc_read_buf(mtd, buf, len);
821 sunxi_nfc_randomizer_disable(mtd);
822 }
823
824 static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
825 {
826 struct nand_chip *nand = mtd_to_nand(mtd);
827 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
828 struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
829 u32 ecc_ctl;
830
831 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
832 ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
833 NFC_ECC_BLOCK_SIZE_MSK);
834 ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
835 NFC_ECC_PIPELINE;
836
837 if (nand->ecc.size == 512)
838 ecc_ctl |= NFC_ECC_BLOCK_512;
839
840 writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
841 }
842
843 static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd)
844 {
845 struct nand_chip *nand = mtd_to_nand(mtd);
846 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
847
848 writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
849 nfc->regs + NFC_REG_ECC_CTL);
850 }
851
852 static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
853 {
854 buf[0] = user_data;
855 buf[1] = user_data >> 8;
856 buf[2] = user_data >> 16;
857 buf[3] = user_data >> 24;
858 }
859
860 static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
861 {
862 return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
863 }
864
865 static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
866 int step, bool bbm, int page)
867 {
868 struct nand_chip *nand = mtd_to_nand(mtd);
869 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
870
871 sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
872 oob);
873
874 /* De-randomize the Bad Block Marker. */
875 if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
876 sunxi_nfc_randomize_bbm(mtd, page, oob);
877 }
878
879 static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
880 const u8 *oob, int step,
881 bool bbm, int page)
882 {
883 struct nand_chip *nand = mtd_to_nand(mtd);
884 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
885 u8 user_data[4];
886
887 /* Randomize the Bad Block Marker. */
888 if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
889 memcpy(user_data, oob, sizeof(user_data));
890 sunxi_nfc_randomize_bbm(mtd, page, user_data);
891 oob = user_data;
892 }
893
894 writel(sunxi_nfc_buf_to_user_data(oob),
895 nfc->regs + NFC_REG_USER_DATA(step));
896 }
897
898 static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
899 unsigned int *max_bitflips, int ret)
900 {
901 if (ret < 0) {
902 mtd->ecc_stats.failed++;
903 } else {
904 mtd->ecc_stats.corrected += ret;
905 *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
906 }
907 }
908
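/*
 * Returns -EBADMSG when the chunk is uncorrectable, 0 when the controller
 * matched a blank/erased pattern (with *erased set accordingly), or the
 * number of corrected bitflips otherwise.
 */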
909 static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
910 int step, u32 status, bool *erased)
911 {
912 struct nand_chip *nand = mtd_to_nand(mtd);
913 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
914 struct nand_ecc_ctrl *ecc = &nand->ecc;
915 u32 tmp;
916
917 *erased = false;
918
919 if (status & NFC_ECC_ERR(step))
920 return -EBADMSG;
921
922 if (status & NFC_ECC_PAT_FOUND(step)) {
923 u8 pattern;
924
925 if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
926 pattern = 0x0;
927 } else {
928 pattern = 0xff;
929 *erased = true;
930 }
931
932 if (data)
933 memset(data, pattern, ecc->size);
934
935 if (oob)
936 memset(oob, pattern, ecc->bytes + 4);
937
938 return 0;
939 }
940
941 tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));
942
943 return NFC_ECC_ERR_CNT(step, tmp);
944 }
945
946 static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
947 u8 *data, int data_off,
948 u8 *oob, int oob_off,
949 int *cur_off,
950 unsigned int *max_bitflips,
951 bool bbm, bool oob_required, int page)
952 {
953 struct nand_chip *nand = mtd_to_nand(mtd);
954 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
955 struct nand_ecc_ctrl *ecc = &nand->ecc;
956 int raw_mode = 0;
957 bool erased;
958 int ret;
959
960 if (*cur_off != data_off)
961 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
962
963 sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
964
965 if (data_off + ecc->size != oob_off)
966 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
967
968 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
969 if (ret)
970 return ret;
971
972 sunxi_nfc_randomizer_enable(mtd);
973 writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
974 nfc->regs + NFC_REG_CMD);
975
976 ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
977 sunxi_nfc_randomizer_disable(mtd);
978 if (ret)
979 return ret;
980
981 *cur_off = oob_off + ecc->bytes + 4;
982
983 ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
984 readl(nfc->regs + NFC_REG_ECC_ST),
985 &erased);
986 if (erased)
987 return 1;
988
989 if (ret < 0) {
990 /*
991 * Re-read the data with the randomizer disabled to identify
992 * bitflips in erased pages.
993 */
994 if (nand->options & NAND_NEED_SCRAMBLING) {
995 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
996 nand->read_buf(mtd, data, ecc->size);
997 } else {
998 memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
999 ecc->size);
1000 }
1001
1002 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
1003 nand->read_buf(mtd, oob, ecc->bytes + 4);
1004
1005 ret = nand_check_erased_ecc_chunk(data, ecc->size,
1006 oob, ecc->bytes + 4,
1007 NULL, 0, ecc->strength);
1008 if (ret >= 0)
1009 raw_mode = 1;
1010 } else {
1011 memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
1012
1013 if (oob_required) {
1014 nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
1015 sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
1016 true, page);
1017
1018 sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0,
1019 bbm, page);
1020 }
1021 }
1022
1023 sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret);
1024
1025 return raw_mode;
1026 }
1027
1028 static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
1029 u8 *oob, int *cur_off,
1030 bool randomize, int page)
1031 {
1032 struct nand_chip *nand = mtd_to_nand(mtd);
1033 struct nand_ecc_ctrl *ecc = &nand->ecc;
1034 int offset = ((ecc->bytes + 4) * ecc->steps);
1035 int len = mtd->oobsize - offset;
1036
1037 if (len <= 0)
1038 return;
1039
1040 if (!cur_off || *cur_off != offset)
1041 nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
1042 offset + mtd->writesize, -1);
1043
1044 if (!randomize)
1045 sunxi_nfc_read_buf(mtd, oob + offset, len);
1046 else
1047 sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
1048 false, page);
1049
1050 if (cur_off)
1051 *cur_off = mtd->oobsize + mtd->writesize;
1052 }
1053
1054 static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
1055 int oob_required, int page,
1056 int nchunks)
1057 {
1058 struct nand_chip *nand = mtd_to_nand(mtd);
1059 bool randomized = nand->options & NAND_NEED_SCRAMBLING;
1060 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
1061 struct nand_ecc_ctrl *ecc = &nand->ecc;
1062 unsigned int max_bitflips = 0;
1063 int ret, i, raw_mode = 0;
1064 struct scatterlist sg;
1065 u32 status;
1066
1067 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
1068 if (ret)
1069 return ret;
1070
1071 ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
1072 DMA_FROM_DEVICE, &sg);
1073 if (ret)
1074 return ret;
1075
1076 sunxi_nfc_hw_ecc_enable(mtd);
1077 sunxi_nfc_randomizer_config(mtd, page, false);
1078 sunxi_nfc_randomizer_enable(mtd);
1079
1080 writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
1081 NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
1082
1083 dma_async_issue_pending(nfc->dmac);
1084
1085 writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
1086 nfc->regs + NFC_REG_CMD);
1087
1088 ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
1089 if (ret)
1090 dmaengine_terminate_all(nfc->dmac);
1091
1092 sunxi_nfc_randomizer_disable(mtd);
1093 sunxi_nfc_hw_ecc_disable(mtd);
1094
1095 sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);
1096
1097 if (ret)
1098 return ret;
1099
1100 status = readl(nfc->regs + NFC_REG_ECC_ST);
1101
1102 for (i = 0; i < nchunks; i++) {
1103 int data_off = i * ecc->size;
1104 int oob_off = i * (ecc->bytes + 4);
1105 u8 *data = buf + data_off;
1106 u8 *oob = nand->oob_poi + oob_off;
1107 bool erased;
1108
1109 ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
1110 oob_required ? oob : NULL,
1111 i, status, &erased);
1112
1113 /* ECC errors are handled in the second loop. */
1114 if (ret < 0)
1115 continue;
1116
1117 if (oob_required && !erased) {
1118 /* TODO: use DMA to retrieve OOB */
1119 nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
1120 mtd->writesize + oob_off, -1);
1121 nand->read_buf(mtd, oob, ecc->bytes + 4);
1122
1123 sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
1124 !i, page);
1125 }
1126
1127 if (erased)
1128 raw_mode = 1;
1129
1130 sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
1131 }
1132
1133 if (status & NFC_ECC_ERR_MSK) {
1134 for (i = 0; i < nchunks; i++) {
1135 int data_off = i * ecc->size;
1136 int oob_off = i * (ecc->bytes + 4);
1137 u8 *data = buf + data_off;
1138 u8 *oob = nand->oob_poi + oob_off;
1139
1140 if (!(status & NFC_ECC_ERR(i)))
1141 continue;
1142
1143 /*
1144 * Re-read the data with the randomizer disabled to
1145 * identify bitflips in erased pages.
1146 */
1147 if (randomized) {
1148 /* TODO: use DMA to read page in raw mode */
1149 nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
1150 data_off, -1);
1151 nand->read_buf(mtd, data, ecc->size);
1152 }
1153
1154 /* TODO: use DMA to retrieve OOB */
1155 nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
1156 mtd->writesize + oob_off, -1);
1157 nand->read_buf(mtd, oob, ecc->bytes + 4);
1158
1159 ret = nand_check_erased_ecc_chunk(data, ecc->size,
1160 oob, ecc->bytes + 4,
1161 NULL, 0,
1162 ecc->strength);
1163 if (ret >= 0)
1164 raw_mode = 1;
1165
1166 sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
1167 }
1168 }
1169
1170 if (oob_required)
1171 sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
1172 NULL, !raw_mode,
1173 page);
1174
1175 return max_bitflips;
1176 }
1177
1178 static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
1179 const u8 *data, int data_off,
1180 const u8 *oob, int oob_off,
1181 int *cur_off, bool bbm,
1182 int page)
1183 {
1184 struct nand_chip *nand = mtd_to_nand(mtd);
1185 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
1186 struct nand_ecc_ctrl *ecc = &nand->ecc;
1187 int ret;
1188
1189 if (data_off != *cur_off)
1190 nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
1191
1192 sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
1193
1194 if (data_off + ecc->size != oob_off)
1195 nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
1196
1197 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
1198 if (ret)
1199 return ret;
1200
1201 sunxi_nfc_randomizer_enable(mtd);
1202 sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page);
1203
1204 writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
1205 NFC_ACCESS_DIR | NFC_ECC_OP,
1206 nfc->regs + NFC_REG_CMD);
1207
1208 ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
1209 sunxi_nfc_randomizer_disable(mtd);
1210 if (ret)
1211 return ret;
1212
1213 *cur_off = oob_off + ecc->bytes + 4;
1214
1215 return 0;
1216 }
1217
1218 static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
1219 u8 *oob, int *cur_off,
1220 int page)
1221 {
1222 struct nand_chip *nand = mtd_to_nand(mtd);
1223 struct nand_ecc_ctrl *ecc = &nand->ecc;
1224 int offset = ((ecc->bytes + 4) * ecc->steps);
1225 int len = mtd->oobsize - offset;
1226
1227 if (len <= 0)
1228 return;
1229
1230 if (!cur_off || *cur_off != offset)
1231 nand->cmdfunc(mtd, NAND_CMD_RNDIN,
1232 offset + mtd->writesize, -1);
1233
1234 sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
1235
1236 if (cur_off)
1237 *cur_off = mtd->oobsize + mtd->writesize;
1238 }
1239
1240 static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
1241 struct nand_chip *chip, uint8_t *buf,
1242 int oob_required, int page)
1243 {
1244 struct nand_ecc_ctrl *ecc = &chip->ecc;
1245 unsigned int max_bitflips = 0;
1246 int ret, i, cur_off = 0;
1247 bool raw_mode = false;
1248
1249 sunxi_nfc_hw_ecc_enable(mtd);
1250
1251 for (i = 0; i < ecc->steps; i++) {
1252 int data_off = i * ecc->size;
1253 int oob_off = i * (ecc->bytes + 4);
1254 u8 *data = buf + data_off;
1255 u8 *oob = chip->oob_poi + oob_off;
1256
1257 ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
1258 oob_off + mtd->writesize,
1259 &cur_off, &max_bitflips,
1260 !i, oob_required, page);
1261 if (ret < 0)
1262 return ret;
1263 else if (ret)
1264 raw_mode = true;
1265 }
1266
1267 if (oob_required)
1268 sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
1269 !raw_mode, page);
1270
1271 sunxi_nfc_hw_ecc_disable(mtd);
1272
1273 return max_bitflips;
1274 }
1275
1276 static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
1277 struct nand_chip *chip, u8 *buf,
1278 int oob_required, int page)
1279 {
1280 int ret;
1281
1282 ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
1283 chip->ecc.steps);
1284 if (ret >= 0)
1285 return ret;
1286
1287 /* Fall back to PIO mode */
1288 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
1289
1290 return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
1291 }
1292
1293 static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
1294 struct nand_chip *chip,
1295 u32 data_offs, u32 readlen,
1296 u8 *bufpoi, int page)
1297 {
1298 struct nand_ecc_ctrl *ecc = &chip->ecc;
1299 int ret, i, cur_off = 0;
1300 unsigned int max_bitflips = 0;
1301
1302 sunxi_nfc_hw_ecc_enable(mtd);
1303
1304 for (i = data_offs / ecc->size;
1305 i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
1306 int data_off = i * ecc->size;
1307 int oob_off = i * (ecc->bytes + 4);
1308 u8 *data = bufpoi + data_off;
1309 u8 *oob = chip->oob_poi + oob_off;
1310
1311 ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
1312 oob,
1313 oob_off + mtd->writesize,
1314 &cur_off, &max_bitflips, !i,
1315 false, page);
1316 if (ret < 0)
1317 return ret;
1318 }
1319
1320 sunxi_nfc_hw_ecc_disable(mtd);
1321
1322 return max_bitflips;
1323 }
1324
1325 static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
1326 struct nand_chip *chip,
1327 u32 data_offs, u32 readlen,
1328 u8 *buf, int page)
1329 {
1330 int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
1331 int ret;
1332
1333 ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
1334 if (ret >= 0)
1335 return ret;
1336
1337 /* Fall back to PIO mode */
1338 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
1339
1340 return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
1341 buf, page);
1342 }
1343
1344 static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
1345 struct nand_chip *chip,
1346 const uint8_t *buf, int oob_required,
1347 int page)
1348 {
1349 struct nand_ecc_ctrl *ecc = &chip->ecc;
1350 int ret, i, cur_off = 0;
1351
1352 sunxi_nfc_hw_ecc_enable(mtd);
1353
1354 for (i = 0; i < ecc->steps; i++) {
1355 int data_off = i * ecc->size;
1356 int oob_off = i * (ecc->bytes + 4);
1357 const u8 *data = buf + data_off;
1358 const u8 *oob = chip->oob_poi + oob_off;
1359
1360 ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
1361 oob_off + mtd->writesize,
1362 &cur_off, !i, page);
1363 if (ret)
1364 return ret;
1365 }
1366
1367 if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
1368 sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
1369 &cur_off, page);
1370
1371 sunxi_nfc_hw_ecc_disable(mtd);
1372
1373 return 0;
1374 }
1375
1376 static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
1377 struct nand_chip *chip,
1378 u32 data_offs, u32 data_len,
1379 const u8 *buf, int oob_required,
1380 int page)
1381 {
1382 struct nand_ecc_ctrl *ecc = &chip->ecc;
1383 int ret, i, cur_off = 0;
1384
1385 sunxi_nfc_hw_ecc_enable(mtd);
1386
1387 for (i = data_offs / ecc->size;
1388 i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
1389 int data_off = i * ecc->size;
1390 int oob_off = i * (ecc->bytes + 4);
1391 const u8 *data = buf + data_off;
1392 const u8 *oob = chip->oob_poi + oob_off;
1393
1394 ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
1395 oob_off + mtd->writesize,
1396 &cur_off, !i, page);
1397 if (ret)
1398 return ret;
1399 }
1400
1401 sunxi_nfc_hw_ecc_disable(mtd);
1402
1403 return 0;
1404 }
1405
1406 static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
1407 struct nand_chip *chip,
1408 const u8 *buf,
1409 int oob_required,
1410 int page)
1411 {
1412 struct nand_chip *nand = mtd_to_nand(mtd);
1413 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
1414 struct nand_ecc_ctrl *ecc = &nand->ecc;
1415 struct scatterlist sg;
1416 int ret, i;
1417
1418 ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
1419 if (ret)
1420 return ret;
1421
1422 ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
1423 DMA_TO_DEVICE, &sg);
1424 if (ret)
1425 goto pio_fallback;
1426
1427 for (i = 0; i < ecc->steps; i++) {
1428 const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
1429
1430 sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
1431 }
1432
1433 sunxi_nfc_hw_ecc_enable(mtd);
1434 sunxi_nfc_randomizer_config(mtd, page, false);
1435 sunxi_nfc_randomizer_enable(mtd);
1436
1437 writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
1438 nfc->regs + NFC_REG_RCMD_SET);
1439
1440 dma_async_issue_pending(nfc->dmac);
1441
1442 writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
1443 NFC_DATA_TRANS | NFC_ACCESS_DIR,
1444 nfc->regs + NFC_REG_CMD);
1445
1446 ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
1447 if (ret)
1448 dmaengine_terminate_all(nfc->dmac);
1449
1450 sunxi_nfc_randomizer_disable(mtd);
1451 sunxi_nfc_hw_ecc_disable(mtd);
1452
1453 sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
1454
1455 if (ret)
1456 return ret;
1457
1458 if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
1459 /* TODO: use DMA to transfer extra OOB bytes ? */
1460 sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
1461 NULL, page);
1462
1463 return 0;
1464
1465 pio_fallback:
1466 return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
1467 }
1468
1469 static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
1470 struct nand_chip *chip,
1471 uint8_t *buf, int oob_required,
1472 int page)
1473 {
1474 struct nand_ecc_ctrl *ecc = &chip->ecc;
1475 unsigned int max_bitflips = 0;
1476 int ret, i, cur_off = 0;
1477 bool raw_mode = false;
1478
1479 sunxi_nfc_hw_ecc_enable(mtd);
1480
1481 for (i = 0; i < ecc->steps; i++) {
1482 int data_off = i * (ecc->size + ecc->bytes + 4);
1483 int oob_off = data_off + ecc->size;
1484 u8 *data = buf + (i * ecc->size);
1485 u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
1486
1487 ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
1488 oob_off, &cur_off,
1489 &max_bitflips, !i,
1490 oob_required,
1491 page);
1492 if (ret < 0)
1493 return ret;
1494 else if (ret)
1495 raw_mode = true;
1496 }
1497
1498 if (oob_required)
1499 sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
1500 !raw_mode, page);
1501
1502 sunxi_nfc_hw_ecc_disable(mtd);
1503
1504 return max_bitflips;
1505 }
1506
1507 static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
1508 struct nand_chip *chip,
1509 const uint8_t *buf,
1510 int oob_required, int page)
1511 {
1512 struct nand_ecc_ctrl *ecc = &chip->ecc;
1513 int ret, i, cur_off = 0;
1514
1515 sunxi_nfc_hw_ecc_enable(mtd);
1516
1517 for (i = 0; i < ecc->steps; i++) {
1518 int data_off = i * (ecc->size + ecc->bytes + 4);
1519 int oob_off = data_off + ecc->size;
1520 const u8 *data = buf + (i * ecc->size);
1521 const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
1522
1523 ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
1524 oob, oob_off, &cur_off,
1525 false, page);
1526 if (ret)
1527 return ret;
1528 }
1529
1530 if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
1531 sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
1532 &cur_off, page);
1533
1534 sunxi_nfc_hw_ecc_disable(mtd);
1535
1536 return 0;
1537 }
1538
1539 static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
1540 struct nand_chip *chip,
1541 int page)
1542 {
1543 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1544
1545 chip->pagebuf = -1;
1546
1547 return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
1548 }
1549
1550 static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
1551 struct nand_chip *chip,
1552 int page)
1553 {
1554 int ret, status;
1555
1556 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
1557
1558 chip->pagebuf = -1;
1559
1560 memset(chip->buffers->databuf, 0xff, mtd->writesize);
1561 ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
1562 if (ret)
1563 return ret;
1564
1565 /* Send command to program the OOB data */
1566 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1567
1568 status = chip->waitfunc(mtd, chip);
1569
1570 return status & NAND_STATUS_FAIL ? -EIO : 0;
1571 }
1572
1573 static const s32 tWB_lut[] = {6, 12, 16, 20};
1574 static const s32 tRHW_lut[] = {4, 8, 12, 20};
1575
1576 static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
1577 u32 clk_period)
1578 {
1579 u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
1580 int i;
1581
1582 for (i = 0; i < lut_size; i++) {
1583 if (clk_cycles <= lut[i])
1584 return i;
1585 }
1586
1587 /* Doesn't fit */
1588 return -EINVAL;
1589 }
1590
1591 #define sunxi_nand_lookup_timing(l, p, c) \
1592 _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
1593
1594 static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
1595 const struct nand_data_interface *conf)
1596 {
1597 struct nand_chip *nand = mtd_to_nand(mtd);
1598 struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
1599 struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
1600 const struct nand_sdr_timings *timings;
1601 u32 min_clk_period = 0;
1602 s32 tWB, tADL, tWHR, tRHW, tCAD;
1603 long real_clk_rate;
1604
1605 timings = nand_get_sdr_timings(conf);
1606 if (IS_ERR(timings))
1607 return -ENOTSUPP;
1608
1609 /* T1 <=> tCLS */
1610 if (timings->tCLS_min > min_clk_period)
1611 min_clk_period = timings->tCLS_min;
1612
1613 /* T2 <=> tCLH */
1614 if (timings->tCLH_min > min_clk_period)
1615 min_clk_period = timings->tCLH_min;
1616
1617 /* T3 <=> tCS */
1618 if (timings->tCS_min > min_clk_period)
1619 min_clk_period = timings->tCS_min;
1620
1621 /* T4 <=> tCH */
1622 if (timings->tCH_min > min_clk_period)
1623 min_clk_period = timings->tCH_min;
1624
1625 /* T5 <=> tWP */
1626 if (timings->tWP_min > min_clk_period)
1627 min_clk_period = timings->tWP_min;
1628
1629 /* T6 <=> tWH */
1630 if (timings->tWH_min > min_clk_period)
1631 min_clk_period = timings->tWH_min;
1632
1633 /* T7 <=> tALS */
1634 if (timings->tALS_min > min_clk_period)
1635 min_clk_period = timings->tALS_min;
1636
1637 /* T8 <=> tDS */
1638 if (timings->tDS_min > min_clk_period)
1639 min_clk_period = timings->tDS_min;
1640
1641 /* T9 <=> tDH */
1642 if (timings->tDH_min > min_clk_period)
1643 min_clk_period = timings->tDH_min;
1644
1645 /* T10 <=> tRR */
1646 if (timings->tRR_min > (min_clk_period * 3))
1647 min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
1648
1649 /* T11 <=> tALH */
1650 if (timings->tALH_min > min_clk_period)
1651 min_clk_period = timings->tALH_min;
1652
1653 /* T12 <=> tRP */
1654 if (timings->tRP_min > min_clk_period)
1655 min_clk_period = timings->tRP_min;
1656
1657 /* T13 <=> tREH */
1658 if (timings->tREH_min > min_clk_period)
1659 min_clk_period = timings->tREH_min;
1660
1661 /* T14 <=> tRC */
1662 if (timings->tRC_min > (min_clk_period * 2))
1663 min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
1664
1665 /* T15 <=> tWC */
1666 if (timings->tWC_min > (min_clk_period * 2))
1667 min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
1668
1669 /* T16 - T19 + tCAD */
1670 if (timings->tWB_max > (min_clk_period * 20))
1671 min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);
1672
1673 if (timings->tADL_min > (min_clk_period * 32))
1674 min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);
1675
1676 if (timings->tWHR_min > (min_clk_period * 32))
1677 min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);
1678
1679 if (timings->tRHW_min > (min_clk_period * 20))
1680 min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
1681
1682 tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
1683 min_clk_period);
1684 if (tWB < 0) {
1685 dev_err(nfc->dev, "unsupported tWB\n");
1686 return tWB;
1687 }
1688
1689 tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
1690 if (tADL > 3) {
1691 dev_err(nfc->dev, "unsupported tADL\n");
1692 return -EINVAL;
1693 }
1694
1695 tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
1696 if (tWHR > 3) {
1697 dev_err(nfc->dev, "unsupported tWHR\n");
1698 return -EINVAL;
1699 }
1700
1701 tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
1702 min_clk_period);
1703 if (tRHW < 0) {
1704 dev_err(nfc->dev, "unsupported tRHW\n");
1705 return tRHW;
1706 }
1707
1708 if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1709 return 0;
1710
1711 /*
1712 * TODO: according to the ONFI specs this value only applies to DDR NAND,
1713 * but Allwinner seems to set this to 0x7. Mimic them for now.
1714 */
1715 tCAD = 0x7;
1716
1717 /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
1718 chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
1719
1720 /* Convert min_clk_period from picoseconds to nanoseconds */
1721 min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
1722
1723 /*
1724 * Unlike what is stated in Allwinner datasheet, the clk_rate should
1725 * be set to (1 / min_clk_period), and not (2 / min_clk_period).
1726 * This new formula was verified with a scope and validated by
1727 * Allwinner engineers.
1728 */
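	/* For example (illustrative): min_clk_period = 20 ns -> clk_rate = 50 MHz. */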
1729 chip->clk_rate = NSEC_PER_SEC / min_clk_period;
1730 real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
1731
1732 /*
1733 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
1734 * output cycle timings shall be used if the host drives tRC less than
1735 * 30 ns.
1736 */
1737 min_clk_period = NSEC_PER_SEC / real_clk_rate;
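	/*
	 * e.g. (illustrative) a 100 MHz mod clock gives min_clk_period = 10 ns,
	 * hence tRC = 20 ns < 30 ns and EDO is enabled.
	 */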
1738 chip->timing_ctl = ((min_clk_period * 2) < 30) ?
1739 NFC_TIMING_CTL_EDO : 0;
1740
1741 return 0;
1742 }
1743
1744 static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
1745 struct mtd_oob_region *oobregion)
1746 {
1747 struct nand_chip *nand = mtd_to_nand(mtd);
1748 struct nand_ecc_ctrl *ecc = &nand->ecc;
1749
1750 if (section >= ecc->steps)
1751 return -ERANGE;
1752
1753 oobregion->offset = section * (ecc->bytes + 4) + 4;
1754 oobregion->length = ecc->bytes;
1755
1756 return 0;
1757 }
1758
1759 static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
1760 struct mtd_oob_region *oobregion)
1761 {
1762 struct nand_chip *nand = mtd_to_nand(mtd);
1763 struct nand_ecc_ctrl *ecc = &nand->ecc;
1764
1765 if (section > ecc->steps)
1766 return -ERANGE;
1767
1768 /*
1769 * The first 2 bytes are used for BB markers, hence we
1770 * only have 2 bytes available in the first user data
1771 * section.
1772 */
1773 if (!section && ecc->mode == NAND_ECC_HW) {
1774 oobregion->offset = 2;
1775 oobregion->length = 2;
1776
1777 return 0;
1778 }
1779
1780 oobregion->offset = section * (ecc->bytes + 4);
1781
1782 if (section < ecc->steps)
1783 oobregion->length = 4;
1784 else
1785 oobregion->length = mtd->oobsize - oobregion->offset;
1786
1787 return 0;
1788 }
1789
1790 static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
1791 .ecc = sunxi_nand_ooblayout_ecc,
1792 .free = sunxi_nand_ooblayout_free,
1793 };
1794
1795 static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
1796 struct nand_ecc_ctrl *ecc,
1797 struct device_node *np)
1798 {
1799 static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
1800 struct nand_chip *nand = mtd_to_nand(mtd);
1801 struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
1802 struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
1803 struct sunxi_nand_hw_ecc *data;
1804 int nsectors;
1805 int ret;
1806 int i;
1807
1808 if (ecc->options & NAND_ECC_MAXIMIZE) {
1809 int bytes;
1810
1811 ecc->size = 1024;
1812 nsectors = mtd->writesize / ecc->size;
1813
1814 /* Reserve 2 bytes for the BBM */
1815 bytes = (mtd->oobsize - 2) / nsectors;
1816
1817 /* 4 non-ECC bytes are added before each ECC bytes section */
1818 bytes -= 4;
1819
1820 /* and the number of ECC bytes has to be even. */
1821 if (bytes % 2)
1822 bytes--;
1823
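		/*
		 * fls(8 * ecc->size) is the number of ECC bits needed per
		 * correctable bit (14 for 1024-byte chunks), so this is the
		 * highest strength the remaining spare area can hold.
		 */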
1824 ecc->strength = bytes * 8 / fls(8 * ecc->size);
1825
1826 for (i = 0; i < ARRAY_SIZE(strengths); i++) {
1827 if (strengths[i] > ecc->strength)
1828 break;
1829 }
1830
1831 if (!i)
1832 ecc->strength = 0;
1833 else
1834 ecc->strength = strengths[i - 1];
1835 }
1836
1837 if (ecc->size != 512 && ecc->size != 1024)
1838 return -EINVAL;
1839
1840 data = kzalloc(sizeof(*data), GFP_KERNEL);
1841 if (!data)
1842 return -ENOMEM;
1843
1844 /* Prefer 1k ECC chunks over 512-byte ones */
1845 if (ecc->size == 512 && mtd->writesize > 512) {
1846 ecc->size = 1024;
1847 ecc->strength *= 2;
1848 }
1849
1850 /* TODO: add ECC info retrieval from DT */
1851 for (i = 0; i < ARRAY_SIZE(strengths); i++) {
1852 if (ecc->strength <= strengths[i])
1853 break;
1854 }
1855
1856 if (i >= ARRAY_SIZE(strengths)) {
1857 dev_err(nfc->dev, "unsupported strength\n");
1858 ret = -ENOTSUPP;
1859 goto err;
1860 }
1861
1862 data->mode = i;
1863
1864 /* The HW ECC engine always requests ECC bytes sized for 1024-byte blocks */
1865 ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
1866
1867 /* The HW ECC engine always works with an even number of ECC bytes */
1868 ecc->bytes = ALIGN(ecc->bytes, 2);
1869
1870 nsectors = mtd->writesize / ecc->size;
1871
1872 if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
1873 ret = -EINVAL;
1874 goto err;
1875 }
1876
1877 ecc->read_oob = sunxi_nfc_hw_common_ecc_read_oob;
1878 ecc->write_oob = sunxi_nfc_hw_common_ecc_write_oob;
1879 mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
1880 ecc->priv = data;
1881
1882 return 0;
1883
1884 err:
1885 kfree(data);
1886
1887 return ret;
1888 }
1889
1890 static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
1891 {
1892 kfree(ecc->priv);
1893 }
1894
1895 static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
1896 struct nand_ecc_ctrl *ecc,
1897 struct device_node *np)
1898 {
1899 struct nand_chip *nand = mtd_to_nand(mtd);
1900 struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
1901 struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
1902 int ret;
1903
1904 ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
1905 if (ret)
1906 return ret;
1907
1908 if (nfc->dmac) {
1909 ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
1910 ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
1911 ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
1912 nand->options |= NAND_USE_BOUNCE_BUFFER;
1913 } else {
1914 ecc->read_page = sunxi_nfc_hw_ecc_read_page;
1915 ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
1916 ecc->write_page = sunxi_nfc_hw_ecc_write_page;
1917 }
1918
1919 /* TODO: support DMA for raw accesses and subpage write */
1920 ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
1921 ecc->read_oob_raw = nand_read_oob_std;
1922 ecc->write_oob_raw = nand_write_oob_std;
1923
1924 return 0;
1925 }
1926
1927 static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
1928 struct nand_ecc_ctrl *ecc,
1929 struct device_node *np)
1930 {
1931 int ret;
1932
1933 ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
1934 if (ret)
1935 return ret;
1936
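/*
 * Syndrome mode interleaves data, the 4 protected spare bytes (prepad)
 * and the ECC bytes on the flash, so the generic syndrome helpers are
 * used for raw OOB accesses.
 */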
1937 ecc->prepad = 4;
1938 ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
1939 ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
1940 ecc->read_oob_raw = nand_read_oob_syndrome;
1941 ecc->write_oob_raw = nand_write_oob_syndrome;
1942
1943 return 0;
1944 }
1945
1946 static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
1947 {
1948 switch (ecc->mode) {
1949 case NAND_ECC_HW:
1950 case NAND_ECC_HW_SYNDROME:
1951 sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
1952 break;
1953 case NAND_ECC_NONE:
1954 default:
1955 break;
1956 }
1957 }
1958
1959 static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
1960 struct device_node *np)
1961 {
1962 struct nand_chip *nand = mtd_to_nand(mtd);
1963 int ret;
1964
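/*
 * If the DT did not specify an ECC step size/strength, fall back to the
 * minimum requirements reported by the chip itself.
 */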
1965 if (!ecc->size) {
1966 ecc->size = nand->ecc_step_ds;
1967 ecc->strength = nand->ecc_strength_ds;
1968 }
1969
1970 if (!ecc->size || !ecc->strength)
1971 return -EINVAL;
1972
1973 switch (ecc->mode) {
1974 case NAND_ECC_HW:
1975 ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
1976 if (ret)
1977 return ret;
1978 break;
1979 case NAND_ECC_HW_SYNDROME:
1980 ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc, np);
1981 if (ret)
1982 return ret;
1983 break;
1984 case NAND_ECC_NONE:
1985 case NAND_ECC_SOFT:
1986 break;
1987 default:
1988 return -EINVAL;
1989 }
1990
1991 return 0;
1992 }
1993
1994 static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
1995 struct device_node *np)
1996 {
1997 struct sunxi_nand_chip *chip;
1998 struct mtd_info *mtd;
1999 struct nand_chip *nand;
2000 int nsels;
2001 int ret;
2002 int i;
2003 u32 tmp;
2004
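/*
 * Each NAND chip node carries a "reg" property with one cell per chip
 * select it uses (up to 8 CS lines), and may also provide "allwinner,rb"
 * cells for native R/B lines and/or "rb-gpios" for GPIO-based ready/busy
 * detection.
 */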
2005 if (!of_get_property(np, "reg", &nsels))
2006 return -EINVAL;
2007
2008 nsels /= sizeof(u32);
2009 if (!nsels) {
2010 dev_err(dev, "invalid reg property size\n");
2011 return -EINVAL;
2012 }
2013
2014 chip = devm_kzalloc(dev,
2015 sizeof(*chip) +
2016 (nsels * sizeof(struct sunxi_nand_chip_sel)),
2017 GFP_KERNEL);
2018 if (!chip) {
2019 dev_err(dev, "could not allocate chip\n");
2020 return -ENOMEM;
2021 }
2022
2023 chip->nsels = nsels;
2024 chip->selected = -1;
2025
2026 for (i = 0; i < nsels; i++) {
2027 ret = of_property_read_u32_index(np, "reg", i, &tmp);
2028 if (ret) {
2029 dev_err(dev, "could not retrieve reg property: %d\n",
2030 ret);
2031 return ret;
2032 }
2033
2034 if (tmp > NFC_MAX_CS) {
2035 dev_err(dev,
2036 "invalid reg value: %u (max CS = 7)\n",
2037 tmp);
2038 return -EINVAL;
2039 }
2040
2041 if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
2042 dev_err(dev, "CS %d already assigned\n", tmp);
2043 return -EINVAL;
2044 }
2045
2046 chip->sels[i].cs = tmp;
2047
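/*
 * Prefer a native R/B line (the NFC has two of them) when "allwinner,rb"
 * is provided; otherwise try a GPIO, and finally fall back to no R/B
 * handling at all.
 */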
2048 if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
2049 tmp < 2) {
2050 chip->sels[i].rb.type = RB_NATIVE;
2051 chip->sels[i].rb.info.nativeid = tmp;
2052 } else {
2053 ret = of_get_named_gpio(np, "rb-gpios", i);
2054 if (ret >= 0) {
2055 tmp = ret;
2056 chip->sels[i].rb.type = RB_GPIO;
2057 chip->sels[i].rb.info.gpio = tmp;
2058 ret = devm_gpio_request(dev, tmp, "nand-rb");
2059 if (ret)
2060 return ret;
2061
2062 ret = gpio_direction_input(tmp);
2063 if (ret)
2064 return ret;
2065 } else {
2066 chip->sels[i].rb.type = RB_NONE;
2067 }
2068 }
2069 }
2070
2071 nand = &chip->nand;
2072 /* Default tR value specified in the ONFI spec (chapter 4.15.1) */
2073 nand->chip_delay = 200;
2074 nand->controller = &nfc->controller;
2075 /*
2076 * Set the ECC mode to the default value in case nothing is specified
2077 * in the DT.
2078 */
2079 nand->ecc.mode = NAND_ECC_HW;
2080 nand_set_flash_node(nand, np);
2081 nand->select_chip = sunxi_nfc_select_chip;
2082 nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
2083 nand->read_buf = sunxi_nfc_read_buf;
2084 nand->write_buf = sunxi_nfc_write_buf;
2085 nand->read_byte = sunxi_nfc_read_byte;
2086 nand->setup_data_interface = sunxi_nfc_setup_data_interface;
2087
2088 mtd = nand_to_mtd(nand);
2089 mtd->dev.parent = dev;
2090
2091 ret = nand_scan_ident(mtd, nsels, NULL);
2092 if (ret)
2093 return ret;
2094
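/*
 * When an on-flash BBT is used, store it in the data area rather than in
 * OOB, and disable subpage writes on chips that need scrambling (the
 * randomizer presumably works on full pages); subpage reads are always
 * enabled.
 */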
2095 if (nand->bbt_options & NAND_BBT_USE_FLASH)
2096 nand->bbt_options |= NAND_BBT_NO_OOB;
2097
2098 if (nand->options & NAND_NEED_SCRAMBLING)
2099 nand->options |= NAND_NO_SUBPAGE_WRITE;
2100
2101 nand->options |= NAND_SUBPAGE_READ;
2102
2103 ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
2104 if (ret) {
2105 dev_err(dev, "ECC init failed: %d\n", ret);
2106 return ret;
2107 }
2108
2109 ret = nand_scan_tail(mtd);
2110 if (ret) {
2111 dev_err(dev, "nand_scan_tail failed: %d\n", ret);
2112 return ret;
2113 }
2114
2115 ret = mtd_device_register(mtd, NULL, 0);
2116 if (ret) {
2117 dev_err(dev, "failed to register mtd device: %d\n", ret);
2118 nand_release(mtd);
2119 return ret;
2120 }
2121
2122 list_add_tail(&chip->node, &nfc->chips);
2123
2124 return 0;
2125 }
2126
2127 static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
2128 {
2129 struct device_node *np = dev->of_node;
2130 struct device_node *nand_np;
2131 int nchips = of_get_child_count(np);
2132 int ret;
2133
2134 if (nchips > 8) {
2135 dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
2136 return -EINVAL;
2137 }
2138
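/*
 * for_each_child_of_node() drops the node reference on each successful
 * iteration, so it only needs to be put manually when bailing out early.
 */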
2139 for_each_child_of_node(np, nand_np) {
2140 ret = sunxi_nand_chip_init(dev, nfc, nand_np);
2141 if (ret) {
2142 of_node_put(nand_np);
2143 return ret;
2144 }
2145 }
2146
2147 return 0;
2148 }
2149
2150 static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
2151 {
2152 struct sunxi_nand_chip *chip;
2153
2154 while (!list_empty(&nfc->chips)) {
2155 chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
2156 node);
2157 nand_release(nand_to_mtd(&chip->nand));
2158 sunxi_nand_ecc_cleanup(&chip->nand.ecc);
2159 list_del(&chip->node);
2160 }
2161 }
2162
2163 static int sunxi_nfc_probe(struct platform_device *pdev)
2164 {
2165 struct device *dev = &pdev->dev;
2166 struct resource *r;
2167 struct sunxi_nfc *nfc;
2168 int irq;
2169 int ret;
2170
2171 nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
2172 if (!nfc)
2173 return -ENOMEM;
2174
2175 nfc->dev = dev;
2176 nand_hw_control_init(&nfc->controller);
2177 INIT_LIST_HEAD(&nfc->chips);
2178
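/*
 * The NFC needs its register window, an interrupt line, the AHB bus
 * clock, the "mod" module clock and an (optional) reset line before it
 * can be reset and used.
 */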
2179 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2180 nfc->regs = devm_ioremap_resource(dev, r);
2181 if (IS_ERR(nfc->regs))
2182 return PTR_ERR(nfc->regs);
2183
2184 irq = platform_get_irq(pdev, 0);
2185 if (irq < 0) {
2186 dev_err(dev, "failed to retrieve irq\n");
2187 return irq;
2188 }
2189
2190 nfc->ahb_clk = devm_clk_get(dev, "ahb");
2191 if (IS_ERR(nfc->ahb_clk)) {
2192 dev_err(dev, "failed to retrieve ahb clk\n");
2193 return PTR_ERR(nfc->ahb_clk);
2194 }
2195
2196 ret = clk_prepare_enable(nfc->ahb_clk);
2197 if (ret)
2198 return ret;
2199
2200 nfc->mod_clk = devm_clk_get(dev, "mod");
2201 if (IS_ERR(nfc->mod_clk)) {
2202 dev_err(dev, "failed to retrieve mod clk\n");
2203 ret = PTR_ERR(nfc->mod_clk);
2204 goto out_ahb_clk_unprepare;
2205 }
2206
2207 ret = clk_prepare_enable(nfc->mod_clk);
2208 if (ret)
2209 goto out_ahb_clk_unprepare;
2210
2211 nfc->reset = devm_reset_control_get_optional(dev, "ahb");
2212 if (IS_ERR(nfc->reset)) {
2213 ret = PTR_ERR(nfc->reset);
2214 goto out_mod_clk_unprepare;
2215 }
2216
2217 ret = reset_control_deassert(nfc->reset);
2218 if (ret) {
2219 dev_err(dev, "reset err %d\n", ret);
2220 goto out_mod_clk_unprepare;
2221 }
2222
2223 ret = sunxi_nfc_rst(nfc);
2224 if (ret)
2225 goto out_ahb_reset_reassert;
2226
2227 writel(0, nfc->regs + NFC_REG_INT);
2228 ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
2229 0, "sunxi-nand", nfc);
2230 if (ret)
2231 goto out_ahb_reset_reassert;
2232
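/*
 * DMA support is optional: if the "rxtx" channel cannot be obtained the
 * driver falls back to PIO. DMA transfers source/target the
 * NFC_REG_IO_DATA register with 32-bit accesses and bursts of 4 words.
 */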
2233 nfc->dmac = dma_request_slave_channel(dev, "rxtx");
2234 if (nfc->dmac) {
2235 struct dma_slave_config dmac_cfg = { };
2236
2237 dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
2238 dmac_cfg.dst_addr = dmac_cfg.src_addr;
2239 dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2240 dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
2241 dmac_cfg.src_maxburst = 4;
2242 dmac_cfg.dst_maxburst = 4;
2243 dmaengine_slave_config(nfc->dmac, &dmac_cfg);
2244 } else {
2245 dev_warn(dev, "failed to request rxtx DMA channel\n");
2246 }
2247
2248 platform_set_drvdata(pdev, nfc);
2249
2250 ret = sunxi_nand_chips_init(dev, nfc);
2251 if (ret) {
2252 dev_err(dev, "failed to init nand chips\n");
2253 goto out_release_dmac;
2254 }
2255
2256 return 0;
2257
2258 out_release_dmac:
2259 if (nfc->dmac)
2260 dma_release_channel(nfc->dmac);
2261 out_ahb_reset_reassert:
2262 reset_control_assert(nfc->reset);
2263 out_mod_clk_unprepare:
2264 clk_disable_unprepare(nfc->mod_clk);
2265 out_ahb_clk_unprepare:
2266 clk_disable_unprepare(nfc->ahb_clk);
2267
2268 return ret;
2269 }
2270
2271 static int sunxi_nfc_remove(struct platform_device *pdev)
2272 {
2273 struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
2274
2275 sunxi_nand_chips_cleanup(nfc);
2276
2277 reset_control_assert(nfc->reset);
2278
2279 if (nfc->dmac)
2280 dma_release_channel(nfc->dmac);
2281 clk_disable_unprepare(nfc->mod_clk);
2282 clk_disable_unprepare(nfc->ahb_clk);
2283
2284 return 0;
2285 }
2286
2287 static const struct of_device_id sunxi_nfc_ids[] = {
2288 { .compatible = "allwinner,sun4i-a10-nand" },
2289 { /* sentinel */ }
2290 };
2291 MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
2292
2293 static struct platform_driver sunxi_nfc_driver = {
2294 .driver = {
2295 .name = "sunxi_nand",
2296 .of_match_table = sunxi_nfc_ids,
2297 },
2298 .probe = sunxi_nfc_probe,
2299 .remove = sunxi_nfc_remove,
2300 };
2301 module_platform_driver(sunxi_nfc_driver);
2302
2303 MODULE_LICENSE("GPL v2");
2304 MODULE_AUTHOR("Boris BREZILLON");
2305 MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
2306 MODULE_ALIAS("platform:sunxi_nand");