drivers/mtd/nand/nand_base.c
1 /*
2 * Overview:
3 * This is the generic MTD driver for NAND flash devices. It should be
4 * capable of working with almost all NAND chips currently available.
5 *
6 * Additional technical information is available on
7 * http://www.linux-mtd.infradead.org/doc/nand.html
8 *
9 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
11 *
12 * Credits:
13 * David Woodhouse for adding multichip support
14 *
15 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16 * rework for 2K page size chips
17 *
18 * TODO:
19 * Enable cached programming for 2k page size chips
20  *	Check whether mtd->ecctype should be set to MTD_ECC_HW
21  *	if we have HW ECC support.
22  *	The BBT is not serialized; this has to be fixed.
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
27 *
28 */
29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/types.h>
40 #include <linux/mtd/mtd.h>
41 #include <linux/mtd/nand.h>
42 #include <linux/mtd/nand_ecc.h>
43 #include <linux/mtd/nand_bch.h>
44 #include <linux/interrupt.h>
45 #include <linux/bitops.h>
46 #include <linux/io.h>
47 #include <linux/mtd/partitions.h>
48 #include <linux/of.h>
49
50 static int nand_get_device(struct mtd_info *mtd, int new_state);
51
52 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
53 struct mtd_oob_ops *ops);
54
55 /* Define default oob placement schemes for large and small page devices */
56 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
57 struct mtd_oob_region *oobregion)
58 {
59 struct nand_chip *chip = mtd_to_nand(mtd);
60 struct nand_ecc_ctrl *ecc = &chip->ecc;
61
62 if (section > 1)
63 return -ERANGE;
64
65 if (!section) {
66 oobregion->offset = 0;
67 oobregion->length = 4;
68 } else {
69 oobregion->offset = 6;
70 oobregion->length = ecc->total - 4;
71 }
72
73 return 0;
74 }
75
76 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
77 struct mtd_oob_region *oobregion)
78 {
79 if (section > 1)
80 return -ERANGE;
81
82 if (mtd->oobsize == 16) {
83 if (section)
84 return -ERANGE;
85
86 oobregion->length = 8;
87 oobregion->offset = 8;
88 } else {
89 oobregion->length = 2;
90 if (!section)
91 oobregion->offset = 3;
92 else
93 oobregion->offset = 6;
94 }
95
96 return 0;
97 }
98
99 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
100 .ecc = nand_ooblayout_ecc_sp,
101 .free = nand_ooblayout_free_sp,
102 };
103 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
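/*
 * For example, on a small-page SLC chip with a 16-byte OOB and the default
 * software ECC (3 bytes per 256-byte step, so ecc->total = 6), this layout
 * resolves to ECC bytes at offsets 0-3 and 6-7, leaves bytes 4-5 for the bad
 * block marker, and reports bytes 8-15 as free.
 */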
104
105 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
106 struct mtd_oob_region *oobregion)
107 {
108 struct nand_chip *chip = mtd_to_nand(mtd);
109 struct nand_ecc_ctrl *ecc = &chip->ecc;
110
111 if (section)
112 return -ERANGE;
113
114 oobregion->length = ecc->total;
115 oobregion->offset = mtd->oobsize - oobregion->length;
116
117 return 0;
118 }
119
120 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
121 struct mtd_oob_region *oobregion)
122 {
123 struct nand_chip *chip = mtd_to_nand(mtd);
124 struct nand_ecc_ctrl *ecc = &chip->ecc;
125
126 if (section)
127 return -ERANGE;
128
129 oobregion->length = mtd->oobsize - ecc->total - 2;
130 oobregion->offset = 2;
131
132 return 0;
133 }
134
135 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
136 .ecc = nand_ooblayout_ecc_lp,
137 .free = nand_ooblayout_free_lp,
138 };
139 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
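/*
 * For example, on a 2KiB page with a 64-byte OOB and ecc->total = 24, this
 * layout places the ECC bytes at offsets 40-63, keeps bytes 0-1 for the bad
 * block marker, and reports bytes 2-39 as free.
 */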
140
141 static int check_offs_len(struct mtd_info *mtd,
142 loff_t ofs, uint64_t len)
143 {
144 struct nand_chip *chip = mtd_to_nand(mtd);
145 int ret = 0;
146
147 /* Start address must align on block boundary */
148 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
149 pr_debug("%s: unaligned address\n", __func__);
150 ret = -EINVAL;
151 }
152
153 /* Length must align on block boundary */
154 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
155 pr_debug("%s: length not block aligned\n", __func__);
156 ret = -EINVAL;
157 }
158
159 return ret;
160 }
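/*
 * Both checks above reject anything that is not a whole number of erase
 * blocks: e.g. with 128KiB blocks (phys_erase_shift = 17), ofs and len must
 * both be multiples of 0x20000.
 */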
161
162 /**
163 * nand_release_device - [GENERIC] release chip
164 * @mtd: MTD device structure
165 *
166 * Release chip lock and wake up anyone waiting on the device.
167 */
168 static void nand_release_device(struct mtd_info *mtd)
169 {
170 struct nand_chip *chip = mtd_to_nand(mtd);
171
172 /* Release the controller and the chip */
173 spin_lock(&chip->controller->lock);
174 chip->controller->active = NULL;
175 chip->state = FL_READY;
176 wake_up(&chip->controller->wq);
177 spin_unlock(&chip->controller->lock);
178 }
179
180 /**
181 * nand_read_byte - [DEFAULT] read one byte from the chip
182 * @mtd: MTD device structure
183 *
184 * Default read function for 8bit buswidth
185 */
186 static uint8_t nand_read_byte(struct mtd_info *mtd)
187 {
188 struct nand_chip *chip = mtd_to_nand(mtd);
189 return readb(chip->IO_ADDR_R);
190 }
191
192 /**
193 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
194 * @mtd: MTD device structure
195 *
196 * Default read function for 16bit buswidth with endianness conversion.
197 *
198 */
199 static uint8_t nand_read_byte16(struct mtd_info *mtd)
200 {
201 struct nand_chip *chip = mtd_to_nand(mtd);
202 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
203 }
204
205 /**
206 * nand_read_word - [DEFAULT] read one word from the chip
207 * @mtd: MTD device structure
208 *
209 * Default read function for 16bit buswidth without endianness conversion.
210 */
211 static u16 nand_read_word(struct mtd_info *mtd)
212 {
213 struct nand_chip *chip = mtd_to_nand(mtd);
214 return readw(chip->IO_ADDR_R);
215 }
216
217 /**
218 * nand_select_chip - [DEFAULT] control CE line
219 * @mtd: MTD device structure
220 * @chipnr: chipnumber to select, -1 for deselect
221 *
222 * Default select function for 1 chip devices.
223 */
224 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
225 {
226 struct nand_chip *chip = mtd_to_nand(mtd);
227
228 switch (chipnr) {
229 case -1:
230 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
231 break;
232 case 0:
233 break;
234
235 default:
236 BUG();
237 }
238 }
239
240 /**
241 * nand_write_byte - [DEFAULT] write single byte to chip
242 * @mtd: MTD device structure
243 * @byte: value to write
244 *
245 * Default function to write a byte to I/O[7:0]
246 */
247 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
248 {
249 struct nand_chip *chip = mtd_to_nand(mtd);
250
251 chip->write_buf(mtd, &byte, 1);
252 }
253
254 /**
255 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
256 * @mtd: MTD device structure
257 * @byte: value to write
258 *
259 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
260 */
261 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
262 {
263 struct nand_chip *chip = mtd_to_nand(mtd);
264 uint16_t word = byte;
265
266 /*
267 * It's not entirely clear what should happen to I/O[15:8] when writing
268 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
269 *
270 * When the host supports a 16-bit bus width, only data is
271 * transferred at the 16-bit width. All address and command line
272 * transfers shall use only the lower 8-bits of the data bus. During
273 * command transfers, the host may place any value on the upper
274 * 8-bits of the data bus. During address transfers, the host shall
275 * set the upper 8-bits of the data bus to 00h.
276 *
277 * One user of the write_byte callback is nand_onfi_set_features. The
278 * four parameters are specified to be written to I/O[7:0], but this is
279 * neither an address nor a command transfer. Let's assume a 0 on the
280 * upper I/O lines is OK.
281 */
282 chip->write_buf(mtd, (uint8_t *)&word, 2);
283 }
284
285 /**
286 * nand_write_buf - [DEFAULT] write buffer to chip
287 * @mtd: MTD device structure
288 * @buf: data buffer
289 * @len: number of bytes to write
290 *
291 * Default write function for 8bit buswidth.
292 */
293 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
294 {
295 struct nand_chip *chip = mtd_to_nand(mtd);
296
297 iowrite8_rep(chip->IO_ADDR_W, buf, len);
298 }
299
300 /**
301 * nand_read_buf - [DEFAULT] read chip data into buffer
302 * @mtd: MTD device structure
303  * @buf: buffer to store data
304 * @len: number of bytes to read
305 *
306 * Default read function for 8bit buswidth.
307 */
308 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
309 {
310 struct nand_chip *chip = mtd_to_nand(mtd);
311
312 ioread8_rep(chip->IO_ADDR_R, buf, len);
313 }
314
315 /**
316 * nand_write_buf16 - [DEFAULT] write buffer to chip
317 * @mtd: MTD device structure
318 * @buf: data buffer
319 * @len: number of bytes to write
320 *
321 * Default write function for 16bit buswidth.
322 */
323 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
324 {
325 struct nand_chip *chip = mtd_to_nand(mtd);
326 u16 *p = (u16 *) buf;
327
328 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
329 }
330
331 /**
332 * nand_read_buf16 - [DEFAULT] read chip data into buffer
333 * @mtd: MTD device structure
334  * @buf: buffer to store data
335 * @len: number of bytes to read
336 *
337 * Default read function for 16bit buswidth.
338 */
339 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
340 {
341 struct nand_chip *chip = mtd_to_nand(mtd);
342 u16 *p = (u16 *) buf;
343
344 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
345 }
346
347 /**
348 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
349 * @mtd: MTD device structure
350 * @ofs: offset from device start
351 *
352  * Check if the block is bad.
353 */
354 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
355 {
356 int page, res = 0, i = 0;
357 struct nand_chip *chip = mtd_to_nand(mtd);
358 u16 bad;
359
360 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
361 ofs += mtd->erasesize - mtd->writesize;
362
363 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
364
365 do {
366 if (chip->options & NAND_BUSWIDTH_16) {
367 chip->cmdfunc(mtd, NAND_CMD_READOOB,
368 chip->badblockpos & 0xFE, page);
369 bad = cpu_to_le16(chip->read_word(mtd));
370 if (chip->badblockpos & 0x1)
371 bad >>= 8;
372 else
373 bad &= 0xFF;
374 } else {
375 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
376 page);
377 bad = chip->read_byte(mtd);
378 }
379
380 if (likely(chip->badblockbits == 8))
381 res = bad != 0xFF;
382 else
383 res = hweight8(bad) < chip->badblockbits;
384 ofs += mtd->writesize;
385 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
386 i++;
387 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
388
389 return res;
390 }
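/*
 * Note on the marker position read above: chip->badblockpos is normally
 * byte 0 of the OOB area for large-page devices and byte 5 for small-page
 * devices, and factory-bad blocks ship with a non-0xFF value there.
 */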
391
392 /**
393 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
394 * @mtd: MTD device structure
395 * @ofs: offset from device start
396 *
397 * This is the default implementation, which can be overridden by a hardware
398 * specific driver. It provides the details for writing a bad block marker to a
399 * block.
400 */
401 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
402 {
403 struct nand_chip *chip = mtd_to_nand(mtd);
404 struct mtd_oob_ops ops;
405 uint8_t buf[2] = { 0, 0 };
406 int ret = 0, res, i = 0;
407
408 memset(&ops, 0, sizeof(ops));
409 ops.oobbuf = buf;
410 ops.ooboffs = chip->badblockpos;
411 if (chip->options & NAND_BUSWIDTH_16) {
412 ops.ooboffs &= ~0x01;
413 ops.len = ops.ooblen = 2;
414 } else {
415 ops.len = ops.ooblen = 1;
416 }
417 ops.mode = MTD_OPS_PLACE_OOB;
418
419 /* Write to first/last page(s) if necessary */
420 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
421 ofs += mtd->erasesize - mtd->writesize;
422 do {
423 res = nand_do_write_oob(mtd, ofs, &ops);
424 if (!ret)
425 ret = res;
426
427 i++;
428 ofs += mtd->writesize;
429 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
430
431 return ret;
432 }
433
434 /**
435 * nand_block_markbad_lowlevel - mark a block bad
436 * @mtd: MTD device structure
437 * @ofs: offset from device start
438 *
439 * This function performs the generic NAND bad block marking steps (i.e., bad
440 * block table(s) and/or marker(s)). We only allow the hardware driver to
441 * specify how to write bad block markers to OOB (chip->block_markbad).
442 *
443 * We try operations in the following order:
444 * (1) erase the affected block, to allow OOB marker to be written cleanly
445 * (2) write bad block marker to OOB area of affected block (unless flag
446 * NAND_BBT_NO_OOB_BBM is present)
447 * (3) update the BBT
448 * Note that we retain the first error encountered in (2) or (3), finish the
449  * procedures, and return that error at the end.
450 */
451 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
452 {
453 struct nand_chip *chip = mtd_to_nand(mtd);
454 int res, ret = 0;
455
456 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
457 struct erase_info einfo;
458
459 /* Attempt erase before marking OOB */
460 memset(&einfo, 0, sizeof(einfo));
461 einfo.mtd = mtd;
462 einfo.addr = ofs;
463 einfo.len = 1ULL << chip->phys_erase_shift;
464 nand_erase_nand(mtd, &einfo, 0);
465
466 /* Write bad block marker to OOB */
467 nand_get_device(mtd, FL_WRITING);
468 ret = chip->block_markbad(mtd, ofs);
469 nand_release_device(mtd);
470 }
471
472 /* Mark block bad in BBT */
473 if (chip->bbt) {
474 res = nand_markbad_bbt(mtd, ofs);
475 if (!ret)
476 ret = res;
477 }
478
479 if (!ret)
480 mtd->ecc_stats.badblocks++;
481
482 return ret;
483 }
484
485 /**
486 * nand_check_wp - [GENERIC] check if the chip is write protected
487 * @mtd: MTD device structure
488 *
489  * Check if the device is write protected. The function expects that the
490  * device is already selected.
491 */
492 static int nand_check_wp(struct mtd_info *mtd)
493 {
494 struct nand_chip *chip = mtd_to_nand(mtd);
495
496 /* Broken xD cards report WP despite being writable */
497 if (chip->options & NAND_BROKEN_XD)
498 return 0;
499
500 /* Check the WP bit */
501 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
502 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
503 }
504
505 /**
506 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
507 * @mtd: MTD device structure
508 * @ofs: offset from device start
509 *
510 * Check if the block is marked as reserved.
511 */
512 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
513 {
514 struct nand_chip *chip = mtd_to_nand(mtd);
515
516 if (!chip->bbt)
517 return 0;
518 /* Return info from the table */
519 return nand_isreserved_bbt(mtd, ofs);
520 }
521
522 /**
523 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
524 * @mtd: MTD device structure
525 * @ofs: offset from device start
526  * @allowbbt: 1 if it is allowed to access the BBT area
527  *
528  * Check if the block is bad, either by reading the bad block table or
529  * by calling the scan function.
530 */
531 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
532 {
533 struct nand_chip *chip = mtd_to_nand(mtd);
534
535 if (!chip->bbt)
536 return chip->block_bad(mtd, ofs);
537
538 /* Return info from the table */
539 return nand_isbad_bbt(mtd, ofs, allowbbt);
540 }
541
542 /**
543 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
544 * @mtd: MTD device structure
545 * @timeo: Timeout
546 *
547 * Helper function for nand_wait_ready used when needing to wait in interrupt
548 * context.
549 */
550 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
551 {
552 struct nand_chip *chip = mtd_to_nand(mtd);
553 int i;
554
555 /* Wait for the device to get ready */
556 for (i = 0; i < timeo; i++) {
557 if (chip->dev_ready(mtd))
558 break;
559 touch_softlockup_watchdog();
560 mdelay(1);
561 }
562 }
563
564 /**
565 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
566 * @mtd: MTD device structure
567 *
568 * Wait for the ready pin after a command, and warn if a timeout occurs.
569 */
570 void nand_wait_ready(struct mtd_info *mtd)
571 {
572 struct nand_chip *chip = mtd_to_nand(mtd);
573 unsigned long timeo = 400;
574
575 if (in_interrupt() || oops_in_progress)
576 return panic_nand_wait_ready(mtd, timeo);
577
578 /* Wait until command is processed or timeout occurs */
579 timeo = jiffies + msecs_to_jiffies(timeo);
580 do {
581 if (chip->dev_ready(mtd))
582 return;
583 cond_resched();
584 } while (time_before(jiffies, timeo));
585
586 if (!chip->dev_ready(mtd))
587 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
588 }
589 EXPORT_SYMBOL_GPL(nand_wait_ready);
590
591 /**
592 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
593 * @mtd: MTD device structure
594 * @timeo: Timeout in ms
595 *
596 * Wait for status ready (i.e. command done) or timeout.
597 */
598 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
599 {
600 register struct nand_chip *chip = mtd_to_nand(mtd);
601
602 timeo = jiffies + msecs_to_jiffies(timeo);
603 do {
604 if ((chip->read_byte(mtd) & NAND_STATUS_READY))
605 break;
606 touch_softlockup_watchdog();
607 } while (time_before(jiffies, timeo));
608 }
609
610 /**
611 * nand_command - [DEFAULT] Send command to NAND device
612 * @mtd: MTD device structure
613 * @command: the command to be sent
614 * @column: the column address for this command, -1 if none
615 * @page_addr: the page address for this command, -1 if none
616 *
617 * Send command to NAND device. This function is used for small page devices
618 * (512 Bytes per page).
619 */
620 static void nand_command(struct mtd_info *mtd, unsigned int command,
621 int column, int page_addr)
622 {
623 register struct nand_chip *chip = mtd_to_nand(mtd);
624 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
625
626 /* Write out the command to the device */
627 if (command == NAND_CMD_SEQIN) {
628 int readcmd;
629
630 if (column >= mtd->writesize) {
631 /* OOB area */
632 column -= mtd->writesize;
633 readcmd = NAND_CMD_READOOB;
634 } else if (column < 256) {
635 /* First 256 bytes --> READ0 */
636 readcmd = NAND_CMD_READ0;
637 } else {
638 column -= 256;
639 readcmd = NAND_CMD_READ1;
640 }
641 chip->cmd_ctrl(mtd, readcmd, ctrl);
642 ctrl &= ~NAND_CTRL_CHANGE;
643 }
644 chip->cmd_ctrl(mtd, command, ctrl);
645
646 /* Address cycle, when necessary */
647 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
648 /* Serially input address */
649 if (column != -1) {
650 /* Adjust columns for 16 bit buswidth */
651 if (chip->options & NAND_BUSWIDTH_16 &&
652 !nand_opcode_8bits(command))
653 column >>= 1;
654 chip->cmd_ctrl(mtd, column, ctrl);
655 ctrl &= ~NAND_CTRL_CHANGE;
656 }
657 if (page_addr != -1) {
658 chip->cmd_ctrl(mtd, page_addr, ctrl);
659 ctrl &= ~NAND_CTRL_CHANGE;
660 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
661 /* One more address cycle for devices > 32MiB */
662 if (chip->chipsize > (32 << 20))
663 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
664 }
665 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
666
667 /*
668 	 * Program and erase have their own busy handlers; status and sequential
669 	 * in need no delay.
670 */
671 switch (command) {
672
673 case NAND_CMD_PAGEPROG:
674 case NAND_CMD_ERASE1:
675 case NAND_CMD_ERASE2:
676 case NAND_CMD_SEQIN:
677 case NAND_CMD_STATUS:
678 return;
679
680 case NAND_CMD_RESET:
681 if (chip->dev_ready)
682 break;
683 udelay(chip->chip_delay);
684 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
685 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
686 chip->cmd_ctrl(mtd,
687 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
688 		/* EZ-NAND can take up to 250ms as per ONFI v4.0 */
689 nand_wait_status_ready(mtd, 250);
690 return;
691
692 /* This applies to read commands */
693 default:
694 /*
695 * If we don't have access to the busy pin, we apply the given
696 * command delay
697 */
698 if (!chip->dev_ready) {
699 udelay(chip->chip_delay);
700 return;
701 }
702 }
703 /*
704 * Apply this short delay always to ensure that we do wait tWB in
705 * any case on any machine.
706 */
707 ndelay(100);
708
709 nand_wait_ready(mtd);
710 }
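/*
 * As a quick reference for the SEQIN handling above: on 512-byte page
 * devices the column space is split into three regions, so a write to
 * columns 0-255 is preceded by READ0, columns 256-511 by READ1, and anything
 * in the spare area by READOOB, which selects the proper register pointer
 * before the program sequence starts.
 */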
711
712 static void nand_ccs_delay(struct nand_chip *chip)
713 {
714 /*
715 * The controller already takes care of waiting for tCCS when the RNDIN
716 	 * or RNDOUT command is sent, so return directly.
717 */
718 if (!(chip->options & NAND_WAIT_TCCS))
719 return;
720
721 /*
722 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
723 * (which should be safe for all NANDs).
724 */
725 if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
726 ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
727 else
728 ndelay(500);
729 }
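/*
 * The division by 1000 above converts tCCS_min, which the SDR timing
 * structure expresses in picoseconds, into the nanoseconds expected by
 * ndelay(); for instance a tCCS_min of 500000 ps results in ndelay(500).
 */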
730
731 /**
732 * nand_command_lp - [DEFAULT] Send command to NAND large page device
733 * @mtd: MTD device structure
734 * @command: the command to be sent
735 * @column: the column address for this command, -1 if none
736 * @page_addr: the page address for this command, -1 if none
737 *
738 * Send command to NAND device. This is the version for the new large page
739 * devices. We don't have the separate regions as we have in the small page
740 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
741 */
742 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
743 int column, int page_addr)
744 {
745 register struct nand_chip *chip = mtd_to_nand(mtd);
746
747 /* Emulate NAND_CMD_READOOB */
748 if (command == NAND_CMD_READOOB) {
749 column += mtd->writesize;
750 command = NAND_CMD_READ0;
751 }
752
753 /* Command latch cycle */
754 chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
755
756 if (column != -1 || page_addr != -1) {
757 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
758
759 /* Serially input address */
760 if (column != -1) {
761 /* Adjust columns for 16 bit buswidth */
762 if (chip->options & NAND_BUSWIDTH_16 &&
763 !nand_opcode_8bits(command))
764 column >>= 1;
765 chip->cmd_ctrl(mtd, column, ctrl);
766 ctrl &= ~NAND_CTRL_CHANGE;
767
768 			/* Only output a single addr cycle for 8-bit opcodes. */
769 if (!nand_opcode_8bits(command))
770 chip->cmd_ctrl(mtd, column >> 8, ctrl);
771 }
772 if (page_addr != -1) {
773 chip->cmd_ctrl(mtd, page_addr, ctrl);
774 chip->cmd_ctrl(mtd, page_addr >> 8,
775 NAND_NCE | NAND_ALE);
776 /* One more address cycle for devices > 128MiB */
777 if (chip->chipsize > (128 << 20))
778 chip->cmd_ctrl(mtd, page_addr >> 16,
779 NAND_NCE | NAND_ALE);
780 }
781 }
782 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
783
784 /*
785 	 * Program and erase have their own busy handlers; status and sequential
786 	 * in need no delay.
787 */
788 switch (command) {
789
790 case NAND_CMD_CACHEDPROG:
791 case NAND_CMD_PAGEPROG:
792 case NAND_CMD_ERASE1:
793 case NAND_CMD_ERASE2:
794 case NAND_CMD_SEQIN:
795 case NAND_CMD_STATUS:
796 return;
797
798 case NAND_CMD_RNDIN:
799 nand_ccs_delay(chip);
800 return;
801
802 case NAND_CMD_RESET:
803 if (chip->dev_ready)
804 break;
805 udelay(chip->chip_delay);
806 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
807 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
808 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
809 NAND_NCE | NAND_CTRL_CHANGE);
810 		/* EZ-NAND can take up to 250ms as per ONFI v4.0 */
811 nand_wait_status_ready(mtd, 250);
812 return;
813
814 case NAND_CMD_RNDOUT:
815 /* No ready / busy check necessary */
816 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
817 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
818 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
819 NAND_NCE | NAND_CTRL_CHANGE);
820
821 nand_ccs_delay(chip);
822 return;
823
824 case NAND_CMD_READ0:
825 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
826 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
827 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
828 NAND_NCE | NAND_CTRL_CHANGE);
829
830 /* This applies to read commands */
831 default:
832 /*
833 * If we don't have access to the busy pin, we apply the given
834 * command delay.
835 */
836 if (!chip->dev_ready) {
837 udelay(chip->chip_delay);
838 return;
839 }
840 }
841
842 /*
843 * Apply this short delay always to ensure that we do wait tWB in
844 * any case on any machine.
845 */
846 ndelay(100);
847
848 nand_wait_ready(mtd);
849 }
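/*
 * For reference, a full page read on a large-page device issued through this
 * helper ends up as the classic ONFI sequence: a READ0 (00h) command cycle,
 * two column address cycles, two or three row address cycles (depending on
 * chip size), a READSTART (30h) command cycle, and finally a wait for the
 * ready/busy line before data can be clocked out.
 */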
850
851 /**
852 * panic_nand_get_device - [GENERIC] Get chip for selected access
853 * @chip: the nand chip descriptor
854 * @mtd: MTD device structure
855 * @new_state: the state which is requested
856 *
857 * Used when in panic, no locks are taken.
858 */
859 static void panic_nand_get_device(struct nand_chip *chip,
860 struct mtd_info *mtd, int new_state)
861 {
862 /* Hardware controller shared among independent devices */
863 chip->controller->active = chip;
864 chip->state = new_state;
865 }
866
867 /**
868 * nand_get_device - [GENERIC] Get chip for selected access
869 * @mtd: MTD device structure
870 * @new_state: the state which is requested
871 *
872 * Get the device and lock it for exclusive access
873 */
874 static int
875 nand_get_device(struct mtd_info *mtd, int new_state)
876 {
877 struct nand_chip *chip = mtd_to_nand(mtd);
878 spinlock_t *lock = &chip->controller->lock;
879 wait_queue_head_t *wq = &chip->controller->wq;
880 DECLARE_WAITQUEUE(wait, current);
881 retry:
882 spin_lock(lock);
883
884 /* Hardware controller shared among independent devices */
885 if (!chip->controller->active)
886 chip->controller->active = chip;
887
888 if (chip->controller->active == chip && chip->state == FL_READY) {
889 chip->state = new_state;
890 spin_unlock(lock);
891 return 0;
892 }
893 if (new_state == FL_PM_SUSPENDED) {
894 if (chip->controller->active->state == FL_PM_SUSPENDED) {
895 chip->state = FL_PM_SUSPENDED;
896 spin_unlock(lock);
897 return 0;
898 }
899 }
900 set_current_state(TASK_UNINTERRUPTIBLE);
901 add_wait_queue(wq, &wait);
902 spin_unlock(lock);
903 schedule();
904 remove_wait_queue(wq, &wait);
905 goto retry;
906 }
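/*
 * Callers that lose the race above simply sleep on the controller wait queue
 * and retry from the top once nand_release_device() wakes them, so exclusive
 * access is serialized per controller rather than per chip.
 */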
907
908 /**
909 * panic_nand_wait - [GENERIC] wait until the command is done
910 * @mtd: MTD device structure
911 * @chip: NAND chip structure
912 * @timeo: timeout
913 *
914 * Wait for command done. This is a helper function for nand_wait used when
915 * we are in interrupt context. May happen when in panic and trying to write
916 * an oops through mtdoops.
917 */
918 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
919 unsigned long timeo)
920 {
921 int i;
922 for (i = 0; i < timeo; i++) {
923 if (chip->dev_ready) {
924 if (chip->dev_ready(mtd))
925 break;
926 } else {
927 if (chip->read_byte(mtd) & NAND_STATUS_READY)
928 break;
929 }
930 mdelay(1);
931 }
932 }
933
934 /**
935 * nand_wait - [DEFAULT] wait until the command is done
936 * @mtd: MTD device structure
937 * @chip: NAND chip structure
938 *
939 * Wait for command done. This applies to erase and program only.
940 */
941 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
942 {
943
944 int status;
945 unsigned long timeo = 400;
946
947 /*
948 * Apply this short delay always to ensure that we do wait tWB in any
949 * case on any machine.
950 */
951 ndelay(100);
952
953 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
954
955 if (in_interrupt() || oops_in_progress)
956 panic_nand_wait(mtd, chip, timeo);
957 else {
958 timeo = jiffies + msecs_to_jiffies(timeo);
959 do {
960 if (chip->dev_ready) {
961 if (chip->dev_ready(mtd))
962 break;
963 } else {
964 if (chip->read_byte(mtd) & NAND_STATUS_READY)
965 break;
966 }
967 cond_resched();
968 } while (time_before(jiffies, timeo));
969 }
970
971 status = (int)chip->read_byte(mtd);
972 	/* This can happen in case of a timeout or a buggy dev_ready */
973 WARN_ON(!(status & NAND_STATUS_READY));
974 return status;
975 }
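/*
 * Callers of chip->waitfunc() typically only look at NAND_STATUS_FAIL in the
 * returned value, e.g.:
 *
 *	status = chip->waitfunc(mtd, chip);
 *	if (status & NAND_STATUS_FAIL)
 *		return -EIO;
 */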
976
977 /**
978 * nand_reset_data_interface - Reset data interface and timings
979 * @chip: The NAND chip
980 *
981 * Reset the Data interface and timings to ONFI mode 0.
982 *
983 * Returns 0 for success or negative error code otherwise.
984 */
985 static int nand_reset_data_interface(struct nand_chip *chip)
986 {
987 struct mtd_info *mtd = nand_to_mtd(chip);
988 const struct nand_data_interface *conf;
989 int ret;
990
991 if (!chip->setup_data_interface)
992 return 0;
993
994 /*
995 * The ONFI specification says:
996 * "
997 * To transition from NV-DDR or NV-DDR2 to the SDR data
998 * interface, the host shall use the Reset (FFh) command
999 * using SDR timing mode 0. A device in any timing mode is
1000 * required to recognize Reset (FFh) command issued in SDR
1001 * timing mode 0.
1002 * "
1003 *
1004 * Configure the data interface in SDR mode and set the
1005 * timings to timing mode 0.
1006 */
1007
1008 conf = nand_get_default_data_interface();
1009 ret = chip->setup_data_interface(mtd, conf, false);
1010 if (ret)
1011 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1012
1013 return ret;
1014 }
1015
1016 /**
1017 * nand_setup_data_interface - Setup the best data interface and timings
1018 * @chip: The NAND chip
1019 *
1020 * Find and configure the best data interface and NAND timings supported by
1021 * the chip and the driver.
1022 * First tries to retrieve supported timing modes from ONFI information,
1023 * and if the NAND chip does not support ONFI, relies on the
1024 * ->onfi_timing_mode_default specified in the nand_ids table.
1025 *
1026 * Returns 0 for success or negative error code otherwise.
1027 */
1028 static int nand_setup_data_interface(struct nand_chip *chip)
1029 {
1030 struct mtd_info *mtd = nand_to_mtd(chip);
1031 int ret;
1032
1033 if (!chip->setup_data_interface || !chip->data_interface)
1034 return 0;
1035
1036 /*
1037 * Ensure the timing mode has been changed on the chip side
1038 * before changing timings on the controller side.
1039 */
1040 if (chip->onfi_version) {
1041 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1042 chip->onfi_timing_mode_default,
1043 };
1044
1045 ret = chip->onfi_set_features(mtd, chip,
1046 ONFI_FEATURE_ADDR_TIMING_MODE,
1047 tmode_param);
1048 if (ret)
1049 goto err;
1050 }
1051
1052 ret = chip->setup_data_interface(mtd, chip->data_interface, false);
1053 err:
1054 return ret;
1055 }
1056
1057 /**
1058 * nand_init_data_interface - find the best data interface and timings
1059 * @chip: The NAND chip
1060 *
1061 * Find the best data interface and NAND timings supported by the chip
1062 * and the driver.
1063 * First tries to retrieve supported timing modes from ONFI information,
1064 * and if the NAND chip does not support ONFI, relies on the
1065 * ->onfi_timing_mode_default specified in the nand_ids table. After this
1066 * function nand_chip->data_interface is initialized with the best timing mode
1067 * available.
1068 *
1069 * Returns 0 for success or negative error code otherwise.
1070 */
1071 static int nand_init_data_interface(struct nand_chip *chip)
1072 {
1073 struct mtd_info *mtd = nand_to_mtd(chip);
1074 int modes, mode, ret;
1075
1076 if (!chip->setup_data_interface)
1077 return 0;
1078
1079 /*
1080 * First try to identify the best timings from ONFI parameters and
1081 * if the NAND does not support ONFI, fallback to the default ONFI
1082 * timing mode.
1083 */
1084 modes = onfi_get_async_timing_mode(chip);
1085 if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1086 if (!chip->onfi_timing_mode_default)
1087 return 0;
1088
1089 modes = GENMASK(chip->onfi_timing_mode_default, 0);
1090 }
1091
1092 chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1093 GFP_KERNEL);
1094 if (!chip->data_interface)
1095 return -ENOMEM;
1096
1097 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1098 ret = onfi_init_data_interface(chip, chip->data_interface,
1099 NAND_SDR_IFACE, mode);
1100 if (ret)
1101 continue;
1102
1103 ret = chip->setup_data_interface(mtd, chip->data_interface,
1104 true);
1105 if (!ret) {
1106 chip->onfi_timing_mode_default = mode;
1107 break;
1108 }
1109 }
1110
1111 return 0;
1112 }
1113
1114 static void nand_release_data_interface(struct nand_chip *chip)
1115 {
1116 kfree(chip->data_interface);
1117 }
1118
1119 /**
1120 * nand_reset - Reset and initialize a NAND device
1121 * @chip: The NAND chip
1122 *
1123 * Returns 0 for success or negative error code otherwise
1124 */
1125 int nand_reset(struct nand_chip *chip)
1126 {
1127 struct mtd_info *mtd = nand_to_mtd(chip);
1128 int ret;
1129
1130 ret = nand_reset_data_interface(chip);
1131 if (ret)
1132 return ret;
1133
1134 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1135
1136 ret = nand_setup_data_interface(chip);
1137 if (ret)
1138 return ret;
1139
1140 return 0;
1141 }
1142
1143 /**
1144 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1145 * @mtd: mtd info
1146 * @ofs: offset to start unlock from
1147 * @len: length to unlock
1148 * @invert: when = 0, unlock the range of blocks within the lower and
1149 * upper boundary address
1150 * when = 1, unlock the range of blocks outside the boundaries
1151 * of the lower and upper boundary address
1152 *
1153  * Returns unlock status.
1154 */
1155 static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
1156 uint64_t len, int invert)
1157 {
1158 int ret = 0;
1159 int status, page;
1160 struct nand_chip *chip = mtd_to_nand(mtd);
1161
1162 /* Submit address of first page to unlock */
1163 page = ofs >> chip->page_shift;
1164 chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
1165
1166 /* Submit address of last page to unlock */
1167 page = (ofs + len) >> chip->page_shift;
1168 chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
1169 (page | invert) & chip->pagemask);
1170
1171 /* Call wait ready function */
1172 status = chip->waitfunc(mtd, chip);
1173 /* See if device thinks it succeeded */
1174 if (status & NAND_STATUS_FAIL) {
1175 pr_debug("%s: error status = 0x%08x\n",
1176 __func__, status);
1177 ret = -EIO;
1178 }
1179
1180 return ret;
1181 }
1182
1183 /**
1184 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1185 * @mtd: mtd info
1186 * @ofs: offset to start unlock from
1187 * @len: length to unlock
1188 *
1189 * Returns unlock status.
1190 */
1191 int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1192 {
1193 int ret = 0;
1194 int chipnr;
1195 struct nand_chip *chip = mtd_to_nand(mtd);
1196
1197 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1198 __func__, (unsigned long long)ofs, len);
1199
1200 if (check_offs_len(mtd, ofs, len))
1201 return -EINVAL;
1202
1203 /* Align to last block address if size addresses end of the device */
1204 if (ofs + len == mtd->size)
1205 len -= mtd->erasesize;
1206
1207 nand_get_device(mtd, FL_UNLOCKING);
1208
1209 /* Shift to get chip number */
1210 chipnr = ofs >> chip->chip_shift;
1211
1212 chip->select_chip(mtd, chipnr);
1213
1214 /*
1215 * Reset the chip.
1216 	 * If we want to check WP through READ STATUS (bit 7), we must reset
1217 	 * the chip first, because some operations can also clear bit 7 of the
1218 	 * status register,
1219 	 * e.g. erasing or programming a locked block.
1220 */
1221 nand_reset(chip);
1222
1223 	/* Check if it is write protected */
1224 if (nand_check_wp(mtd)) {
1225 pr_debug("%s: device is write protected!\n",
1226 __func__);
1227 ret = -EIO;
1228 goto out;
1229 }
1230
1231 ret = __nand_unlock(mtd, ofs, len, 0);
1232
1233 out:
1234 chip->select_chip(mtd, -1);
1235 nand_release_device(mtd);
1236
1237 return ret;
1238 }
1239 EXPORT_SYMBOL(nand_unlock);
1240
1241 /**
1242 * nand_lock - [REPLACEABLE] locks all blocks present in the device
1243 * @mtd: mtd info
1244  * @ofs: offset to start lock from
1245  * @len: length to lock
1246 *
1247 * This feature is not supported in many NAND parts. 'Micron' NAND parts do
1248  * have this feature, but it only allows locking all blocks, not a specified
1249  * range of blocks. For now, the 'lock' feature is implemented by making use
1250  * of 'unlock'.
1251 *
1252 * Returns lock status.
1253 */
1254 int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1255 {
1256 int ret = 0;
1257 int chipnr, status, page;
1258 struct nand_chip *chip = mtd_to_nand(mtd);
1259
1260 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1261 __func__, (unsigned long long)ofs, len);
1262
1263 if (check_offs_len(mtd, ofs, len))
1264 return -EINVAL;
1265
1266 nand_get_device(mtd, FL_LOCKING);
1267
1268 /* Shift to get chip number */
1269 chipnr = ofs >> chip->chip_shift;
1270
1271 chip->select_chip(mtd, chipnr);
1272
1273 /*
1274 * Reset the chip.
1275 	 * If we want to check WP through READ STATUS (bit 7), we must reset
1276 	 * the chip first, because some operations can also clear bit 7 of the
1277 	 * status register,
1278 	 * e.g. erasing or programming a locked block.
1279 */
1280 nand_reset(chip);
1281
1282 	/* Check if it is write protected */
1283 if (nand_check_wp(mtd)) {
1284 pr_debug("%s: device is write protected!\n",
1285 __func__);
1286 status = MTD_ERASE_FAILED;
1287 ret = -EIO;
1288 goto out;
1289 }
1290
1291 /* Submit address of first page to lock */
1292 page = ofs >> chip->page_shift;
1293 chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
1294
1295 /* Call wait ready function */
1296 status = chip->waitfunc(mtd, chip);
1297 /* See if device thinks it succeeded */
1298 if (status & NAND_STATUS_FAIL) {
1299 pr_debug("%s: error status = 0x%08x\n",
1300 __func__, status);
1301 ret = -EIO;
1302 goto out;
1303 }
1304
1305 ret = __nand_unlock(mtd, ofs, len, 0x1);
1306
1307 out:
1308 chip->select_chip(mtd, -1);
1309 nand_release_device(mtd);
1310
1311 return ret;
1312 }
1313 EXPORT_SYMBOL(nand_lock);
1314
1315 /**
1316 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1317 * @buf: buffer to test
1318 * @len: buffer length
1319 * @bitflips_threshold: maximum number of bitflips
1320 *
1321 * Check if a buffer contains only 0xff, which means the underlying region
1322 * has been erased and is ready to be programmed.
1323  * The bitflips_threshold specifies the maximum number of bitflips before
1324  * considering the region as not erased.
1325  * Note: The logic of this function has been extracted from the memweight
1326  * implementation, except that nand_check_erased_buf exits before testing
1327  * the whole buffer if the number of bitflips exceeds the
1328  * bitflips_threshold value.
1329 *
1330 * Returns a positive number of bitflips less than or equal to
1331 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1332 * threshold.
1333 */
1334 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1335 {
1336 const unsigned char *bitmap = buf;
1337 int bitflips = 0;
1338 int weight;
1339
1340 for (; len && ((uintptr_t)bitmap) % sizeof(long);
1341 len--, bitmap++) {
1342 weight = hweight8(*bitmap);
1343 bitflips += BITS_PER_BYTE - weight;
1344 if (unlikely(bitflips > bitflips_threshold))
1345 return -EBADMSG;
1346 }
1347
1348 for (; len >= sizeof(long);
1349 len -= sizeof(long), bitmap += sizeof(long)) {
1350 weight = hweight_long(*((unsigned long *)bitmap));
1351 bitflips += BITS_PER_LONG - weight;
1352 if (unlikely(bitflips > bitflips_threshold))
1353 return -EBADMSG;
1354 }
1355
1356 for (; len > 0; len--, bitmap++) {
1357 weight = hweight8(*bitmap);
1358 bitflips += BITS_PER_BYTE - weight;
1359 if (unlikely(bitflips > bitflips_threshold))
1360 return -EBADMSG;
1361 }
1362
1363 return bitflips;
1364 }
1365
1366 /**
1367 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
1368 * 0xff data
1369 * @data: data buffer to test
1370 * @datalen: data length
1371 * @ecc: ECC buffer
1372 * @ecclen: ECC length
1373 * @extraoob: extra OOB buffer
1374 * @extraooblen: extra OOB length
1375 * @bitflips_threshold: maximum number of bitflips
1376 *
1377 * Check if a data buffer and its associated ECC and OOB data contains only
1378 * 0xff pattern, which means the underlying region has been erased and is
1379 * ready to be programmed.
1380  * The bitflips_threshold specifies the maximum number of bitflips before
1381 * considering the region as not erased.
1382 *
1383 * Note:
1384  * 1/ ECC algorithms work on pre-defined block sizes which are usually
1385  * different from the NAND page size. When fixing bitflips, ECC engines will
1386  * report the number of errors per chunk, and the NAND core infrastructure
1387  * expects you to return the maximum number of bitflips for the whole page.
1388 * This is why you should always use this function on a single chunk and
1389 * not on the whole page. After checking each chunk you should update your
1390 * max_bitflips value accordingly.
1391 * 2/ When checking for bitflips in erased pages you should not only check
1392  * the payload data but also its associated ECC data, because a user might
1393  * have programmed all but a few bits to 1. In this case, we
1394  * shouldn't consider the chunk as erased, and checking the ECC bytes
1395  * prevents this.
1396 * 3/ The extraoob argument is optional, and should be used if some of your OOB
1397 * data are protected by the ECC engine.
1398 * It could also be used if you support subpages and want to attach some
1399 * extra OOB data to an ECC chunk.
1400 *
1401 * Returns a positive number of bitflips less than or equal to
1402 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1403 * threshold. In case of success, the passed buffers are filled with 0xff.
1404 */
1405 int nand_check_erased_ecc_chunk(void *data, int datalen,
1406 void *ecc, int ecclen,
1407 void *extraoob, int extraooblen,
1408 int bitflips_threshold)
1409 {
1410 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1411
1412 data_bitflips = nand_check_erased_buf(data, datalen,
1413 bitflips_threshold);
1414 if (data_bitflips < 0)
1415 return data_bitflips;
1416
1417 bitflips_threshold -= data_bitflips;
1418
1419 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1420 if (ecc_bitflips < 0)
1421 return ecc_bitflips;
1422
1423 bitflips_threshold -= ecc_bitflips;
1424
1425 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1426 bitflips_threshold);
1427 if (extraoob_bitflips < 0)
1428 return extraoob_bitflips;
1429
1430 if (data_bitflips)
1431 memset(data, 0xff, datalen);
1432
1433 if (ecc_bitflips)
1434 memset(ecc, 0xff, ecclen);
1435
1436 if (extraoob_bitflips)
1437 memset(extraoob, 0xff, extraooblen);
1438
1439 return data_bitflips + ecc_bitflips + extraoob_bitflips;
1440 }
1441 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
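/*
 * Typical use in a ->read_page() implementation is to call this helper only
 * when the ECC engine reports an uncorrectable error, as done later in this
 * file:
 *
 *	stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
 *	if (stat == -EBADMSG &&
 *	    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK))
 *		stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
 *						   &ecc_code[i], chip->ecc.bytes,
 *						   NULL, 0, chip->ecc.strength);
 */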
1442
1443 /**
1444 * nand_read_page_raw - [INTERN] read raw page data without ecc
1445 * @mtd: mtd info structure
1446 * @chip: nand chip info structure
1447 * @buf: buffer to store read data
1448 * @oob_required: caller requires OOB data read to chip->oob_poi
1449 * @page: page number to read
1450 *
1451 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1452 */
1453 static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1454 uint8_t *buf, int oob_required, int page)
1455 {
1456 chip->read_buf(mtd, buf, mtd->writesize);
1457 if (oob_required)
1458 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1459 return 0;
1460 }
1461
1462 /**
1463 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1464 * @mtd: mtd info structure
1465 * @chip: nand chip info structure
1466 * @buf: buffer to store read data
1467 * @oob_required: caller requires OOB data read to chip->oob_poi
1468 * @page: page number to read
1469 *
1470 * We need a special oob layout and handling even when OOB isn't used.
1471 */
1472 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1473 struct nand_chip *chip, uint8_t *buf,
1474 int oob_required, int page)
1475 {
1476 int eccsize = chip->ecc.size;
1477 int eccbytes = chip->ecc.bytes;
1478 uint8_t *oob = chip->oob_poi;
1479 int steps, size;
1480
1481 for (steps = chip->ecc.steps; steps > 0; steps--) {
1482 chip->read_buf(mtd, buf, eccsize);
1483 buf += eccsize;
1484
1485 if (chip->ecc.prepad) {
1486 chip->read_buf(mtd, oob, chip->ecc.prepad);
1487 oob += chip->ecc.prepad;
1488 }
1489
1490 chip->read_buf(mtd, oob, eccbytes);
1491 oob += eccbytes;
1492
1493 if (chip->ecc.postpad) {
1494 chip->read_buf(mtd, oob, chip->ecc.postpad);
1495 oob += chip->ecc.postpad;
1496 }
1497 }
1498
1499 size = mtd->oobsize - (oob - chip->oob_poi);
1500 if (size)
1501 chip->read_buf(mtd, oob, size);
1502
1503 return 0;
1504 }
1505
1506 /**
1507 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1508 * @mtd: mtd info structure
1509 * @chip: nand chip info structure
1510 * @buf: buffer to store read data
1511 * @oob_required: caller requires OOB data read to chip->oob_poi
1512 * @page: page number to read
1513 */
1514 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1515 uint8_t *buf, int oob_required, int page)
1516 {
1517 int i, eccsize = chip->ecc.size, ret;
1518 int eccbytes = chip->ecc.bytes;
1519 int eccsteps = chip->ecc.steps;
1520 uint8_t *p = buf;
1521 uint8_t *ecc_calc = chip->buffers->ecccalc;
1522 uint8_t *ecc_code = chip->buffers->ecccode;
1523 unsigned int max_bitflips = 0;
1524
1525 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1526
1527 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1528 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1529
1530 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1531 chip->ecc.total);
1532 if (ret)
1533 return ret;
1534
1535 eccsteps = chip->ecc.steps;
1536 p = buf;
1537
1538 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1539 int stat;
1540
1541 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1542 if (stat < 0) {
1543 mtd->ecc_stats.failed++;
1544 } else {
1545 mtd->ecc_stats.corrected += stat;
1546 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1547 }
1548 }
1549 return max_bitflips;
1550 }
1551
1552 /**
1553 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1554 * @mtd: mtd info structure
1555 * @chip: nand chip info structure
1556 * @data_offs: offset of requested data within the page
1557 * @readlen: data length
1558 * @bufpoi: buffer to store read data
1559 * @page: page number to read
1560 */
1561 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1562 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1563 int page)
1564 {
1565 int start_step, end_step, num_steps, ret;
1566 uint8_t *p;
1567 int data_col_addr, i, gaps = 0;
1568 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1569 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1570 int index, section = 0;
1571 unsigned int max_bitflips = 0;
1572 struct mtd_oob_region oobregion = { };
1573
1574 	/* Column address within the page aligned to ECC size (256 bytes) */
1575 start_step = data_offs / chip->ecc.size;
1576 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1577 num_steps = end_step - start_step + 1;
1578 index = start_step * chip->ecc.bytes;
1579
1580 /* Data size aligned to ECC ecc.size */
1581 datafrag_len = num_steps * chip->ecc.size;
1582 eccfrag_len = num_steps * chip->ecc.bytes;
1583
1584 data_col_addr = start_step * chip->ecc.size;
1585 	/* If we are not reading page-aligned data */
1586 if (data_col_addr != 0)
1587 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1588
1589 p = bufpoi + data_col_addr;
1590 chip->read_buf(mtd, p, datafrag_len);
1591
1592 /* Calculate ECC */
1593 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1594 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1595
1596 /*
1597 	 * Performance is better if we position offsets according to
1598 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1599 */
1600 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
1601 if (ret)
1602 return ret;
1603
1604 if (oobregion.length < eccfrag_len)
1605 gaps = 1;
1606
1607 if (gaps) {
1608 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1609 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1610 } else {
1611 /*
1612 		 * Send the command to read the particular ECC bytes, taking care
1613 		 * of buswidth alignment in read_buf.
1614 */
1615 aligned_pos = oobregion.offset & ~(busw - 1);
1616 aligned_len = eccfrag_len;
1617 if (oobregion.offset & (busw - 1))
1618 aligned_len++;
1619 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1620 (busw - 1))
1621 aligned_len++;
1622
1623 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1624 mtd->writesize + aligned_pos, -1);
1625 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1626 }
1627
1628 ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1629 chip->oob_poi, index, eccfrag_len);
1630 if (ret)
1631 return ret;
1632
1633 p = bufpoi + data_col_addr;
1634 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1635 int stat;
1636
1637 stat = chip->ecc.correct(mtd, p,
1638 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1639 if (stat == -EBADMSG &&
1640 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1641 /* check for empty pages with bitflips */
1642 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1643 &chip->buffers->ecccode[i],
1644 chip->ecc.bytes,
1645 NULL, 0,
1646 chip->ecc.strength);
1647 }
1648
1649 if (stat < 0) {
1650 mtd->ecc_stats.failed++;
1651 } else {
1652 mtd->ecc_stats.corrected += stat;
1653 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1654 }
1655 }
1656 return max_bitflips;
1657 }
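/*
 * Example of the step arithmetic above: reading 512 bytes at data_offs 1024
 * on a 2KiB page with ecc.size = 512 (and e.g. ecc.bytes = 7) gives
 * start_step = 2, end_step = 2, num_steps = 1, so only one 512-byte data
 * fragment and 7 ECC bytes (starting at index 14 in the ECC area) are
 * transferred and checked.
 */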
1658
1659 /**
1660 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1661 * @mtd: mtd info structure
1662 * @chip: nand chip info structure
1663 * @buf: buffer to store read data
1664 * @oob_required: caller requires OOB data read to chip->oob_poi
1665 * @page: page number to read
1666 *
1667 * Not for syndrome calculating ECC controllers which need a special oob layout.
1668 */
1669 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1670 uint8_t *buf, int oob_required, int page)
1671 {
1672 int i, eccsize = chip->ecc.size, ret;
1673 int eccbytes = chip->ecc.bytes;
1674 int eccsteps = chip->ecc.steps;
1675 uint8_t *p = buf;
1676 uint8_t *ecc_calc = chip->buffers->ecccalc;
1677 uint8_t *ecc_code = chip->buffers->ecccode;
1678 unsigned int max_bitflips = 0;
1679
1680 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1681 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1682 chip->read_buf(mtd, p, eccsize);
1683 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1684 }
1685 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1686
1687 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1688 chip->ecc.total);
1689 if (ret)
1690 return ret;
1691
1692 eccsteps = chip->ecc.steps;
1693 p = buf;
1694
1695 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1696 int stat;
1697
1698 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1699 if (stat == -EBADMSG &&
1700 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1701 /* check for empty pages with bitflips */
1702 stat = nand_check_erased_ecc_chunk(p, eccsize,
1703 &ecc_code[i], eccbytes,
1704 NULL, 0,
1705 chip->ecc.strength);
1706 }
1707
1708 if (stat < 0) {
1709 mtd->ecc_stats.failed++;
1710 } else {
1711 mtd->ecc_stats.corrected += stat;
1712 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1713 }
1714 }
1715 return max_bitflips;
1716 }
1717
1718 /**
1719 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1720 * @mtd: mtd info structure
1721 * @chip: nand chip info structure
1722 * @buf: buffer to store read data
1723 * @oob_required: caller requires OOB data read to chip->oob_poi
1724 * @page: page number to read
1725 *
1726  * Hardware ECC for large page chips, which requires the OOB to be read first.
1727  * For this ECC mode, the write_page method is re-used from ECC_HW. These
1728  * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support
1729  * with multiple ECC steps, which follows the "infix ECC" scheme and reads/writes
1730  * ECC from the data area, overwriting the NAND manufacturer's bad block markings.
1731 */
1732 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1733 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1734 {
1735 int i, eccsize = chip->ecc.size, ret;
1736 int eccbytes = chip->ecc.bytes;
1737 int eccsteps = chip->ecc.steps;
1738 uint8_t *p = buf;
1739 uint8_t *ecc_code = chip->buffers->ecccode;
1740 uint8_t *ecc_calc = chip->buffers->ecccalc;
1741 unsigned int max_bitflips = 0;
1742
1743 /* Read the OOB area first */
1744 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1745 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1746 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1747
1748 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1749 chip->ecc.total);
1750 if (ret)
1751 return ret;
1752
1753 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1754 int stat;
1755
1756 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1757 chip->read_buf(mtd, p, eccsize);
1758 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1759
1760 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1761 if (stat == -EBADMSG &&
1762 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1763 /* check for empty pages with bitflips */
1764 stat = nand_check_erased_ecc_chunk(p, eccsize,
1765 &ecc_code[i], eccbytes,
1766 NULL, 0,
1767 chip->ecc.strength);
1768 }
1769
1770 if (stat < 0) {
1771 mtd->ecc_stats.failed++;
1772 } else {
1773 mtd->ecc_stats.corrected += stat;
1774 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1775 }
1776 }
1777 return max_bitflips;
1778 }
1779
1780 /**
1781 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1782 * @mtd: mtd info structure
1783 * @chip: nand chip info structure
1784 * @buf: buffer to store read data
1785 * @oob_required: caller requires OOB data read to chip->oob_poi
1786 * @page: page number to read
1787 *
1788 * The hw generator calculates the error syndrome automatically. Therefore we
1789 * need a special oob layout and handling.
1790 */
1791 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1792 uint8_t *buf, int oob_required, int page)
1793 {
1794 int i, eccsize = chip->ecc.size;
1795 int eccbytes = chip->ecc.bytes;
1796 int eccsteps = chip->ecc.steps;
1797 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
1798 uint8_t *p = buf;
1799 uint8_t *oob = chip->oob_poi;
1800 unsigned int max_bitflips = 0;
1801
1802 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1803 int stat;
1804
1805 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1806 chip->read_buf(mtd, p, eccsize);
1807
1808 if (chip->ecc.prepad) {
1809 chip->read_buf(mtd, oob, chip->ecc.prepad);
1810 oob += chip->ecc.prepad;
1811 }
1812
1813 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1814 chip->read_buf(mtd, oob, eccbytes);
1815 stat = chip->ecc.correct(mtd, p, oob, NULL);
1816
1817 oob += eccbytes;
1818
1819 if (chip->ecc.postpad) {
1820 chip->read_buf(mtd, oob, chip->ecc.postpad);
1821 oob += chip->ecc.postpad;
1822 }
1823
1824 if (stat == -EBADMSG &&
1825 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1826 /* check for empty pages with bitflips */
1827 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1828 oob - eccpadbytes,
1829 eccpadbytes,
1830 NULL, 0,
1831 chip->ecc.strength);
1832 }
1833
1834 if (stat < 0) {
1835 mtd->ecc_stats.failed++;
1836 } else {
1837 mtd->ecc_stats.corrected += stat;
1838 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1839 }
1840 }
1841
1842 /* Calculate remaining oob bytes */
1843 i = mtd->oobsize - (oob - chip->oob_poi);
1844 if (i)
1845 chip->read_buf(mtd, oob, i);
1846
1847 return max_bitflips;
1848 }
1849
1850 /**
1851 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1852 * @mtd: mtd info structure
1853 * @oob: oob destination address
1854 * @ops: oob ops structure
1855 * @len: size of oob to transfer
1856 */
1857 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1858 struct mtd_oob_ops *ops, size_t len)
1859 {
1860 struct nand_chip *chip = mtd_to_nand(mtd);
1861 int ret;
1862
1863 switch (ops->mode) {
1864
1865 case MTD_OPS_PLACE_OOB:
1866 case MTD_OPS_RAW:
1867 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1868 return oob + len;
1869
1870 case MTD_OPS_AUTO_OOB:
1871 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1872 ops->ooboffs, len);
1873 BUG_ON(ret);
1874 return oob + len;
1875
1876 default:
1877 BUG();
1878 }
1879 return NULL;
1880 }
1881
1882 /**
1883 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
1884 * @mtd: MTD device structure
1885 * @retry_mode: the retry mode to use
1886 *
1887 * Some vendors supply a special command to shift the Vt threshold, to be used
1888 * when there are too many bitflips in a page (i.e., ECC error). After setting
1889 * a new threshold, the host should retry reading the page.
1890 */
1891 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
1892 {
1893 struct nand_chip *chip = mtd_to_nand(mtd);
1894
1895 pr_debug("setting READ RETRY mode %d\n", retry_mode);
1896
1897 if (retry_mode >= chip->read_retries)
1898 return -EINVAL;
1899
1900 if (!chip->setup_read_retry)
1901 return -EOPNOTSUPP;
1902
1903 return chip->setup_read_retry(mtd, retry_mode);
1904 }
1905
1906 /**
1907 * nand_do_read_ops - [INTERN] Read data with ECC
1908 * @mtd: MTD device structure
1909 * @from: offset to read from
1910 * @ops: oob ops structure
1911 *
1912 * Internal function. Called with chip held.
1913 */
1914 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1915 struct mtd_oob_ops *ops)
1916 {
1917 int chipnr, page, realpage, col, bytes, aligned, oob_required;
1918 struct nand_chip *chip = mtd_to_nand(mtd);
1919 int ret = 0;
1920 uint32_t readlen = ops->len;
1921 uint32_t oobreadlen = ops->ooblen;
1922 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
1923
1924 uint8_t *bufpoi, *oob, *buf;
1925 int use_bufpoi;
1926 unsigned int max_bitflips = 0;
1927 int retry_mode = 0;
1928 bool ecc_fail = false;
1929
1930 chipnr = (int)(from >> chip->chip_shift);
1931 chip->select_chip(mtd, chipnr);
1932
1933 realpage = (int)(from >> chip->page_shift);
1934 page = realpage & chip->pagemask;
1935
1936 col = (int)(from & (mtd->writesize - 1));
1937
1938 buf = ops->datbuf;
1939 oob = ops->oobbuf;
1940 oob_required = oob ? 1 : 0;
1941
1942 while (1) {
1943 unsigned int ecc_failures = mtd->ecc_stats.failed;
1944
1945 bytes = min(mtd->writesize - col, readlen);
1946 aligned = (bytes == mtd->writesize);
1947
1948 if (!aligned)
1949 use_bufpoi = 1;
1950 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
1951 use_bufpoi = !virt_addr_valid(buf);
1952 else
1953 use_bufpoi = 0;
1954
1955 /* Is the current page in the buffer? */
1956 if (realpage != chip->pagebuf || oob) {
1957 bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
1958
1959 if (use_bufpoi && aligned)
1960 pr_debug("%s: using read bounce buffer for buf@%p\n",
1961 __func__, buf);
1962
1963 read_retry:
1964 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1965
1966 /*
1967 * Now read the page into the buffer. Absent an error,
1968 * the read methods return max bitflips per ecc step.
1969 */
1970 if (unlikely(ops->mode == MTD_OPS_RAW))
1971 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
1972 oob_required,
1973 page);
1974 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
1975 !oob)
1976 ret = chip->ecc.read_subpage(mtd, chip,
1977 col, bytes, bufpoi,
1978 page);
1979 else
1980 ret = chip->ecc.read_page(mtd, chip, bufpoi,
1981 oob_required, page);
1982 if (ret < 0) {
1983 if (use_bufpoi)
1984 /* Invalidate page cache */
1985 chip->pagebuf = -1;
1986 break;
1987 }
1988
1989 max_bitflips = max_t(unsigned int, max_bitflips, ret);
1990
1991 /* Transfer not aligned data */
1992 if (use_bufpoi) {
1993 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
1994 !(mtd->ecc_stats.failed - ecc_failures) &&
1995 (ops->mode != MTD_OPS_RAW)) {
1996 chip->pagebuf = realpage;
1997 chip->pagebuf_bitflips = ret;
1998 } else {
1999 /* Invalidate page cache */
2000 chip->pagebuf = -1;
2001 }
2002 memcpy(buf, chip->buffers->databuf + col, bytes);
2003 }
2004
2005 if (unlikely(oob)) {
2006 int toread = min(oobreadlen, max_oobsize);
2007
2008 if (toread) {
2009 oob = nand_transfer_oob(mtd,
2010 oob, ops, toread);
2011 oobreadlen -= toread;
2012 }
2013 }
2014
2015 if (chip->options & NAND_NEED_READRDY) {
2016 /* Apply delay or wait for ready/busy pin */
2017 if (!chip->dev_ready)
2018 udelay(chip->chip_delay);
2019 else
2020 nand_wait_ready(mtd);
2021 }
2022
2023 if (mtd->ecc_stats.failed - ecc_failures) {
2024 if (retry_mode + 1 < chip->read_retries) {
2025 retry_mode++;
2026 ret = nand_setup_read_retry(mtd,
2027 retry_mode);
2028 if (ret < 0)
2029 break;
2030
2031 /* Reset failures; retry */
2032 mtd->ecc_stats.failed = ecc_failures;
2033 goto read_retry;
2034 } else {
2035 /* No more retry modes; real failure */
2036 ecc_fail = true;
2037 }
2038 }
2039
2040 buf += bytes;
2041 } else {
2042 memcpy(buf, chip->buffers->databuf + col, bytes);
2043 buf += bytes;
2044 max_bitflips = max_t(unsigned int, max_bitflips,
2045 chip->pagebuf_bitflips);
2046 }
2047
2048 readlen -= bytes;
2049
2050 /* Reset to retry mode 0 */
2051 if (retry_mode) {
2052 ret = nand_setup_read_retry(mtd, 0);
2053 if (ret < 0)
2054 break;
2055 retry_mode = 0;
2056 }
2057
2058 if (!readlen)
2059 break;
2060
2061 /* For subsequent reads align to page boundary */
2062 col = 0;
2063 /* Increment page address */
2064 realpage++;
2065
2066 page = realpage & chip->pagemask;
2067 /* Check, if we cross a chip boundary */
2068 if (!page) {
2069 chipnr++;
2070 chip->select_chip(mtd, -1);
2071 chip->select_chip(mtd, chipnr);
2072 }
2073 }
2074 chip->select_chip(mtd, -1);
2075
2076 ops->retlen = ops->len - (size_t) readlen;
2077 if (oob)
2078 ops->oobretlen = ops->ooblen - oobreadlen;
2079
2080 if (ret < 0)
2081 return ret;
2082
2083 if (ecc_fail)
2084 return -EBADMSG;
2085
2086 return max_bitflips;
2087 }
2088
2089 /**
2090 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ops
2091 * @mtd: MTD device structure
2092 * @from: offset to read from
2093 * @len: number of bytes to read
2094 * @retlen: pointer to variable to store the number of read bytes
2095 * @buf: the databuffer to put data
2096 *
2097 * Get hold of the chip and call nand_do_read_ops.
2098 */
2099 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2100 size_t *retlen, uint8_t *buf)
2101 {
2102 struct mtd_oob_ops ops;
2103 int ret;
2104
2105 nand_get_device(mtd, FL_READING);
2106 memset(&ops, 0, sizeof(ops));
2107 ops.len = len;
2108 ops.datbuf = buf;
2109 ops.mode = MTD_OPS_PLACE_OOB;
2110 ret = nand_do_read_ops(mtd, from, &ops);
2111 *retlen = ops.retlen;
2112 nand_release_device(mtd);
2113 return ret;
2114 }
2115
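/*
 * Illustrative sketch, not part of the original driver: how an MTD
 * client would normally end up in nand_read() above. The mtd core
 * routes mtd_read() to this handler and compares the returned bitflip
 * count against mtd->bitflip_threshold, so the caller usually only
 * sees 0, -EUCLEAN or -EBADMSG. The mtd pointer is assumed to come
 * from get_mtd_device() or similar; error handling is minimal.
 */
#if 0	/* example only, not compiled */
static int example_read_first_page(struct mtd_info *mtd, u8 *buf)
{
	size_t retlen = 0;
	int ret;

	ret = mtd_read(mtd, 0, mtd->writesize, &retlen, buf);
	if (ret == -EUCLEAN)
		pr_info("read ok, but bitflips had to be corrected\n");
	else if (ret < 0)
		return ret;

	return retlen == mtd->writesize ? 0 : -EIO;
}
#endif
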
2116 /**
2117 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2118 * @mtd: mtd info structure
2119 * @chip: nand chip info structure
2120 * @page: page number to read
2121 */
2122 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2123 {
2124 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
2125 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2126 return 0;
2127 }
2128 EXPORT_SYMBOL(nand_read_oob_std);
2129
2130 /**
2131 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
2132 * with syndromes
2133 * @mtd: mtd info structure
2134 * @chip: nand chip info structure
2135 * @page: page number to read
2136 */
2137 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2138 int page)
2139 {
2140 int length = mtd->oobsize;
2141 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2142 int eccsize = chip->ecc.size;
2143 uint8_t *bufpoi = chip->oob_poi;
2144 int i, toread, sndrnd = 0, pos;
2145
2146 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2147 for (i = 0; i < chip->ecc.steps; i++) {
2148 if (sndrnd) {
2149 pos = eccsize + i * (eccsize + chunk);
2150 if (mtd->writesize > 512)
2151 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
2152 else
2153 chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
2154 } else
2155 sndrnd = 1;
2156 toread = min_t(int, length, chunk);
2157 chip->read_buf(mtd, bufpoi, toread);
2158 bufpoi += toread;
2159 length -= toread;
2160 }
2161 if (length > 0)
2162 chip->read_buf(mtd, bufpoi, length);
2163
2164 return 0;
2165 }
2166 EXPORT_SYMBOL(nand_read_oob_syndrome);
2167
2168 /**
2169 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2170 * @mtd: mtd info structure
2171 * @chip: nand chip info structure
2172 * @page: page number to write
2173 */
2174 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2175 {
2176 int status = 0;
2177 const uint8_t *buf = chip->oob_poi;
2178 int length = mtd->oobsize;
2179
2180 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2181 chip->write_buf(mtd, buf, length);
2182 /* Send command to program the OOB data */
2183 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2184
2185 status = chip->waitfunc(mtd, chip);
2186
2187 return status & NAND_STATUS_FAIL ? -EIO : 0;
2188 }
2189 EXPORT_SYMBOL(nand_write_oob_std);
2190
2191 /**
2192 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2193 * with syndrome - only for large page flash
2194 * @mtd: mtd info structure
2195 * @chip: nand chip info structure
2196 * @page: page number to write
2197 */
2198 int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2199 int page)
2200 {
2201 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2202 int eccsize = chip->ecc.size, length = mtd->oobsize;
2203 int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
2204 const uint8_t *bufpoi = chip->oob_poi;
2205
2206 /*
2207 * data-ecc-data-ecc ... ecc-oob
2208 * or
2209 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2210 */
2211 if (!chip->ecc.prepad && !chip->ecc.postpad) {
2212 pos = steps * (eccsize + chunk);
2213 steps = 0;
2214 } else
2215 pos = eccsize;
2216
2217 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
2218 for (i = 0; i < steps; i++) {
2219 if (sndcmd) {
2220 if (mtd->writesize <= 512) {
2221 uint32_t fill = 0xFFFFFFFF;
2222
2223 len = eccsize;
2224 while (len > 0) {
2225 int num = min_t(int, len, 4);
2226 chip->write_buf(mtd, (uint8_t *)&fill,
2227 num);
2228 len -= num;
2229 }
2230 } else {
2231 pos = eccsize + i * (eccsize + chunk);
2232 chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
2233 }
2234 } else
2235 sndcmd = 1;
2236 len = min_t(int, length, chunk);
2237 chip->write_buf(mtd, bufpoi, len);
2238 bufpoi += len;
2239 length -= len;
2240 }
2241 if (length > 0)
2242 chip->write_buf(mtd, bufpoi, length);
2243
2244 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2245 status = chip->waitfunc(mtd, chip);
2246
2247 return status & NAND_STATUS_FAIL ? -EIO : 0;
2248 }
2249 EXPORT_SYMBOL(nand_write_oob_syndrome);
2250
2251 /**
2252 * nand_do_read_oob - [INTERN] NAND read out-of-band
2253 * @mtd: MTD device structure
2254 * @from: offset to read from
2255 * @ops: oob operations description structure
2256 *
2257 * NAND read out-of-band data from the spare area.
2258 */
2259 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2260 struct mtd_oob_ops *ops)
2261 {
2262 int page, realpage, chipnr;
2263 struct nand_chip *chip = mtd_to_nand(mtd);
2264 struct mtd_ecc_stats stats;
2265 int readlen = ops->ooblen;
2266 int len;
2267 uint8_t *buf = ops->oobbuf;
2268 int ret = 0;
2269
2270 pr_debug("%s: from = 0x%08Lx, len = %i\n",
2271 __func__, (unsigned long long)from, readlen);
2272
2273 stats = mtd->ecc_stats;
2274
2275 len = mtd_oobavail(mtd, ops);
2276
2277 if (unlikely(ops->ooboffs >= len)) {
2278 pr_debug("%s: attempt to start read outside oob\n",
2279 __func__);
2280 return -EINVAL;
2281 }
2282
2283 /* Do not allow reads past end of device */
2284 if (unlikely(from >= mtd->size ||
2285 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2286 (from >> chip->page_shift)) * len)) {
2287 pr_debug("%s: attempt to read beyond end of device\n",
2288 __func__);
2289 return -EINVAL;
2290 }
2291
2292 chipnr = (int)(from >> chip->chip_shift);
2293 chip->select_chip(mtd, chipnr);
2294
2295 /* Shift to get page */
2296 realpage = (int)(from >> chip->page_shift);
2297 page = realpage & chip->pagemask;
2298
2299 while (1) {
2300 if (ops->mode == MTD_OPS_RAW)
2301 ret = chip->ecc.read_oob_raw(mtd, chip, page);
2302 else
2303 ret = chip->ecc.read_oob(mtd, chip, page);
2304
2305 if (ret < 0)
2306 break;
2307
2308 len = min(len, readlen);
2309 buf = nand_transfer_oob(mtd, buf, ops, len);
2310
2311 if (chip->options & NAND_NEED_READRDY) {
2312 /* Apply delay or wait for ready/busy pin */
2313 if (!chip->dev_ready)
2314 udelay(chip->chip_delay);
2315 else
2316 nand_wait_ready(mtd);
2317 }
2318
2319 readlen -= len;
2320 if (!readlen)
2321 break;
2322
2323 /* Increment page address */
2324 realpage++;
2325
2326 page = realpage & chip->pagemask;
2327 /* Check, if we cross a chip boundary */
2328 if (!page) {
2329 chipnr++;
2330 chip->select_chip(mtd, -1);
2331 chip->select_chip(mtd, chipnr);
2332 }
2333 }
2334 chip->select_chip(mtd, -1);
2335
2336 ops->oobretlen = ops->ooblen - readlen;
2337
2338 if (ret < 0)
2339 return ret;
2340
2341 if (mtd->ecc_stats.failed - stats.failed)
2342 return -EBADMSG;
2343
2344 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
2345 }
2346
2347 /**
2348 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2349 * @mtd: MTD device structure
2350 * @from: offset to read from
2351 * @ops: oob operation description structure
2352 *
2353 * NAND read data and/or out-of-band data.
2354 */
2355 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2356 struct mtd_oob_ops *ops)
2357 {
2358 int ret;
2359
2360 ops->retlen = 0;
2361
2362 /* Do not allow reads past end of device */
2363 if (ops->datbuf && (from + ops->len) > mtd->size) {
2364 pr_debug("%s: attempt to read beyond end of device\n",
2365 __func__);
2366 return -EINVAL;
2367 }
2368
2369 if (ops->mode != MTD_OPS_PLACE_OOB &&
2370 ops->mode != MTD_OPS_AUTO_OOB &&
2371 ops->mode != MTD_OPS_RAW)
2372 return -ENOTSUPP;
2373
2374 nand_get_device(mtd, FL_READING);
2375
2376 if (!ops->datbuf)
2377 ret = nand_do_read_oob(mtd, from, ops);
2378 else
2379 ret = nand_do_read_ops(mtd, from, ops);
2380
2381 nand_release_device(mtd);
2382 return ret;
2383 }
2384
2385
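/*
 * Illustrative sketch, not part of the original driver: reading a page
 * together with its free (auto-placed) OOB bytes through the mtd API,
 * which lands in nand_read_oob() above. The offset and buffer sizes are
 * assumptions of the example; MTD_OPS_AUTO_OOB limits the OOB transfer
 * to the free areas described by the OOB layout.
 */
#if 0	/* example only, not compiled */
static int example_read_page_and_oob(struct mtd_info *mtd, loff_t from,
				     u8 *data, u8 *oob, size_t ooblen)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,
		.len	= mtd->writesize,
		.datbuf	= data,
		.ooblen	= ooblen,
		.oobbuf	= oob,
	};

	return mtd_read_oob(mtd, from, &ops);
}
#endif
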
2386 /**
2387 * nand_write_page_raw - [INTERN] raw page write function
2388 * @mtd: mtd info structure
2389 * @chip: nand chip info structure
2390 * @buf: data buffer
2391 * @oob_required: must write chip->oob_poi to OOB
2392 * @page: page number to write
2393 *
2394 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2395 */
2396 static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2397 const uint8_t *buf, int oob_required, int page)
2398 {
2399 chip->write_buf(mtd, buf, mtd->writesize);
2400 if (oob_required)
2401 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2402
2403 return 0;
2404 }
2405
2406 /**
2407 * nand_write_page_raw_syndrome - [INTERN] raw page write function
2408 * @mtd: mtd info structure
2409 * @chip: nand chip info structure
2410 * @buf: data buffer
2411 * @oob_required: must write chip->oob_poi to OOB
2412 * @page: page number to write
2413 *
2414 * We need a special oob layout and handling even when ECC isn't checked.
2415 */
2416 static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2417 struct nand_chip *chip,
2418 const uint8_t *buf, int oob_required,
2419 int page)
2420 {
2421 int eccsize = chip->ecc.size;
2422 int eccbytes = chip->ecc.bytes;
2423 uint8_t *oob = chip->oob_poi;
2424 int steps, size;
2425
2426 for (steps = chip->ecc.steps; steps > 0; steps--) {
2427 chip->write_buf(mtd, buf, eccsize);
2428 buf += eccsize;
2429
2430 if (chip->ecc.prepad) {
2431 chip->write_buf(mtd, oob, chip->ecc.prepad);
2432 oob += chip->ecc.prepad;
2433 }
2434
2435 chip->write_buf(mtd, oob, eccbytes);
2436 oob += eccbytes;
2437
2438 if (chip->ecc.postpad) {
2439 chip->write_buf(mtd, oob, chip->ecc.postpad);
2440 oob += chip->ecc.postpad;
2441 }
2442 }
2443
2444 size = mtd->oobsize - (oob - chip->oob_poi);
2445 if (size)
2446 chip->write_buf(mtd, oob, size);
2447
2448 return 0;
2449 }
2450 /**
2451 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2452 * @mtd: mtd info structure
2453 * @chip: nand chip info structure
2454 * @buf: data buffer
2455 * @oob_required: must write chip->oob_poi to OOB
2456 * @page: page number to write
2457 */
2458 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2459 const uint8_t *buf, int oob_required,
2460 int page)
2461 {
2462 int i, eccsize = chip->ecc.size, ret;
2463 int eccbytes = chip->ecc.bytes;
2464 int eccsteps = chip->ecc.steps;
2465 uint8_t *ecc_calc = chip->buffers->ecccalc;
2466 const uint8_t *p = buf;
2467
2468 /* Software ECC calculation */
2469 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2470 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2471
2472 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2473 chip->ecc.total);
2474 if (ret)
2475 return ret;
2476
2477 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2478 }
2479
2480 /**
2481 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2482 * @mtd: mtd info structure
2483 * @chip: nand chip info structure
2484 * @buf: data buffer
2485 * @oob_required: must write chip->oob_poi to OOB
2486 * @page: page number to write
2487 */
2488 static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2489 const uint8_t *buf, int oob_required,
2490 int page)
2491 {
2492 int i, eccsize = chip->ecc.size, ret;
2493 int eccbytes = chip->ecc.bytes;
2494 int eccsteps = chip->ecc.steps;
2495 uint8_t *ecc_calc = chip->buffers->ecccalc;
2496 const uint8_t *p = buf;
2497
2498 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2499 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2500 chip->write_buf(mtd, p, eccsize);
2501 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2502 }
2503
2504 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2505 chip->ecc.total);
2506 if (ret)
2507 return ret;
2508
2509 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2510
2511 return 0;
2512 }
2513
2514
2515 /**
2516 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2517 * @mtd: mtd info structure
2518 * @chip: nand chip info structure
2519 * @offset: column address of subpage within the page
2520 * @data_len: data length
2521 * @buf: data buffer
2522 * @oob_required: must write chip->oob_poi to OOB
2523 * @page: page number to write
2524 */
2525 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2526 struct nand_chip *chip, uint32_t offset,
2527 uint32_t data_len, const uint8_t *buf,
2528 int oob_required, int page)
2529 {
2530 uint8_t *oob_buf = chip->oob_poi;
2531 uint8_t *ecc_calc = chip->buffers->ecccalc;
2532 int ecc_size = chip->ecc.size;
2533 int ecc_bytes = chip->ecc.bytes;
2534 int ecc_steps = chip->ecc.steps;
2535 uint32_t start_step = offset / ecc_size;
2536 uint32_t end_step = (offset + data_len - 1) / ecc_size;
2537 int oob_bytes = mtd->oobsize / ecc_steps;
2538 int step, ret;
2539
2540 for (step = 0; step < ecc_steps; step++) {
2541 /* configure controller for WRITE access */
2542 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2543
2544 /* write data (untouched subpages already masked by 0xFF) */
2545 chip->write_buf(mtd, buf, ecc_size);
2546
2547 /* mask ECC of un-touched subpages by padding 0xFF */
2548 if ((step < start_step) || (step > end_step))
2549 memset(ecc_calc, 0xff, ecc_bytes);
2550 else
2551 chip->ecc.calculate(mtd, buf, ecc_calc);
2552
2553 /* mask OOB of un-touched subpages by padding 0xFF */
2554 /* if oob_required, preserve OOB metadata of written subpage */
2555 if (!oob_required || (step < start_step) || (step > end_step))
2556 memset(oob_buf, 0xff, oob_bytes);
2557
2558 buf += ecc_size;
2559 ecc_calc += ecc_bytes;
2560 oob_buf += oob_bytes;
2561 }
2562
2563 /* Copy the calculated ECC for the whole page to chip->oob_poi */
2564 /* This includes the masked value (0xFF) for unwritten subpages */
2565 ecc_calc = chip->buffers->ecccalc;
2566 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2567 chip->ecc.total);
2568 if (ret)
2569 return ret;
2570
2571 /* write OOB buffer to NAND device */
2572 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2573
2574 return 0;
2575 }
2576
2577
2578 /**
2579 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2580 * @mtd: mtd info structure
2581 * @chip: nand chip info structure
2582 * @buf: data buffer
2583 * @oob_required: must write chip->oob_poi to OOB
2584 * @page: page number to write
2585 *
2586 * The hw generator calculates the error syndrome automatically. Therefore we
2587 * need a special oob layout and handling.
2588 */
2589 static int nand_write_page_syndrome(struct mtd_info *mtd,
2590 struct nand_chip *chip,
2591 const uint8_t *buf, int oob_required,
2592 int page)
2593 {
2594 int i, eccsize = chip->ecc.size;
2595 int eccbytes = chip->ecc.bytes;
2596 int eccsteps = chip->ecc.steps;
2597 const uint8_t *p = buf;
2598 uint8_t *oob = chip->oob_poi;
2599
2600 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2601
2602 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2603 chip->write_buf(mtd, p, eccsize);
2604
2605 if (chip->ecc.prepad) {
2606 chip->write_buf(mtd, oob, chip->ecc.prepad);
2607 oob += chip->ecc.prepad;
2608 }
2609
2610 chip->ecc.calculate(mtd, p, oob);
2611 chip->write_buf(mtd, oob, eccbytes);
2612 oob += eccbytes;
2613
2614 if (chip->ecc.postpad) {
2615 chip->write_buf(mtd, oob, chip->ecc.postpad);
2616 oob += chip->ecc.postpad;
2617 }
2618 }
2619
2620 /* Calculate remaining oob bytes */
2621 i = mtd->oobsize - (oob - chip->oob_poi);
2622 if (i)
2623 chip->write_buf(mtd, oob, i);
2624
2625 return 0;
2626 }
2627
2628 /**
2629 * nand_write_page - [REPLACEABLE] write one page
2630 * @mtd: MTD device structure
2631 * @chip: NAND chip descriptor
2632 * @offset: address offset within the page
2633 * @data_len: length of actual data to be written
2634 * @buf: the data to write
2635 * @oob_required: must write chip->oob_poi to OOB
2636 * @page: page number to write
2637 * @cached: cached programming
2638 * @raw: use _raw version of write_page
2639 */
2640 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2641 uint32_t offset, int data_len, const uint8_t *buf,
2642 int oob_required, int page, int cached, int raw)
2643 {
2644 int status, subpage;
2645
2646 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2647 chip->ecc.write_subpage)
2648 subpage = offset || (data_len < mtd->writesize);
2649 else
2650 subpage = 0;
2651
2652 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2653
2654 if (unlikely(raw))
2655 status = chip->ecc.write_page_raw(mtd, chip, buf,
2656 oob_required, page);
2657 else if (subpage)
2658 status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
2659 buf, oob_required, page);
2660 else
2661 status = chip->ecc.write_page(mtd, chip, buf, oob_required,
2662 page);
2663
2664 if (status < 0)
2665 return status;
2666
2667 /*
2668 * Cached programming disabled for now. Not sure if it's worth the
2669 * trouble. The speed gain is not very impressive. (2.3->2.6 MiB/s).
2670 */
2671 cached = 0;
2672
2673 if (!cached || !NAND_HAS_CACHEPROG(chip)) {
2674
2675 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2676 status = chip->waitfunc(mtd, chip);
2677 /*
2678 * See if operation failed and additional status checks are
2679 * available.
2680 */
2681 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
2682 status = chip->errstat(mtd, chip, FL_WRITING, status,
2683 page);
2684
2685 if (status & NAND_STATUS_FAIL)
2686 return -EIO;
2687 } else {
2688 chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
2689 status = chip->waitfunc(mtd, chip);
2690 }
2691
2692 return 0;
2693 }
2694
2695 /**
2696 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2697 * @mtd: MTD device structure
2698 * @oob: oob data buffer
2699 * @len: oob data write length
2700 * @ops: oob ops structure
2701 */
2702 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2703 struct mtd_oob_ops *ops)
2704 {
2705 struct nand_chip *chip = mtd_to_nand(mtd);
2706 int ret;
2707
2708 /*
2709 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2710 * data from a previous OOB read.
2711 */
2712 memset(chip->oob_poi, 0xff, mtd->oobsize);
2713
2714 switch (ops->mode) {
2715
2716 case MTD_OPS_PLACE_OOB:
2717 case MTD_OPS_RAW:
2718 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2719 return oob + len;
2720
2721 case MTD_OPS_AUTO_OOB:
2722 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2723 ops->ooboffs, len);
2724 BUG_ON(ret);
2725 return oob + len;
2726
2727 default:
2728 BUG();
2729 }
2730 return NULL;
2731 }
2732
2733 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
2734
2735 /**
2736 * nand_do_write_ops - [INTERN] NAND write with ECC
2737 * @mtd: MTD device structure
2738 * @to: offset to write to
2739 * @ops: oob operations description structure
2740 *
2741 * NAND write with ECC.
2742 */
2743 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2744 struct mtd_oob_ops *ops)
2745 {
2746 int chipnr, realpage, page, blockmask, column;
2747 struct nand_chip *chip = mtd_to_nand(mtd);
2748 uint32_t writelen = ops->len;
2749
2750 uint32_t oobwritelen = ops->ooblen;
2751 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2752
2753 uint8_t *oob = ops->oobbuf;
2754 uint8_t *buf = ops->datbuf;
2755 int ret;
2756 int oob_required = oob ? 1 : 0;
2757
2758 ops->retlen = 0;
2759 if (!writelen)
2760 return 0;
2761
2762 /* Reject writes that are not page aligned */
2763 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2764 pr_notice("%s: attempt to write non page aligned data\n",
2765 __func__);
2766 return -EINVAL;
2767 }
2768
2769 column = to & (mtd->writesize - 1);
2770
2771 chipnr = (int)(to >> chip->chip_shift);
2772 chip->select_chip(mtd, chipnr);
2773
2774 /* Check, if it is write protected */
2775 if (nand_check_wp(mtd)) {
2776 ret = -EIO;
2777 goto err_out;
2778 }
2779
2780 realpage = (int)(to >> chip->page_shift);
2781 page = realpage & chip->pagemask;
2782 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
2783
2784 /* Invalidate the page cache, when we write to the cached page */
2785 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
2786 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
2787 chip->pagebuf = -1;
2788
2789 /* Don't allow multipage oob writes with offset */
2790 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
2791 ret = -EINVAL;
2792 goto err_out;
2793 }
2794
2795 while (1) {
2796 int bytes = mtd->writesize;
2797 int cached = writelen > bytes && page != blockmask;
2798 uint8_t *wbuf = buf;
2799 int use_bufpoi;
2800 int part_pagewr = (column || writelen < mtd->writesize);
2801
2802 if (part_pagewr)
2803 use_bufpoi = 1;
2804 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2805 use_bufpoi = !virt_addr_valid(buf);
2806 else
2807 use_bufpoi = 0;
2808
2809 /* Partial page write, or need to use bounce buffer? */
2810 if (use_bufpoi) {
2811 pr_debug("%s: using write bounce buffer for buf@%p\n",
2812 __func__, buf);
2813 cached = 0;
2814 if (part_pagewr)
2815 bytes = min_t(int, bytes - column, writelen);
2816 chip->pagebuf = -1;
2817 memset(chip->buffers->databuf, 0xff, mtd->writesize);
2818 memcpy(&chip->buffers->databuf[column], buf, bytes);
2819 wbuf = chip->buffers->databuf;
2820 }
2821
2822 if (unlikely(oob)) {
2823 size_t len = min(oobwritelen, oobmaxlen);
2824 oob = nand_fill_oob(mtd, oob, len, ops);
2825 oobwritelen -= len;
2826 } else {
2827 /* We still need to erase leftover OOB data */
2828 memset(chip->oob_poi, 0xff, mtd->oobsize);
2829 }
2830 ret = chip->write_page(mtd, chip, column, bytes, wbuf,
2831 oob_required, page, cached,
2832 (ops->mode == MTD_OPS_RAW));
2833 if (ret)
2834 break;
2835
2836 writelen -= bytes;
2837 if (!writelen)
2838 break;
2839
2840 column = 0;
2841 buf += bytes;
2842 realpage++;
2843
2844 page = realpage & chip->pagemask;
2845 /* Check, if we cross a chip boundary */
2846 if (!page) {
2847 chipnr++;
2848 chip->select_chip(mtd, -1);
2849 chip->select_chip(mtd, chipnr);
2850 }
2851 }
2852
2853 ops->retlen = ops->len - writelen;
2854 if (unlikely(oob))
2855 ops->oobretlen = ops->ooblen;
2856
2857 err_out:
2858 chip->select_chip(mtd, -1);
2859 return ret;
2860 }
2861
2862 /**
2863 * panic_nand_write - [MTD Interface] NAND write with ECC
2864 * @mtd: MTD device structure
2865 * @to: offset to write to
2866 * @len: number of bytes to write
2867 * @retlen: pointer to variable to store the number of written bytes
2868 * @buf: the data to write
2869 *
2870 * NAND write with ECC. Used when performing writes in interrupt context, this
2871 * may for example be called by mtdoops when writing an oops while in panic.
2872 */
2873 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2874 size_t *retlen, const uint8_t *buf)
2875 {
2876 struct nand_chip *chip = mtd_to_nand(mtd);
2877 struct mtd_oob_ops ops;
2878 int ret;
2879
2880 /* Wait for the device to get ready */
2881 panic_nand_wait(mtd, chip, 400);
2882
2883 /* Grab the device */
2884 panic_nand_get_device(chip, mtd, FL_WRITING);
2885
2886 memset(&ops, 0, sizeof(ops));
2887 ops.len = len;
2888 ops.datbuf = (uint8_t *)buf;
2889 ops.mode = MTD_OPS_PLACE_OOB;
2890
2891 ret = nand_do_write_ops(mtd, to, &ops);
2892
2893 *retlen = ops.retlen;
2894 return ret;
2895 }
2896
2897 /**
2898 * nand_write - [MTD Interface] NAND write with ECC
2899 * @mtd: MTD device structure
2900 * @to: offset to write to
2901 * @len: number of bytes to write
2902 * @retlen: pointer to variable to store the number of written bytes
2903 * @buf: the data to write
2904 *
2905 * NAND write with ECC.
2906 */
2907 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2908 size_t *retlen, const uint8_t *buf)
2909 {
2910 struct mtd_oob_ops ops;
2911 int ret;
2912
2913 nand_get_device(mtd, FL_WRITING);
2914 memset(&ops, 0, sizeof(ops));
2915 ops.len = len;
2916 ops.datbuf = (uint8_t *)buf;
2917 ops.mode = MTD_OPS_PLACE_OOB;
2918 ret = nand_do_write_ops(mtd, to, &ops);
2919 *retlen = ops.retlen;
2920 nand_release_device(mtd);
2921 return ret;
2922 }
2923
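/*
 * Illustrative sketch, not part of the original driver: a page-aligned
 * write through the mtd API, which reaches nand_write() above. The
 * offset and buffer are assumptions of the example; writes that are not
 * (sub)page aligned are rejected by nand_do_write_ops().
 */
#if 0	/* example only, not compiled */
static int example_write_page(struct mtd_info *mtd, loff_t to,
			      const u8 *buf)
{
	size_t retlen = 0;
	int ret;

	ret = mtd_write(mtd, to, mtd->writesize, &retlen, buf);
	if (ret)
		return ret;

	return retlen == mtd->writesize ? 0 : -EIO;
}
#endif
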
2924 /**
2925 * nand_do_write_oob - [INTERN] NAND write out-of-band
2926 * @mtd: MTD device structure
2927 * @to: offset to write to
2928 * @ops: oob operation description structure
2929 *
2930 * NAND write out-of-band.
2931 */
2932 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2933 struct mtd_oob_ops *ops)
2934 {
2935 int chipnr, page, status, len;
2936 struct nand_chip *chip = mtd_to_nand(mtd);
2937
2938 pr_debug("%s: to = 0x%08x, len = %i\n",
2939 __func__, (unsigned int)to, (int)ops->ooblen);
2940
2941 len = mtd_oobavail(mtd, ops);
2942
2943 /* Do not allow write past end of page */
2944 if ((ops->ooboffs + ops->ooblen) > len) {
2945 pr_debug("%s: attempt to write past end of page\n",
2946 __func__);
2947 return -EINVAL;
2948 }
2949
2950 if (unlikely(ops->ooboffs >= len)) {
2951 pr_debug("%s: attempt to start write outside oob\n",
2952 __func__);
2953 return -EINVAL;
2954 }
2955
2956 /* Do not allow write past end of device */
2957 if (unlikely(to >= mtd->size ||
2958 ops->ooboffs + ops->ooblen >
2959 ((mtd->size >> chip->page_shift) -
2960 (to >> chip->page_shift)) * len)) {
2961 pr_debug("%s: attempt to write beyond end of device\n",
2962 __func__);
2963 return -EINVAL;
2964 }
2965
2966 chipnr = (int)(to >> chip->chip_shift);
2967 chip->select_chip(mtd, chipnr);
2968
2969 /* Shift to get page */
2970 page = (int)(to >> chip->page_shift);
2971
2972 /*
2973 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
2974 * of my DiskOnChip 2000 test units) will clear the whole data page too
2975 * if we don't do this. I have no clue why, but I seem to have 'fixed'
2976 * it in the doc2000 driver in August 1999. dwmw2.
2977 */
2978 nand_reset(chip);
2979
2980 /* Check, if it is write protected */
2981 if (nand_check_wp(mtd)) {
2982 chip->select_chip(mtd, -1);
2983 return -EROFS;
2984 }
2985
2986 /* Invalidate the page cache, if we write to the cached page */
2987 if (page == chip->pagebuf)
2988 chip->pagebuf = -1;
2989
2990 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
2991
2992 if (ops->mode == MTD_OPS_RAW)
2993 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
2994 else
2995 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2996
2997 chip->select_chip(mtd, -1);
2998
2999 if (status)
3000 return status;
3001
3002 ops->oobretlen = ops->ooblen;
3003
3004 return 0;
3005 }
3006
3007 /**
3008 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
3009 * @mtd: MTD device structure
3010 * @to: offset to write to
3011 * @ops: oob operation description structure
3012 */
3013 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
3014 struct mtd_oob_ops *ops)
3015 {
3016 int ret = -ENOTSUPP;
3017
3018 ops->retlen = 0;
3019
3020 /* Do not allow writes past end of device */
3021 if (ops->datbuf && (to + ops->len) > mtd->size) {
3022 pr_debug("%s: attempt to write beyond end of device\n",
3023 __func__);
3024 return -EINVAL;
3025 }
3026
3027 nand_get_device(mtd, FL_WRITING);
3028
3029 switch (ops->mode) {
3030 case MTD_OPS_PLACE_OOB:
3031 case MTD_OPS_AUTO_OOB:
3032 case MTD_OPS_RAW:
3033 break;
3034
3035 default:
3036 goto out;
3037 }
3038
3039 if (!ops->datbuf)
3040 ret = nand_do_write_oob(mtd, to, ops);
3041 else
3042 ret = nand_do_write_ops(mtd, to, ops);
3043
3044 out:
3045 nand_release_device(mtd);
3046 return ret;
3047 }
3048
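/*
 * Illustrative sketch, not part of the original driver: an OOB-only
 * write through the mtd API. With no data buffer it takes the
 * !ops->datbuf branch of nand_write_oob() above and ends up in
 * nand_do_write_oob(). The offset and buffer are assumptions of the
 * example.
 */
#if 0	/* example only, not compiled */
static int example_write_free_oob(struct mtd_info *mtd, loff_t to,
				  u8 *oob, size_t ooblen)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,
		.ooblen	= ooblen,
		.oobbuf	= oob,
	};

	return mtd_write_oob(mtd, to, &ops);
}
#endif
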
3049 /**
3050 * single_erase - [GENERIC] NAND standard block erase command function
3051 * @mtd: MTD device structure
3052 * @page: the page address of the block which will be erased
3053 *
3054 * Standard erase command for NAND chips. Returns NAND status.
3055 */
3056 static int single_erase(struct mtd_info *mtd, int page)
3057 {
3058 struct nand_chip *chip = mtd_to_nand(mtd);
3059 /* Send commands to erase a block */
3060 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
3061 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
3062
3063 return chip->waitfunc(mtd, chip);
3064 }
3065
3066 /**
3067 * nand_erase - [MTD Interface] erase block(s)
3068 * @mtd: MTD device structure
3069 * @instr: erase instruction
3070 *
3071 * Erase one or more blocks.
3072 */
3073 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3074 {
3075 return nand_erase_nand(mtd, instr, 0);
3076 }
3077
3078 /**
3079 * nand_erase_nand - [INTERN] erase block(s)
3080 * @mtd: MTD device structure
3081 * @instr: erase instruction
3082 * @allowbbt: allow erasing the bbt area
3083 *
3084 * Erase one or more blocks.
3085 */
3086 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3087 int allowbbt)
3088 {
3089 int page, status, pages_per_block, ret, chipnr;
3090 struct nand_chip *chip = mtd_to_nand(mtd);
3091 loff_t len;
3092
3093 pr_debug("%s: start = 0x%012llx, len = %llu\n",
3094 __func__, (unsigned long long)instr->addr,
3095 (unsigned long long)instr->len);
3096
3097 if (check_offs_len(mtd, instr->addr, instr->len))
3098 return -EINVAL;
3099
3100 /* Grab the lock and see if the device is available */
3101 nand_get_device(mtd, FL_ERASING);
3102
3103 /* Shift to get first page */
3104 page = (int)(instr->addr >> chip->page_shift);
3105 chipnr = (int)(instr->addr >> chip->chip_shift);
3106
3107 /* Calculate pages in each block */
3108 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3109
3110 /* Select the NAND device */
3111 chip->select_chip(mtd, chipnr);
3112
3113 /* Check, if it is write protected */
3114 if (nand_check_wp(mtd)) {
3115 pr_debug("%s: device is write protected!\n",
3116 __func__);
3117 instr->state = MTD_ERASE_FAILED;
3118 goto erase_exit;
3119 }
3120
3121 /* Loop through the pages */
3122 len = instr->len;
3123
3124 instr->state = MTD_ERASING;
3125
3126 while (len) {
3127 /* Check if we have a bad block; we do not erase bad blocks! */
3128 if (nand_block_checkbad(mtd, ((loff_t) page) <<
3129 chip->page_shift, allowbbt)) {
3130 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3131 __func__, page);
3132 instr->state = MTD_ERASE_FAILED;
3133 goto erase_exit;
3134 }
3135
3136 /*
3137 * Invalidate the page cache, if we erase the block which
3138 * contains the current cached page.
3139 */
3140 if (page <= chip->pagebuf && chip->pagebuf <
3141 (page + pages_per_block))
3142 chip->pagebuf = -1;
3143
3144 status = chip->erase(mtd, page & chip->pagemask);
3145
3146 /*
3147 * See if operation failed and additional status checks are
3148 * available
3149 */
3150 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
3151 status = chip->errstat(mtd, chip, FL_ERASING,
3152 status, page);
3153
3154 /* See if block erase succeeded */
3155 if (status & NAND_STATUS_FAIL) {
3156 pr_debug("%s: failed erase, page 0x%08x\n",
3157 __func__, page);
3158 instr->state = MTD_ERASE_FAILED;
3159 instr->fail_addr =
3160 ((loff_t)page << chip->page_shift);
3161 goto erase_exit;
3162 }
3163
3164 /* Increment page address and decrement length */
3165 len -= (1ULL << chip->phys_erase_shift);
3166 page += pages_per_block;
3167
3168 /* Check, if we cross a chip boundary */
3169 if (len && !(page & chip->pagemask)) {
3170 chipnr++;
3171 chip->select_chip(mtd, -1);
3172 chip->select_chip(mtd, chipnr);
3173 }
3174 }
3175 instr->state = MTD_ERASE_DONE;
3176
3177 erase_exit:
3178
3179 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3180
3181 /* Deselect and wake up anyone waiting on the device */
3182 chip->select_chip(mtd, -1);
3183 nand_release_device(mtd);
3184
3185 /* Do call back function */
3186 if (!ret)
3187 mtd_erase_callback(instr);
3188
3189 /* Return more or less happy */
3190 return ret;
3191 }
3192
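/*
 * Illustrative sketch, not part of the original driver: erasing a single
 * block through the mtd API, which calls into nand_erase() above. The
 * offset is an assumption of the example and must be block aligned; on
 * this kernel the NAND erase completes before mtd_erase() returns, so
 * the state can be checked immediately.
 */
#if 0	/* example only, not compiled */
static int example_erase_block(struct mtd_info *mtd, loff_t ofs)
{
	struct erase_info ei = {
		.mtd	= mtd,
		.addr	= ofs,
		.len	= mtd->erasesize,
	};
	int ret;

	ret = mtd_erase(mtd, &ei);
	if (ret)
		return ret;

	return ei.state == MTD_ERASE_DONE ? 0 : -EIO;
}
#endif
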
3193 /**
3194 * nand_sync - [MTD Interface] sync
3195 * @mtd: MTD device structure
3196 *
3197 * Sync is actually just a wait for the chip to become ready.
3198 */
3199 static void nand_sync(struct mtd_info *mtd)
3200 {
3201 pr_debug("%s: called\n", __func__);
3202
3203 /* Grab the lock and see if the device is available */
3204 nand_get_device(mtd, FL_SYNCING);
3205 /* Release it and go back */
3206 nand_release_device(mtd);
3207 }
3208
3209 /**
3210 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3211 * @mtd: MTD device structure
3212 * @offs: offset relative to mtd start
3213 */
3214 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3215 {
3216 struct nand_chip *chip = mtd_to_nand(mtd);
3217 int chipnr = (int)(offs >> chip->chip_shift);
3218 int ret;
3219
3220 /* Select the NAND device */
3221 nand_get_device(mtd, FL_READING);
3222 chip->select_chip(mtd, chipnr);
3223
3224 ret = nand_block_checkbad(mtd, offs, 0);
3225
3226 chip->select_chip(mtd, -1);
3227 nand_release_device(mtd);
3228
3229 return ret;
3230 }
3231
3232 /**
3233 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3234 * @mtd: MTD device structure
3235 * @ofs: offset relative to mtd start
3236 */
3237 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3238 {
3239 int ret;
3240
3241 ret = nand_block_isbad(mtd, ofs);
3242 if (ret) {
3243 /* If it was bad already, return success and do nothing */
3244 if (ret > 0)
3245 return 0;
3246 return ret;
3247 }
3248
3249 return nand_block_markbad_lowlevel(mtd, ofs);
3250 }
3251
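/*
 * Illustrative sketch, not part of the original driver: scanning a whole
 * device for bad blocks with the mtd API, which calls into
 * nand_block_isbad() above. mtd_block_markbad() is used the same way
 * when a worn-out block has to be retired.
 */
#if 0	/* example only, not compiled */
static int example_count_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int bad = 0;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		int ret = mtd_block_isbad(mtd, ofs);

		if (ret < 0)
			return ret;
		if (ret)
			bad++;
	}

	return bad;
}
#endif
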
3252 /**
3253 * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
3254 * @mtd: MTD device structure
3255 * @chip: nand chip info structure
3256 * @addr: feature address.
3257 * @subfeature_param: the subfeature parameters, a four bytes array.
3258 */
3259 static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3260 int addr, uint8_t *subfeature_param)
3261 {
3262 int status;
3263 int i;
3264
3265 if (!chip->onfi_version ||
3266 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3267 & ONFI_OPT_CMD_SET_GET_FEATURES))
3268 return -EINVAL;
3269
3270 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
3271 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3272 chip->write_byte(mtd, subfeature_param[i]);
3273
3274 status = chip->waitfunc(mtd, chip);
3275 if (status & NAND_STATUS_FAIL)
3276 return -EIO;
3277 return 0;
3278 }
3279
3280 /**
3281 * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
3282 * @mtd: MTD device structure
3283 * @chip: nand chip info structure
3284 * @addr: feature address.
3285 * @subfeature_param: the subfeature parameters, a four bytes array.
3286 */
3287 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3288 int addr, uint8_t *subfeature_param)
3289 {
3290 int i;
3291
3292 if (!chip->onfi_version ||
3293 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3294 & ONFI_OPT_CMD_SET_GET_FEATURES))
3295 return -EINVAL;
3296
3297 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
3298 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3299 *subfeature_param++ = chip->read_byte(mtd);
3300 return 0;
3301 }
3302
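/*
 * Illustrative sketch, not part of the original driver: reading back an
 * ONFI feature through the chip->onfi_get_features hook, for which
 * nand_onfi_get_features() above is the default implementation. The
 * read-retry feature address is the one written by
 * nand_setup_read_retry_micron() further down; whether it is implemented
 * is chip specific, and -EINVAL is returned for non-ONFI parts.
 */
#if 0	/* example only, not compiled */
static int example_get_read_retry_mode(struct mtd_info *mtd,
				       struct nand_chip *chip)
{
	u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
	int ret;

	ret = chip->onfi_get_features(mtd, chip,
				      ONFI_FEATURE_ADDR_READ_RETRY, feature);
	if (ret)
		return ret;

	return feature[0];	/* currently selected retry mode */
}
#endif
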
3303 /**
3304 * nand_suspend - [MTD Interface] Suspend the NAND flash
3305 * @mtd: MTD device structure
3306 */
3307 static int nand_suspend(struct mtd_info *mtd)
3308 {
3309 return nand_get_device(mtd, FL_PM_SUSPENDED);
3310 }
3311
3312 /**
3313 * nand_resume - [MTD Interface] Resume the NAND flash
3314 * @mtd: MTD device structure
3315 */
3316 static void nand_resume(struct mtd_info *mtd)
3317 {
3318 struct nand_chip *chip = mtd_to_nand(mtd);
3319
3320 if (chip->state == FL_PM_SUSPENDED)
3321 nand_release_device(mtd);
3322 else
3323 pr_err("%s called for a chip which is not in suspended state\n",
3324 __func__);
3325 }
3326
3327 /**
3328 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
3329 * prevent further operations
3330 * @mtd: MTD device structure
3331 */
3332 static void nand_shutdown(struct mtd_info *mtd)
3333 {
3334 nand_get_device(mtd, FL_PM_SUSPENDED);
3335 }
3336
3337 /* Set default functions */
3338 static void nand_set_defaults(struct nand_chip *chip, int busw)
3339 {
3340 /* check for proper chip_delay setup, set 20us if not */
3341 if (!chip->chip_delay)
3342 chip->chip_delay = 20;
3343
3344 /* check if a user-supplied command function was given */
3345 if (chip->cmdfunc == NULL)
3346 chip->cmdfunc = nand_command;
3347
3348 /* check if a user-supplied wait function was given */
3349 if (chip->waitfunc == NULL)
3350 chip->waitfunc = nand_wait;
3351
3352 if (!chip->select_chip)
3353 chip->select_chip = nand_select_chip;
3354
3355 /* set for ONFI nand */
3356 if (!chip->onfi_set_features)
3357 chip->onfi_set_features = nand_onfi_set_features;
3358 if (!chip->onfi_get_features)
3359 chip->onfi_get_features = nand_onfi_get_features;
3360
3361 /* If called twice, pointers that depend on busw may need to be reset */
3362 if (!chip->read_byte || chip->read_byte == nand_read_byte)
3363 chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
3364 if (!chip->read_word)
3365 chip->read_word = nand_read_word;
3366 if (!chip->block_bad)
3367 chip->block_bad = nand_block_bad;
3368 if (!chip->block_markbad)
3369 chip->block_markbad = nand_default_block_markbad;
3370 if (!chip->write_buf || chip->write_buf == nand_write_buf)
3371 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
3372 if (!chip->write_byte || chip->write_byte == nand_write_byte)
3373 chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
3374 if (!chip->read_buf || chip->read_buf == nand_read_buf)
3375 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
3376 if (!chip->scan_bbt)
3377 chip->scan_bbt = nand_default_bbt;
3378
3379 if (!chip->controller) {
3380 chip->controller = &chip->hwcontrol;
3381 nand_hw_control_init(chip->controller);
3382 }
3383
3384 }
3385
3386 /* Sanitize ONFI strings so we can safely print them */
3387 static void sanitize_string(uint8_t *s, size_t len)
3388 {
3389 ssize_t i;
3390
3391 /* Null terminate */
3392 s[len - 1] = 0;
3393
3394 /* Remove non printable chars */
3395 for (i = 0; i < len - 1; i++) {
3396 if (s[i] < ' ' || s[i] > 127)
3397 s[i] = '?';
3398 }
3399
3400 /* Remove trailing spaces */
3401 strim(s);
3402 }
3403
3404 static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3405 {
3406 int i;
3407 while (len--) {
3408 crc ^= *p++ << 8;
3409 for (i = 0; i < 8; i++)
3410 crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
3411 }
3412
3413 return crc;
3414 }
3415
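/*
 * Illustrative sketch, not part of the original driver: how the helper
 * above is used to validate a parameter page. The ONFI CRC covers the
 * first 254 bytes of the 256-byte page with polynomial 0x8005 and
 * initial value ONFI_CRC_BASE; the stored CRC sits in the last two
 * bytes (little endian).
 */
#if 0	/* example only, not compiled */
static bool example_onfi_param_page_ok(struct nand_onfi_params *p)
{
	return onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) == le16_to_cpu(p->crc);
}
#endif
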
3416 /* Parse the Extended Parameter Page. */
3417 static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
3418 struct nand_chip *chip, struct nand_onfi_params *p)
3419 {
3420 struct onfi_ext_param_page *ep;
3421 struct onfi_ext_section *s;
3422 struct onfi_ext_ecc_info *ecc;
3423 uint8_t *cursor;
3424 int ret = -EINVAL;
3425 int len;
3426 int i;
3427
3428 len = le16_to_cpu(p->ext_param_page_length) * 16;
3429 ep = kmalloc(len, GFP_KERNEL);
3430 if (!ep)
3431 return -ENOMEM;
3432
3433 /* Send our own NAND_CMD_PARAM. */
3434 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3435
3436 /* Use the Change Read Column command to skip the ONFI param pages. */
3437 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
3438 sizeof(*p) * p->num_of_param_pages, -1);
3439
3440 /* Read out the Extended Parameter Page. */
3441 chip->read_buf(mtd, (uint8_t *)ep, len);
3442 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3443 != le16_to_cpu(ep->crc))) {
3444 pr_debug("fail in the CRC.\n");
3445 goto ext_out;
3446 }
3447
3448 /*
3449 * Check the signature.
3450 * We do not strictly follow the ONFI spec here; this may change in the future.
3451 */
3452 if (strncmp(ep->sig, "EPPS", 4)) {
3453 pr_debug("The signature is invalid.\n");
3454 goto ext_out;
3455 }
3456
3457 /* find the ECC section. */
3458 cursor = (uint8_t *)(ep + 1);
3459 for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
3460 s = ep->sections + i;
3461 if (s->type == ONFI_SECTION_TYPE_2)
3462 break;
3463 cursor += s->length * 16;
3464 }
3465 if (i == ONFI_EXT_SECTION_MAX) {
3466 pr_debug("We can not find the ECC section.\n");
3467 goto ext_out;
3468 }
3469
3470 /* get the info we want. */
3471 ecc = (struct onfi_ext_ecc_info *)cursor;
3472
3473 if (!ecc->codeword_size) {
3474 pr_debug("Invalid codeword size\n");
3475 goto ext_out;
3476 }
3477
3478 chip->ecc_strength_ds = ecc->ecc_bits;
3479 chip->ecc_step_ds = 1 << ecc->codeword_size;
3480 ret = 0;
3481
3482 ext_out:
3483 kfree(ep);
3484 return ret;
3485 }
3486
3487 static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
3488 {
3489 struct nand_chip *chip = mtd_to_nand(mtd);
3490 uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
3491
3492 return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
3493 feature);
3494 }
3495
3496 /*
3497 * Configure chip properties from Micron vendor-specific ONFI table
3498 */
3499 static void nand_onfi_detect_micron(struct nand_chip *chip,
3500 struct nand_onfi_params *p)
3501 {
3502 struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
3503
3504 if (le16_to_cpu(p->vendor_revision) < 1)
3505 return;
3506
3507 chip->read_retries = micron->read_retry_options;
3508 chip->setup_read_retry = nand_setup_read_retry_micron;
3509 }
3510
3511 /*
3512 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
3513 */
3514 static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
3515 int *busw)
3516 {
3517 struct nand_onfi_params *p = &chip->onfi_params;
3518 int i, j;
3519 int val;
3520
3521 /* Try ONFI for unknown chip or LP */
3522 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
3523 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
3524 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
3525 return 0;
3526
3527 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3528 for (i = 0; i < 3; i++) {
3529 for (j = 0; j < sizeof(*p); j++)
3530 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3531 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3532 le16_to_cpu(p->crc)) {
3533 break;
3534 }
3535 }
3536
3537 if (i == 3) {
3538 pr_err("Could not find valid ONFI parameter page; aborting\n");
3539 return 0;
3540 }
3541
3542 /* Check version */
3543 val = le16_to_cpu(p->revision);
3544 if (val & (1 << 5))
3545 chip->onfi_version = 23;
3546 else if (val & (1 << 4))
3547 chip->onfi_version = 22;
3548 else if (val & (1 << 3))
3549 chip->onfi_version = 21;
3550 else if (val & (1 << 2))
3551 chip->onfi_version = 20;
3552 else if (val & (1 << 1))
3553 chip->onfi_version = 10;
3554
3555 if (!chip->onfi_version) {
3556 pr_info("unsupported ONFI version: %d\n", val);
3557 return 0;
3558 }
3559
3560 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3561 sanitize_string(p->model, sizeof(p->model));
3562 if (!mtd->name)
3563 mtd->name = p->model;
3564
3565 mtd->writesize = le32_to_cpu(p->byte_per_page);
3566
3567 /*
3568 * pages_per_block and blocks_per_lun may not be a power-of-2 size
3569 * (don't ask me who thought of this...). MTD assumes that these
3570 * dimensions will be power-of-2, so just truncate the remaining area.
3571 */
3572 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3573 mtd->erasesize *= mtd->writesize;
3574
3575 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3576
3577 /* See erasesize comment */
3578 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3579 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3580 chip->bits_per_cell = p->bits_per_cell;
3581
3582 if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3583 *busw = NAND_BUSWIDTH_16;
3584 else
3585 *busw = 0;
3586
3587 if (p->ecc_bits != 0xff) {
3588 chip->ecc_strength_ds = p->ecc_bits;
3589 chip->ecc_step_ds = 512;
3590 } else if (chip->onfi_version >= 21 &&
3591 (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
3592
3593 /*
3594 * The nand_flash_detect_ext_param_page() function uses the
3595 * Change Read Column command, which may not be supported
3596 * by chip->cmdfunc. So try to update chip->cmdfunc
3597 * now. We do not replace a user-supplied command function.
3598 */
3599 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3600 chip->cmdfunc = nand_command_lp;
3601
3602 /* The Extended Parameter Page is supported since ONFI 2.1. */
3603 if (nand_flash_detect_ext_param_page(mtd, chip, p))
3604 pr_warn("Failed to detect ONFI extended param page\n");
3605 } else {
3606 pr_warn("Could not retrieve ONFI ECC requirements\n");
3607 }
3608
3609 if (p->jedec_id == NAND_MFR_MICRON)
3610 nand_onfi_detect_micron(chip, p);
3611
3612 return 1;
3613 }
3614
3615 /*
3616 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
3617 */
3618 static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
3619 int *busw)
3620 {
3621 struct nand_jedec_params *p = &chip->jedec_params;
3622 struct jedec_ecc_info *ecc;
3623 int val;
3624 int i, j;
3625
3626 /* Try JEDEC for unknown chip or LP */
3627 chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
3628 if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
3629 chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
3630 chip->read_byte(mtd) != 'C')
3631 return 0;
3632
3633 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
3634 for (i = 0; i < 3; i++) {
3635 for (j = 0; j < sizeof(*p); j++)
3636 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3637
3638 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
3639 le16_to_cpu(p->crc))
3640 break;
3641 }
3642
3643 if (i == 3) {
3644 pr_err("Could not find valid JEDEC parameter page; aborting\n");
3645 return 0;
3646 }
3647
3648 /* Check version */
3649 val = le16_to_cpu(p->revision);
3650 if (val & (1 << 2))
3651 chip->jedec_version = 10;
3652 else if (val & (1 << 1))
3653 chip->jedec_version = 1; /* vendor specific version */
3654
3655 if (!chip->jedec_version) {
3656 pr_info("unsupported JEDEC version: %d\n", val);
3657 return 0;
3658 }
3659
3660 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3661 sanitize_string(p->model, sizeof(p->model));
3662 if (!mtd->name)
3663 mtd->name = p->model;
3664
3665 mtd->writesize = le32_to_cpu(p->byte_per_page);
3666
3667 /* Please refer to the comment for nand_flash_detect_onfi. */
3668 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3669 mtd->erasesize *= mtd->writesize;
3670
3671 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3672
3673 /* Please refer to the comment for nand_flash_detect_onfi. */
3674 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3675 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3676 chip->bits_per_cell = p->bits_per_cell;
3677
3678 if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
3679 *busw = NAND_BUSWIDTH_16;
3680 else
3681 *busw = 0;
3682
3683 /* ECC info */
3684 ecc = &p->ecc_info[0];
3685
3686 if (ecc->codeword_size >= 9) {
3687 chip->ecc_strength_ds = ecc->ecc_bits;
3688 chip->ecc_step_ds = 1 << ecc->codeword_size;
3689 } else {
3690 pr_warn("Invalid codeword size\n");
3691 }
3692
3693 return 1;
3694 }
3695
3696 /*
3697 * nand_id_has_period - Check if an ID string has a given wraparound period
3698 * @id_data: the ID string
3699 * @arrlen: the length of the @id_data array
3700 * @period: the period of repetition
3701 *
3702 * Check if an ID string is repeated within a given sequence of bytes at
3703 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
3704 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
3705 * if the repetition has a period of @period; otherwise, returns zero.
3706 */
3707 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
3708 {
3709 int i, j;
3710 for (i = 0; i < period; i++)
3711 for (j = i + period; j < arrlen; j += period)
3712 if (id_data[i] != id_data[j])
3713 return 0;
3714 return 1;
3715 }
3716
3717 /*
3718 * nand_id_len - Get the length of an ID string returned by CMD_READID
3719 * @id_data: the ID string
3720 * @arrlen: the length of the @id_data array
3721 *
3722 * Returns the length of the ID string, according to known wraparound/trailing
3723 * zero patterns. If no pattern exists, returns the length of the array.
3724 */
3725 static int nand_id_len(u8 *id_data, int arrlen)
3726 {
3727 int last_nonzero, period;
3728
3729 /* Find last non-zero byte */
3730 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
3731 if (id_data[last_nonzero])
3732 break;
3733
3734 /* All zeros */
3735 if (last_nonzero < 0)
3736 return 0;
3737
3738 /* Calculate wraparound period */
3739 for (period = 1; period < arrlen; period++)
3740 if (nand_id_has_period(id_data, arrlen, period))
3741 break;
3742
3743 /* There's a repeated pattern */
3744 if (period < arrlen)
3745 return period;
3746
3747 /* There are trailing zeros */
3748 if (last_nonzero < arrlen - 1)
3749 return last_nonzero + 1;
3750
3751 /* No pattern detected */
3752 return arrlen;
3753 }
3754
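/*
 * Illustrative sketch, not part of the original driver: a worked example
 * for the two helpers above, using the wraparound pattern from the
 * nand_id_has_period() comment extended to eight bytes. The ID repeats
 * with a period of three, so nand_id_len() reports 3; an ID with
 * trailing zeros would instead report the index after the last non-zero
 * byte.
 */
#if 0	/* example only, not compiled */
static void example_id_len(void)
{
	u8 id[8] = { 0x20, 0x01, 0x7f, 0x20, 0x01, 0x7f, 0x20, 0x01 };

	WARN_ON(nand_id_len(id, 8) != 3);
}
#endif
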
3755 /* Extract the bits per cell from the 3rd byte of the extended ID */
3756 static int nand_get_bits_per_cell(u8 cellinfo)
3757 {
3758 int bits;
3759
3760 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
3761 bits >>= NAND_CI_CELLTYPE_SHIFT;
3762 return bits + 1;
3763 }
3764
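/*
 * Illustrative sketch, not part of the original driver: the cell-type
 * field lives in id_data[2] under NAND_CI_CELLTYPE_MSK. An assumed 3rd
 * ID byte of 0x14 decodes to a field value of 1, i.e. 2 bits per cell
 * (MLC), while 0x00 decodes to SLC.
 */
#if 0	/* example only, not compiled */
static void example_bits_per_cell(void)
{
	WARN_ON(nand_get_bits_per_cell(0x14) != 2);	/* MLC */
	WARN_ON(nand_get_bits_per_cell(0x00) != 1);	/* SLC */
}
#endif
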
3765 /*
3766 * Many newer NAND chips share similar device ID codes, which represent the size of the
3767 * chip. The rest of the parameters must be decoded according to generic or
3768 * manufacturer-specific "extended ID" decoding patterns.
3769 */
3770 static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
3771 u8 id_data[8], int *busw)
3772 {
3773 int extid, id_len;
3774 /* The 3rd id byte holds MLC / multichip data */
3775 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3776 /* The 4th id byte is the important one */
3777 extid = id_data[3];
3778
3779 id_len = nand_id_len(id_data, 8);
3780
3781 /*
3782 * Field definitions are in the following datasheets:
3783 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
3784 * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
3785 * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
3786 *
3787 * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
3788 * ID to decide what to do.
3789 */
3790 if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
3791 !nand_is_slc(chip) && id_data[5] != 0x00) {
3792 /* Calc pagesize */
3793 mtd->writesize = 2048 << (extid & 0x03);
3794 extid >>= 2;
3795 /* Calc oobsize */
3796 switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3797 case 1:
3798 mtd->oobsize = 128;
3799 break;
3800 case 2:
3801 mtd->oobsize = 218;
3802 break;
3803 case 3:
3804 mtd->oobsize = 400;
3805 break;
3806 case 4:
3807 mtd->oobsize = 436;
3808 break;
3809 case 5:
3810 mtd->oobsize = 512;
3811 break;
3812 case 6:
3813 mtd->oobsize = 640;
3814 break;
3815 case 7:
3816 default: /* Other cases are "reserved" (unknown) */
3817 mtd->oobsize = 1024;
3818 break;
3819 }
3820 extid >>= 2;
3821 /* Calc blocksize */
3822 mtd->erasesize = (128 * 1024) <<
3823 (((extid >> 1) & 0x04) | (extid & 0x03));
3824 *busw = 0;
3825 } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
3826 !nand_is_slc(chip)) {
3827 unsigned int tmp;
3828
3829 /* Calc pagesize */
3830 mtd->writesize = 2048 << (extid & 0x03);
3831 extid >>= 2;
3832 /* Calc oobsize */
3833 switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3834 case 0:
3835 mtd->oobsize = 128;
3836 break;
3837 case 1:
3838 mtd->oobsize = 224;
3839 break;
3840 case 2:
3841 mtd->oobsize = 448;
3842 break;
3843 case 3:
3844 mtd->oobsize = 64;
3845 break;
3846 case 4:
3847 mtd->oobsize = 32;
3848 break;
3849 case 5:
3850 mtd->oobsize = 16;
3851 break;
3852 default:
3853 mtd->oobsize = 640;
3854 break;
3855 }
3856 extid >>= 2;
3857 /* Calc blocksize */
3858 tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
3859 if (tmp < 0x03)
3860 mtd->erasesize = (128 * 1024) << tmp;
3861 else if (tmp == 0x03)
3862 mtd->erasesize = 768 * 1024;
3863 else
3864 mtd->erasesize = (64 * 1024) << tmp;
3865 *busw = 0;
3866 } else {
3867 /* Calc pagesize */
3868 mtd->writesize = 1024 << (extid & 0x03);
3869 extid >>= 2;
3870 /* Calc oobsize */
3871 mtd->oobsize = (8 << (extid & 0x01)) *
3872 (mtd->writesize >> 9);
3873 extid >>= 2;
3874 /* Calc blocksize. Blocksize is multiples of 64KiB */
3875 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3876 extid >>= 2;
3877 /* Get buswidth information */
3878 *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
3879
3880 /*
3881 * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
3882 * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
3883 * follows:
3884 * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
3885 * 110b -> 24nm
3886 * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
3887 */
3888 if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
3889 nand_is_slc(chip) &&
3890 (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
3891 !(id_data[4] & 0x80) /* !BENAND */) {
3892 mtd->oobsize = 32 * mtd->writesize >> 9;
3893 }
3894
3895 }
3896 }
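/*
 * Worked example of the generic (last) branch above: a 4th ID byte of 0x95
 * (a common value on 2 KiB-page SLC parts) decodes as
 *   writesize = 1024 << 1    = 2048 bytes,
 *   oobsize   = (8 << 1) * 4 = 64 bytes,
 *   erasesize = 64 KiB << 1  = 128 KiB,
 *   bus width = 8 bits (extid bit 6 clear).
 */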
3897
3898 /*
3899 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3900 * decodes a matching ID table entry and assigns the MTD size parameters for
3901 * the chip.
3902 */
3903 static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
3904 struct nand_flash_dev *type, u8 id_data[8],
3905 int *busw)
3906 {
3907 int maf_id = id_data[0];
3908
3909 mtd->erasesize = type->erasesize;
3910 mtd->writesize = type->pagesize;
3911 mtd->oobsize = mtd->writesize / 32;
3912 *busw = type->options & NAND_BUSWIDTH_16;
3913
3914 /* All legacy ID NAND are small-page, SLC */
3915 chip->bits_per_cell = 1;
3916
3917 /*
3918 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3919 * some Spansion chips have erasesize that conflicts with size
3920 * listed in nand_ids table.
3921 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3922 */
3923 if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
3924 && id_data[6] == 0x00 && id_data[7] == 0x00
3925 && mtd->writesize == 512) {
3926 mtd->erasesize = 128 * 1024;
3927 mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
3928 }
3929 }
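/*
 * E.g. for the Spansion special case above, a (hypothetical) 4th ID byte of
 * 0x01 gives erasesize = 128 KiB << ((0x01 & 0x03) << 1) = 512 KiB.
 */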
3930
3931 /*
3932 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3933 * heuristics based on various detected parameters (e.g., manufacturer,
3934 * page size, cell-type information).
3935 */
3936 static void nand_decode_bbm_options(struct mtd_info *mtd,
3937 struct nand_chip *chip, u8 id_data[8])
3938 {
3939 int maf_id = id_data[0];
3940
3941 /* Set the bad block position */
3942 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3943 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3944 else
3945 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3946
3947 /*
3948 * Bad block marker is stored in the last page of each block on Samsung
3949 * and Hynix MLC devices; stored in first two pages of each block on
3950 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
3951 * AMD/Spansion, and Macronix. All others scan only the first page.
3952 */
3953 if (!nand_is_slc(chip) &&
3954 (maf_id == NAND_MFR_SAMSUNG ||
3955 maf_id == NAND_MFR_HYNIX))
3956 chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
3957 else if ((nand_is_slc(chip) &&
3958 (maf_id == NAND_MFR_SAMSUNG ||
3959 maf_id == NAND_MFR_HYNIX ||
3960 maf_id == NAND_MFR_TOSHIBA ||
3961 maf_id == NAND_MFR_AMD ||
3962 maf_id == NAND_MFR_MACRONIX)) ||
3963 (mtd->writesize == 2048 &&
3964 maf_id == NAND_MFR_MICRON))
3965 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
3966 }
3967
3968 static inline bool is_full_id_nand(struct nand_flash_dev *type)
3969 {
3970 return type->id_len;
3971 }
3972
3973 static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
3974 struct nand_flash_dev *type, u8 *id_data, int *busw)
3975 {
3976 if (!strncmp(type->id, id_data, type->id_len)) {
3977 mtd->writesize = type->pagesize;
3978 mtd->erasesize = type->erasesize;
3979 mtd->oobsize = type->oobsize;
3980
3981 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3982 chip->chipsize = (uint64_t)type->chipsize << 20;
3983 chip->options |= type->options;
3984 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
3985 chip->ecc_step_ds = NAND_ECC_STEP(type);
3986 chip->onfi_timing_mode_default =
3987 type->onfi_timing_mode_default;
3988
3989 *busw = type->options & NAND_BUSWIDTH_16;
3990
3991 if (!mtd->name)
3992 mtd->name = type->name;
3993
3994 return true;
3995 }
3996 return false;
3997 }
3998
3999 /*
4000 * Get the flash and manufacturer id and look up whether the type is supported.
4001 */
4002 static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
4003 struct nand_chip *chip,
4004 int *maf_id, int *dev_id,
4005 struct nand_flash_dev *type)
4006 {
4007 int busw;
4008 int i, maf_idx;
4009 u8 id_data[8];
4010
4011 /* Select the device */
4012 chip->select_chip(mtd, 0);
4013
4014 /*
4015 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4016 * after power-up.
4017 */
4018 nand_reset(chip);
4019
4020 /* Send the command for reading device ID */
4021 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4022
4023 /* Read manufacturer and device IDs */
4024 *maf_id = chip->read_byte(mtd);
4025 *dev_id = chip->read_byte(mtd);
4026
4027 /*
4028 * Read the ID bytes again to make sure: on some systems, bus-hold or
4029 * other interface issues can return random data that looks like a
4030 * plausible NAND flash ID. If the two reads do not match, ignore
4031 * the device completely.
4032 */
4033
4034 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4035
4036 /* Read entire ID string */
4037 for (i = 0; i < 8; i++)
4038 id_data[i] = chip->read_byte(mtd);
4039
4040 if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
4041 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4042 *maf_id, *dev_id, id_data[0], id_data[1]);
4043 return ERR_PTR(-ENODEV);
4044 }
4045
4046 if (!type)
4047 type = nand_flash_ids;
4048
4049 for (; type->name != NULL; type++) {
4050 if (is_full_id_nand(type)) {
4051 if (find_full_id_nand(mtd, chip, type, id_data, &busw))
4052 goto ident_done;
4053 } else if (*dev_id == type->dev_id) {
4054 break;
4055 }
4056 }
4057
4058 chip->onfi_version = 0;
4059 if (!type->name || !type->pagesize) {
4060 /* Check if the chip is ONFI compliant */
4061 if (nand_flash_detect_onfi(mtd, chip, &busw))
4062 goto ident_done;
4063
4064 /* Check if the chip is JEDEC compliant */
4065 if (nand_flash_detect_jedec(mtd, chip, &busw))
4066 goto ident_done;
4067 }
4068
4069 if (!type->name)
4070 return ERR_PTR(-ENODEV);
4071
4072 if (!mtd->name)
4073 mtd->name = type->name;
4074
4075 chip->chipsize = (uint64_t)type->chipsize << 20;
4076
4077 if (!type->pagesize) {
4078 /* Decode parameters from extended ID */
4079 nand_decode_ext_id(mtd, chip, id_data, &busw);
4080 } else {
4081 nand_decode_id(mtd, chip, type, id_data, &busw);
4082 }
4083 /* Get chip options */
4084 chip->options |= type->options;
4085
4086 /*
4087 * Only clear the Samsung large-page options for non-Samsung chips
4088 * that use extended-ID decoding (i.e. no fixed pagesize in the table).
4089 */
4090 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
4091 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
4092 ident_done:
4093
4094 /* Try to identify manufacturer */
4095 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
4096 if (nand_manuf_ids[maf_idx].id == *maf_id)
4097 break;
4098 }
4099
4100 if (chip->options & NAND_BUSWIDTH_AUTO) {
4101 WARN_ON(chip->options & NAND_BUSWIDTH_16);
4102 chip->options |= busw;
4103 nand_set_defaults(chip, busw);
4104 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4105 /*
4106 * Check if the bus width is correct. Hardware drivers should
4107 * configure the chip correctly!
4108 */
4109 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4110 *maf_id, *dev_id);
4111 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
4112 pr_warn("bus width %d instead %d bit\n",
4113 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
4114 busw ? 16 : 8);
4115 return ERR_PTR(-EINVAL);
4116 }
4117
4118 nand_decode_bbm_options(mtd, chip, id_data);
4119
4120 /* Calculate the address shift from the page size */
4121 chip->page_shift = ffs(mtd->writesize) - 1;
4122 /* Convert chipsize to number of pages per chip -1 */
4123 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4124
4125 chip->bbt_erase_shift = chip->phys_erase_shift =
4126 ffs(mtd->erasesize) - 1;
4127 if (chip->chipsize & 0xffffffff)
4128 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4129 else {
4130 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4131 chip->chip_shift += 32 - 1;
4132 }
4133
4134 chip->badblockbits = 8;
4135 chip->erase = single_erase;
4136
4137 /* Do not replace user supplied command function! */
4138 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4139 chip->cmdfunc = nand_command_lp;
4140
4141 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4142 *maf_id, *dev_id);
4143
4144 if (chip->onfi_version)
4145 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4146 chip->onfi_params.model);
4147 else if (chip->jedec_version)
4148 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4149 chip->jedec_params.model);
4150 else
4151 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4152 type->name);
4153
4154 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4155 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4156 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4157 return type;
4158 }
4159
4160 static const char * const nand_ecc_modes[] = {
4161 [NAND_ECC_NONE] = "none",
4162 [NAND_ECC_SOFT] = "soft",
4163 [NAND_ECC_HW] = "hw",
4164 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
4165 [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
4166 };
4167
4168 static int of_get_nand_ecc_mode(struct device_node *np)
4169 {
4170 const char *pm;
4171 int err, i;
4172
4173 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4174 if (err < 0)
4175 return err;
4176
4177 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4178 if (!strcasecmp(pm, nand_ecc_modes[i]))
4179 return i;
4180
4181 /*
4182 * For backward compatibility we support a few obsolete values that no
4183 * longer have mappings in nand_ecc_modes_t (they were merged with
4184 * other enums).
4185 */
4186 if (!strcasecmp(pm, "soft_bch"))
4187 return NAND_ECC_SOFT;
4188
4189 return -ENODEV;
4190 }
4191
4192 static const char * const nand_ecc_algos[] = {
4193 [NAND_ECC_HAMMING] = "hamming",
4194 [NAND_ECC_BCH] = "bch",
4195 };
4196
4197 static int of_get_nand_ecc_algo(struct device_node *np)
4198 {
4199 const char *pm;
4200 int err, i;
4201
4202 err = of_property_read_string(np, "nand-ecc-algo", &pm);
4203 if (!err) {
4204 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4205 if (!strcasecmp(pm, nand_ecc_algos[i]))
4206 return i;
4207 return -ENODEV;
4208 }
4209
4210 /*
4211 * For backward compatibility we also read "nand-ecc-mode", checking
4212 * for some obsolete values that used to specify the ECC algorithm.
4213 */
4214 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4215 if (err < 0)
4216 return err;
4217
4218 if (!strcasecmp(pm, "soft"))
4219 return NAND_ECC_HAMMING;
4220 else if (!strcasecmp(pm, "soft_bch"))
4221 return NAND_ECC_BCH;
4222
4223 return -ENODEV;
4224 }
4225
4226 static int of_get_nand_ecc_step_size(struct device_node *np)
4227 {
4228 int ret;
4229 u32 val;
4230
4231 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4232 return ret ? ret : val;
4233 }
4234
4235 static int of_get_nand_ecc_strength(struct device_node *np)
4236 {
4237 int ret;
4238 u32 val;
4239
4240 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4241 return ret ? ret : val;
4242 }
4243
4244 static int of_get_nand_bus_width(struct device_node *np)
4245 {
4246 u32 val;
4247
4248 if (of_property_read_u32(np, "nand-bus-width", &val))
4249 return 8;
4250
4251 switch (val) {
4252 case 8:
4253 case 16:
4254 return val;
4255 default:
4256 return -EIO;
4257 }
4258 }
4259
4260 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4261 {
4262 return of_property_read_bool(np, "nand-on-flash-bbt");
4263 }
4264
4265 static int nand_dt_init(struct nand_chip *chip)
4266 {
4267 struct device_node *dn = nand_get_flash_node(chip);
4268 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4269
4270 if (!dn)
4271 return 0;
4272
4273 if (of_get_nand_bus_width(dn) == 16)
4274 chip->options |= NAND_BUSWIDTH_16;
4275
4276 if (of_get_nand_on_flash_bbt(dn))
4277 chip->bbt_options |= NAND_BBT_USE_FLASH;
4278
4279 ecc_mode = of_get_nand_ecc_mode(dn);
4280 ecc_algo = of_get_nand_ecc_algo(dn);
4281 ecc_strength = of_get_nand_ecc_strength(dn);
4282 ecc_step = of_get_nand_ecc_step_size(dn);
4283
4284 if ((ecc_step >= 0 && !(ecc_strength >= 0)) ||
4285 (!(ecc_step >= 0) && ecc_strength >= 0)) {
4286 pr_err("must set both strength and step size in DT\n");
4287 return -EINVAL;
4288 }
4289
4290 if (ecc_mode >= 0)
4291 chip->ecc.mode = ecc_mode;
4292
4293 if (ecc_algo >= 0)
4294 chip->ecc.algo = ecc_algo;
4295
4296 if (ecc_strength >= 0)
4297 chip->ecc.strength = ecc_strength;
4298
4299 if (ecc_step > 0)
4300 chip->ecc.size = ecc_step;
4301
4302 if (of_property_read_bool(dn, "nand-ecc-maximize"))
4303 chip->ecc.options |= NAND_ECC_MAXIMIZE;
4304
4305 return 0;
4306 }
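/*
 * Illustrative (hypothetical) device tree fragment showing the generic NAND
 * properties parsed above; the node name is made up, the property names are
 * the ones read by nand_dt_init():
 *
 *	nand@0 {
 *		nand-bus-width = <8>;
 *		nand-on-flash-bbt;
 *		nand-ecc-mode = "hw";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-strength = <8>;
 *		nand-ecc-step-size = <512>;
 *	};
 */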
4307
4308 /**
4309 * nand_scan_ident - [NAND Interface] Scan for the NAND device
4310 * @mtd: MTD device structure
4311 * @maxchips: number of chips to scan for
4312 * @table: alternative NAND ID table
4313 *
4314 * This is the first phase of the normal nand_scan() function. It reads the
4315 * flash ID and sets up MTD fields accordingly.
4316 *
4317 */
4318 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4319 struct nand_flash_dev *table)
4320 {
4321 int i, nand_maf_id, nand_dev_id;
4322 struct nand_chip *chip = mtd_to_nand(mtd);
4323 struct nand_flash_dev *type;
4324 int ret;
4325
4326 ret = nand_dt_init(chip);
4327 if (ret)
4328 return ret;
4329
4330 if (!mtd->name && mtd->dev.parent)
4331 mtd->name = dev_name(mtd->dev.parent);
4332
4333 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
4334 /*
4335 * The default functions assigned to select_chip() and
4336 * cmdfunc() both expect cmd_ctrl() to be populated, so
4337 * we need to check that this is the case.
4338 */
4339 pr_err("chip.cmd_ctrl() callback is not provided\n");
4340 return -EINVAL;
4341 }
4342 /* Set the default functions */
4343 nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
4344
4345 /* Read the flash type */
4346 type = nand_get_flash_type(mtd, chip, &nand_maf_id,
4347 &nand_dev_id, table);
4348
4349 if (IS_ERR(type)) {
4350 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4351 pr_warn("No NAND device found\n");
4352 chip->select_chip(mtd, -1);
4353 return PTR_ERR(type);
4354 }
4355
4356 ret = nand_init_data_interface(chip);
4357 if (ret)
4358 return ret;
4359
4360 chip->select_chip(mtd, -1);
4361
4362 /* Check for a chip array */
4363 for (i = 1; i < maxchips; i++) {
4364 chip->select_chip(mtd, i);
4365 /* See comment in nand_get_flash_type for reset */
4366 nand_reset(chip);
4367 /* Send the command for reading device ID */
4368 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4369 /* Read manufacturer and device IDs */
4370 if (nand_maf_id != chip->read_byte(mtd) ||
4371 nand_dev_id != chip->read_byte(mtd)) {
4372 chip->select_chip(mtd, -1);
4373 break;
4374 }
4375 chip->select_chip(mtd, -1);
4376 }
4377 if (i > 1)
4378 pr_info("%d chips detected\n", i);
4379
4380 /* Store the number of chips and calc total size for mtd */
4381 chip->numchips = i;
4382 mtd->size = i * chip->chipsize;
4383
4384 return 0;
4385 }
4386 EXPORT_SYMBOL(nand_scan_ident);
4387
4388 static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
4389 {
4390 struct nand_chip *chip = mtd_to_nand(mtd);
4391 struct nand_ecc_ctrl *ecc = &chip->ecc;
4392
4393 if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
4394 return -EINVAL;
4395
4396 switch (ecc->algo) {
4397 case NAND_ECC_HAMMING:
4398 ecc->calculate = nand_calculate_ecc;
4399 ecc->correct = nand_correct_data;
4400 ecc->read_page = nand_read_page_swecc;
4401 ecc->read_subpage = nand_read_subpage;
4402 ecc->write_page = nand_write_page_swecc;
4403 ecc->read_page_raw = nand_read_page_raw;
4404 ecc->write_page_raw = nand_write_page_raw;
4405 ecc->read_oob = nand_read_oob_std;
4406 ecc->write_oob = nand_write_oob_std;
4407 if (!ecc->size)
4408 ecc->size = 256;
4409 ecc->bytes = 3;
4410 ecc->strength = 1;
4411 return 0;
4412 case NAND_ECC_BCH:
4413 if (!mtd_nand_has_bch()) {
4414 WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
4415 return -EINVAL;
4416 }
4417 ecc->calculate = nand_bch_calculate_ecc;
4418 ecc->correct = nand_bch_correct_data;
4419 ecc->read_page = nand_read_page_swecc;
4420 ecc->read_subpage = nand_read_subpage;
4421 ecc->write_page = nand_write_page_swecc;
4422 ecc->read_page_raw = nand_read_page_raw;
4423 ecc->write_page_raw = nand_write_page_raw;
4424 ecc->read_oob = nand_read_oob_std;
4425 ecc->write_oob = nand_write_oob_std;
4426
4427 /*
4428 * Board driver should supply ecc.size and ecc.strength
4429 * values to select how many bits are correctable.
4430 * Otherwise, default to 4 bits for large page devices.
4431 */
4432 if (!ecc->size && (mtd->oobsize >= 64)) {
4433 ecc->size = 512;
4434 ecc->strength = 4;
4435 }
4436
4437 /*
4438 * If no ECC placement scheme was provided, pick up the default
4439 * large page one.
4440 */
4441 if (!mtd->ooblayout) {
4442 /* handle large page devices only */
4443 if (mtd->oobsize < 64) {
4444 WARN(1, "OOB layout is required when using software BCH on small pages\n");
4445 return -EINVAL;
4446 }
4447
4448 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4449
4450 }
4451
4452 /*
4453 * We can only maximize ECC config when the default layout is
4454 * used, otherwise we don't know how many bytes can really be
4455 * used.
4456 */
4457 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
4458 ecc->options & NAND_ECC_MAXIMIZE) {
4459 int steps, bytes;
4460
4461 /* Always prefer 1k blocks over 512-byte ones */
4462 ecc->size = 1024;
4463 steps = mtd->writesize / ecc->size;
4464
4465 /* Reserve 2 bytes for the BBM */
4466 bytes = (mtd->oobsize - 2) / steps;
4467 ecc->strength = bytes * 8 / fls(8 * ecc->size);
4468 }
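/*
 * Worked example (hypothetical geometry): for a 4096 byte page with
 * 224 bytes of OOB, steps = 4096 / 1024 = 4, bytes = (224 - 2) / 4 = 55
 * and, since fls(8 * 1024) = 14, strength = 55 * 8 / 14 = 31 bits per
 * 1024 byte step.
 */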
4469
4470 /* See nand_bch_init() for details. */
4471 ecc->bytes = 0;
4472 ecc->priv = nand_bch_init(mtd);
4473 if (!ecc->priv) {
4474 WARN(1, "BCH ECC initialization failed!\n");
4475 return -EINVAL;
4476 }
4477 return 0;
4478 default:
4479 WARN(1, "Unsupported ECC algorithm!\n");
4480 return -EINVAL;
4481 }
4482 }
4483
4484 /*
4485 * Check if the chip configuration meets the datasheet requirements.
4486 *
4487 * If our configuration corrects A bits per B bytes and the minimum
4488 * required correction level is X bits per Y bytes, then we must ensure
4489 * both of the following are true:
4490 *
4491 * (1) A / B >= X / Y
4492 * (2) A >= X
4493 *
4494 * Requirement (1) ensures we can correct for the required bitflip density.
4495 * Requirement (2) ensures we can correct even when all bitflips are clumped
4496 * in the same sector.
4497 */
4498 static bool nand_ecc_strength_good(struct mtd_info *mtd)
4499 {
4500 struct nand_chip *chip = mtd_to_nand(mtd);
4501 struct nand_ecc_ctrl *ecc = &chip->ecc;
4502 int corr, ds_corr;
4503
4504 if (ecc->size == 0 || chip->ecc_step_ds == 0)
4505 /* Not enough information */
4506 return true;
4507
4508 /*
4509 * We get the number of corrected bits per page to compare
4510 * the correction density.
4511 */
4512 corr = (mtd->writesize * ecc->strength) / ecc->size;
4513 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
4514
4515 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4516 }
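/*
 * Worked example (hypothetical numbers): a chip whose datasheet requires
 * 8 bits per 512 bytes (ecc_strength_ds = 8, ecc_step_ds = 512) on a
 * 4096 byte page needs ds_corr = 4096 * 8 / 512 = 64 correctable bits per
 * page. A controller configured for 4 bits per 512 bytes only provides
 * corr = 4096 * 4 / 512 = 32, so the check fails and the "too weak"
 * warning in nand_scan_tail() is printed.
 */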
4517
4518 /**
4519 * nand_scan_tail - [NAND Interface] Scan for the NAND device
4520 * @mtd: MTD device structure
4521 *
4522 * This is the second phase of the normal nand_scan() function. It fills out
4523 * all the uninitialized function pointers with the defaults and scans for a
4524 * bad block table if appropriate.
4525 */
4526 int nand_scan_tail(struct mtd_info *mtd)
4527 {
4528 struct nand_chip *chip = mtd_to_nand(mtd);
4529 struct nand_ecc_ctrl *ecc = &chip->ecc;
4530 struct nand_buffers *nbuf;
4531 int ret;
4532
4533 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
4534 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
4535 !(chip->bbt_options & NAND_BBT_USE_FLASH)))
4536 return -EINVAL;
4537
4538 if (!(chip->options & NAND_OWN_BUFFERS)) {
4539 nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
4540 + mtd->oobsize * 3, GFP_KERNEL);
4541 if (!nbuf)
4542 return -ENOMEM;
4543 nbuf->ecccalc = (uint8_t *)(nbuf + 1);
4544 nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
4545 nbuf->databuf = nbuf->ecccode + mtd->oobsize;
4546
4547 chip->buffers = nbuf;
4548 } else {
4549 if (!chip->buffers)
4550 return -ENOMEM;
4551 }
4552
4553 /* Set the internal oob buffer location, just after the page data */
4554 chip->oob_poi = chip->buffers->databuf + mtd->writesize;
4555
4556 /*
4557 * If no default placement scheme is given, select an appropriate one.
4558 */
4559 if (!mtd->ooblayout &&
4560 !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
4561 switch (mtd->oobsize) {
4562 case 8:
4563 case 16:
4564 mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
4565 break;
4566 case 64:
4567 case 128:
4568 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4569 break;
4570 default:
4571 WARN(1, "No oob scheme defined for oobsize %d\n",
4572 mtd->oobsize);
4573 ret = -EINVAL;
4574 goto err_free;
4575 }
4576 }
4577
4578 if (!chip->write_page)
4579 chip->write_page = nand_write_page;
4580
4581 /*
4582 * Check the ECC mode; if 3-byte/512-byte hardware ECC is selected but
4583 * the device only has a 256-byte page size, fall back to software ECC.
4584 */
4585
4586 switch (ecc->mode) {
4587 case NAND_ECC_HW_OOB_FIRST:
4588 /* Similar to NAND_ECC_HW, but uses a separate read_page handler */
4589 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
4590 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4591 ret = -EINVAL;
4592 goto err_free;
4593 }
4594 if (!ecc->read_page)
4595 ecc->read_page = nand_read_page_hwecc_oob_first;
4596
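/* fall through - NAND_ECC_HW provides the remaining default helpers */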
4597 case NAND_ECC_HW:
4598 /* Use standard hwecc read page function? */
4599 if (!ecc->read_page)
4600 ecc->read_page = nand_read_page_hwecc;
4601 if (!ecc->write_page)
4602 ecc->write_page = nand_write_page_hwecc;
4603 if (!ecc->read_page_raw)
4604 ecc->read_page_raw = nand_read_page_raw;
4605 if (!ecc->write_page_raw)
4606 ecc->write_page_raw = nand_write_page_raw;
4607 if (!ecc->read_oob)
4608 ecc->read_oob = nand_read_oob_std;
4609 if (!ecc->write_oob)
4610 ecc->write_oob = nand_write_oob_std;
4611 if (!ecc->read_subpage)
4612 ecc->read_subpage = nand_read_subpage;
4613 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
4614 ecc->write_subpage = nand_write_subpage_hwecc;
4615
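/* fall through - plain HW ECC shares the sanity and size checks below */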
4616 case NAND_ECC_HW_SYNDROME:
4617 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
4618 (!ecc->read_page ||
4619 ecc->read_page == nand_read_page_hwecc ||
4620 !ecc->write_page ||
4621 ecc->write_page == nand_write_page_hwecc)) {
4622 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4623 ret = -EINVAL;
4624 goto err_free;
4625 }
4626 /* Use standard syndrome read/write page function? */
4627 if (!ecc->read_page)
4628 ecc->read_page = nand_read_page_syndrome;
4629 if (!ecc->write_page)
4630 ecc->write_page = nand_write_page_syndrome;
4631 if (!ecc->read_page_raw)
4632 ecc->read_page_raw = nand_read_page_raw_syndrome;
4633 if (!ecc->write_page_raw)
4634 ecc->write_page_raw = nand_write_page_raw_syndrome;
4635 if (!ecc->read_oob)
4636 ecc->read_oob = nand_read_oob_syndrome;
4637 if (!ecc->write_oob)
4638 ecc->write_oob = nand_write_oob_syndrome;
4639
4640 if (mtd->writesize >= ecc->size) {
4641 if (!ecc->strength) {
4642 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
4643 ret = -EINVAL;
4644 goto err_free;
4645 }
4646 break;
4647 }
4648 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
4649 ecc->size, mtd->writesize);
4650 ecc->mode = NAND_ECC_SOFT;
4651 ecc->algo = NAND_ECC_HAMMING;
4652
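/* fall through - set up the software ECC helpers selected above */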
4653 case NAND_ECC_SOFT:
4654 ret = nand_set_ecc_soft_ops(mtd);
4655 if (ret) {
4656 ret = -EINVAL;
4657 goto err_free;
4658 }
4659 break;
4660
4661 case NAND_ECC_NONE:
4662 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
4663 ecc->read_page = nand_read_page_raw;
4664 ecc->write_page = nand_write_page_raw;
4665 ecc->read_oob = nand_read_oob_std;
4666 ecc->read_page_raw = nand_read_page_raw;
4667 ecc->write_page_raw = nand_write_page_raw;
4668 ecc->write_oob = nand_write_oob_std;
4669 ecc->size = mtd->writesize;
4670 ecc->bytes = 0;
4671 ecc->strength = 0;
4672 break;
4673
4674 default:
4675 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
4676 ret = -EINVAL;
4677 goto err_free;
4678 }
4679
4680 /* For many systems, the standard OOB write also works for raw */
4681 if (!ecc->read_oob_raw)
4682 ecc->read_oob_raw = ecc->read_oob;
4683 if (!ecc->write_oob_raw)
4684 ecc->write_oob_raw = ecc->write_oob;
4685
4686 /* propagate ecc info to mtd_info */
4687 mtd->ecc_strength = ecc->strength;
4688 mtd->ecc_step_size = ecc->size;
4689
4690 /*
4691 * Set the number of read / write steps for one page depending on ECC
4692 * mode.
4693 */
4694 ecc->steps = mtd->writesize / ecc->size;
4695 if (ecc->steps * ecc->size != mtd->writesize) {
4696 WARN(1, "Invalid ECC parameters\n");
4697 ret = -EINVAL;
4698 goto err_free;
4699 }
4700 ecc->total = ecc->steps * ecc->bytes;
4701
4702 /*
4703 * The number of bytes available for a client to place data into
4704 * the out of band area.
4705 */
4706 ret = mtd_ooblayout_count_freebytes(mtd);
4707 if (ret < 0)
4708 ret = 0;
4709
4710 mtd->oobavail = ret;
4711
4712 /* ECC sanity check: warn if it's too weak */
4713 if (!nand_ecc_strength_good(mtd))
4714 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
4715 mtd->name);
4716
4717 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
4718 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
4719 switch (ecc->steps) {
4720 case 2:
4721 mtd->subpage_sft = 1;
4722 break;
4723 case 4:
4724 case 8:
4725 case 16:
4726 mtd->subpage_sft = 2;
4727 break;
4728 }
4729 }
4730 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
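/*
 * E.g. an SLC chip with a 2048 byte page and ecc->steps = 4 ends up with
 * subpage_sft = 2, i.e. a 512 byte subpage size.
 */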
4731
4732 /* Initialize state */
4733 chip->state = FL_READY;
4734
4735 /* Invalidate the pagebuffer reference */
4736 chip->pagebuf = -1;
4737
4738 /* Large page NAND with SOFT_ECC should support subpage reads */
4739 switch (ecc->mode) {
4740 case NAND_ECC_SOFT:
4741 if (chip->page_shift > 9)
4742 chip->options |= NAND_SUBPAGE_READ;
4743 break;
4744
4745 default:
4746 break;
4747 }
4748
4749 /* Fill in remaining MTD driver data */
4750 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
4751 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
4752 MTD_CAP_NANDFLASH;
4753 mtd->_erase = nand_erase;
4754 mtd->_point = NULL;
4755 mtd->_unpoint = NULL;
4756 mtd->_read = nand_read;
4757 mtd->_write = nand_write;
4758 mtd->_panic_write = panic_nand_write;
4759 mtd->_read_oob = nand_read_oob;
4760 mtd->_write_oob = nand_write_oob;
4761 mtd->_sync = nand_sync;
4762 mtd->_lock = NULL;
4763 mtd->_unlock = NULL;
4764 mtd->_suspend = nand_suspend;
4765 mtd->_resume = nand_resume;
4766 mtd->_reboot = nand_shutdown;
4767 mtd->_block_isreserved = nand_block_isreserved;
4768 mtd->_block_isbad = nand_block_isbad;
4769 mtd->_block_markbad = nand_block_markbad;
4770 mtd->writebufsize = mtd->writesize;
4771
4772 /*
4773 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
4774 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
4775 * properly set.
4776 */
4777 if (!mtd->bitflip_threshold)
4778 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
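/* e.g. with an ECC strength of 8, the default is DIV_ROUND_UP(24, 4) = 6 bitflips */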
4779
4780 /* Check if we should skip the bad block table scan */
4781 if (chip->options & NAND_SKIP_BBTSCAN)
4782 return 0;
4783
4784 /* Build bad block table */
4785 return chip->scan_bbt(mtd);
4786 err_free:
4787 if (!(chip->options & NAND_OWN_BUFFERS))
4788 kfree(chip->buffers);
4789 return ret;
4790 }
4791 EXPORT_SYMBOL(nand_scan_tail);
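/*
 * Minimal sketch (not part of this driver) of how a controller driver
 * typically uses the two-phase scan above. my_setup_hw_ecc() and
 * example_nand_probe() are hypothetical; error handling is reduced to
 * the essentials.
 */
#if 0
static int example_nand_probe(struct mtd_info *mtd)
{
	int ret;

	/* Phase 1: identify the chip and fill in the geometry fields */
	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	/* The driver may now tune chip->ecc based on what was detected */
	my_setup_hw_ecc(mtd_to_nand(mtd));

	/* Phase 2: install defaults, allocate buffers, scan the BBT */
	ret = nand_scan_tail(mtd);
	if (ret)
		return ret;

	return mtd_device_register(mtd, NULL, 0);
}
#endif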
4792
4793 /*
4794 * is_module_text_address() isn't exported, and it's mostly a pointless
4795 * test if this is a module _anyway_ -- they'd have to try _really_ hard
4796 * to call us from in-kernel code if the core NAND support is modular.
4797 */
4798 #ifdef MODULE
4799 #define caller_is_module() (1)
4800 #else
4801 #define caller_is_module() \
4802 is_module_text_address((unsigned long)__builtin_return_address(0))
4803 #endif
4804
4805 /**
4806 * nand_scan - [NAND Interface] Scan for the NAND device
4807 * @mtd: MTD device structure
4808 * @maxchips: number of chips to scan for
4809 *
4810 * This fills out all the uninitialized function pointers with the defaults.
4811 * The flash ID is read and the mtd/chip structures are filled with the
4812 * appropriate values.
4813 */
4814 int nand_scan(struct mtd_info *mtd, int maxchips)
4815 {
4816 int ret;
4817
4818 ret = nand_scan_ident(mtd, maxchips, NULL);
4819 if (!ret)
4820 ret = nand_scan_tail(mtd);
4821 return ret;
4822 }
4823 EXPORT_SYMBOL(nand_scan);
4824
4825 /**
4826 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
4827 * @chip: NAND chip object
4828 */
4829 void nand_cleanup(struct nand_chip *chip)
4830 {
4831 if (chip->ecc.mode == NAND_ECC_SOFT &&
4832 chip->ecc.algo == NAND_ECC_BCH)
4833 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
4834
4835 nand_release_data_interface(chip);
4836
4837 /* Free bad block table memory */
4838 kfree(chip->bbt);
4839 if (!(chip->options & NAND_OWN_BUFFERS))
4840 kfree(chip->buffers);
4841
4842 /* Free bad block descriptor memory */
4843 if (chip->badblock_pattern && chip->badblock_pattern->options
4844 & NAND_BBT_DYNAMICSTRUCT)
4845 kfree(chip->badblock_pattern);
4846 }
4847 EXPORT_SYMBOL_GPL(nand_cleanup);
4848
4849 /**
4850 * nand_release - [NAND Interface] Unregister the MTD device and free resources
4851 * held by the NAND device
4852 * @mtd: MTD device structure
4853 */
4854 void nand_release(struct mtd_info *mtd)
4855 {
4856 mtd_device_unregister(mtd);
4857 nand_cleanup(mtd_to_nand(mtd));
4858 }
4859 EXPORT_SYMBOL_GPL(nand_release);
4860
4861 MODULE_LICENSE("GPL");
4862 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
4863 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
4864 MODULE_DESCRIPTION("Generic NAND flash driver code");